Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-10 05:24:39 +00:00.
test

# What does this PR do?

## Test Plan

This commit is contained in:
parent 96886afaca
commit 521009048a

207 changed files with 71733 additions and 2042 deletions
```diff
@@ -247,6 +247,7 @@ class LiteLLMOpenAIMixin(
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         model_obj = await self.model_store.get_model(model)
         params = await prepare_openai_completion_params(
@@ -271,6 +272,7 @@ class LiteLLMOpenAIMixin(
             prompt_logprobs=prompt_logprobs,
             api_key=self.get_api_key(),
             api_base=self.api_base,
+            **kwargs,
         )
         return await litellm.atext_completion(**params)
 
@@ -299,6 +301,7 @@ class LiteLLMOpenAIMixin(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         # Add usage tracking for streaming when telemetry is active
         from llama_stack.providers.utils.telemetry.tracing import get_current_span
@@ -335,6 +338,7 @@ class LiteLLMOpenAIMixin(
             user=user,
             api_key=self.get_api_key(),
             api_base=self.api_base,
+            **kwargs,
         )
         return await litellm.acompletion(**params)
 
```
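Read together, the four hunks above thread arbitrary `**kwargs` from the mixin's public methods through `prepare_openai_completion_params` and into `litellm.atext_completion` / `litellm.acompletion`. Below is a minimal, self-contained sketch of that passthrough pattern with a stubbed backend in place of litellm; `prepare_params` and `fake_atext_completion` are illustrative stand-ins, not llama-stack or litellm APIs.

```python
import asyncio
from typing import Any


async def prepare_params(**kwargs: Any) -> dict[str, Any]:
    # Mirrors what prepare_openai_completion_params does with the explicit
    # keyword arguments: drop any that were left as None.
    return {k: v for k, v in kwargs.items() if v is not None}


async def fake_atext_completion(**params: Any) -> dict[str, Any]:
    # Stand-in for litellm.atext_completion: echo back what it received.
    return {"received": params}


async def openai_completion(
    model: str,
    prompt: str,
    suffix: str | None = None,
    **kwargs: Any,
) -> dict[str, Any]:
    # As in LiteLLMOpenAIMixin after this change: keyword arguments the
    # signature does not name ride along into the backend call untouched.
    params = await prepare_params(model=model, prompt=prompt, suffix=suffix, **kwargs)
    return await fake_atext_completion(**params)


print(asyncio.run(openai_completion("m", "hi", top_k=5)))
# -> {'received': {'model': 'm', 'prompt': 'hi', 'top_k': 5}}
```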
```diff
@@ -247,6 +247,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         """
         Direct OpenAI completion API call.
@@ -261,6 +262,9 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         if guided_choice:
             extra_body["guided_choice"] = guided_choice
 
+        # Merge any additional kwargs into extra_body
+        extra_body.update(kwargs)
+
         # TODO: fix openai_completion to return type compatible with OpenAI's API response
         resp = await self.client.completions.create(
             **await prepare_openai_completion_params(
@@ -313,6 +317,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """
         Direct OpenAI chat completion API call.
@@ -361,7 +366,10 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=user,
         )
 
-        resp = await self.client.chat.completions.create(**params)
+        # Pass any additional provider-specific parameters as extra_body
+        extra_body = kwargs if kwargs else {}
+
+        resp = await self.client.chat.completions.create(**params, extra_body=extra_body)
 
         return await self._maybe_overwrite_id(resp, stream)  # type: ignore[no-any-return]
 
```
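`OpenAIMixin` takes a different route: rather than forwarding the extra kwargs as top-level parameters, it folds them into `extra_body`, which the `openai` Python SDK (v1.x) merges into the JSON request body. A hedged sketch of what that looks like from the client side; the base URL, API key, model name, and the vLLM-style `guided_choice` field are placeholders chosen for illustration, not values fixed by this commit.

```python
# Requires the `openai` package (v1.x) and a reachable OpenAI-compatible server.
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")


async def demo() -> None:
    resp = await client.chat.completions.create(
        model="my-model",
        messages=[{"role": "user", "content": "Answer yes or no."}],
        # Fields in extra_body are merged into the JSON request body, which is
        # how non-OpenAI params such as vLLM's guided_choice reach the server.
        extra_body={"guided_choice": ["yes", "no"]},
    )
    print(resp.choices[0].message.content)


asyncio.run(demo())
```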