# What does this PR do?


## Test Plan
This commit is contained in:
Eric Huang 2025-10-08 13:54:19 -07:00
parent 96886afaca
commit 521009048a
207 changed files with 71733 additions and 2042 deletions

View file

@@ -247,6 +247,7 @@ class LiteLLMOpenAIMixin(
guided_choice: list[str] | None = None,
prompt_logprobs: int | None = None,
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
model_obj = await self.model_store.get_model(model)
params = await prepare_openai_completion_params(
@@ -271,6 +272,7 @@ class LiteLLMOpenAIMixin(
prompt_logprobs=prompt_logprobs,
api_key=self.get_api_key(),
api_base=self.api_base,
**kwargs,
)
return await litellm.atext_completion(**params)
@@ -299,6 +301,7 @@ class LiteLLMOpenAIMixin(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
# Add usage tracking for streaming when telemetry is active
from llama_stack.providers.utils.telemetry.tracing import get_current_span
@@ -335,6 +338,7 @@ class LiteLLMOpenAIMixin(
user=user,
api_key=self.get_api_key(),
api_base=self.api_base,
**kwargs,
)
return await litellm.acompletion(**params)