# What does this PR do?


## Test Plan
This commit is contained in:
Eric Huang 2025-10-08 13:38:52 -07:00
parent 96886afaca
commit 001bf15bf8
12 changed files with 69 additions and 1 deletions

View file

@@ -173,5 +173,6 @@ class MetaReferenceInferenceImpl(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
raise NotImplementedError("OpenAI chat completion not supported by meta-reference inference provider")

View file

@@ -124,5 +124,6 @@ class SentenceTransformersInferenceImpl(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
raise NotImplementedError("OpenAI chat completion not supported by sentence transformers provider")

View file

@@ -186,5 +186,6 @@ class BedrockInferenceAdapter(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
raise NotImplementedError("OpenAI chat completion not supported by the Bedrock provider")

View file

@@ -153,6 +153,7 @@ class PassthroughInferenceAdapter(Inference):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
client = self._get_client()
model_obj = await self.model_store.get_model(model)
@@ -181,6 +182,7 @@ class PassthroughInferenceAdapter(Inference):
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
**kwargs,
)
return await client.inference.openai_chat_completion(**params)

View file

@@ -57,6 +57,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
):
"""Override to add RunPod-specific stream_options requirement."""
if stream and not stream_options:
@@ -86,4 +87,5 @@ class RunpodInferenceAdapter(OpenAIMixin):
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
**kwargs,
)

View file

@@ -102,6 +102,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
max_tokens = max_tokens or self.config.max_tokens
@@ -136,4 +137,5 @@ class VLLMInferenceAdapter(OpenAIMixin):
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
**kwargs,
)

View file

@@ -299,6 +299,7 @@ class LiteLLMOpenAIMixin(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
# Add usage tracking for streaming when telemetry is active
from llama_stack.providers.utils.telemetry.tracing import get_current_span
@@ -335,6 +336,7 @@ class LiteLLMOpenAIMixin(
user=user,
api_key=self.get_api_key(),
api_base=self.api_base,
**kwargs,
)
return await litellm.acompletion(**params)

View file

@@ -313,6 +313,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
"""
Direct OpenAI chat completion API call.
@@ -361,7 +362,10 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
user=user,
)
resp = await self.client.chat.completions.create(**params)
# Pass any additional provider-specific parameters as extra_body
extra_body = kwargs if kwargs else {}
resp = await self.client.chat.completions.create(**params, extra_body=extra_body)
return await self._maybe_overwrite_id(resp, stream) # type: ignore[no-any-return]