# What does this PR do?

Adds a `**kwargs: Any` pass-through parameter to the OpenAI-compatible inference APIs (`openai_completion` and `openai_chat_completion`) and threads it through the `InferenceRouter`, the shared mixins (`OpenAIMixin`, `LiteLLMOpenAIMixin`), and the provider implementations. Extra keyword arguments are forwarded to the backend as `extra_body`, so provider-specific options (for example `chat_template_kwargs` for vLLM) can be passed without changing the typed signature.

## Test Plan

Commit: 521009048a (parent 96886afaca)
Author: Eric Huang, 2025-10-08 13:54:19 -07:00
207 changed files with 71733 additions and 2042 deletions


@@ -1052,6 +1052,7 @@ class InferenceProvider(Protocol):
prompt_logprobs: int | None = None,
# for fill-in-the-middle type completion
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
"""Create completion.
@@ -1075,6 +1076,7 @@ class InferenceProvider(Protocol):
:param top_p: (Optional) The top p to use.
:param user: (Optional) The user to use.
:param suffix: (Optional) The suffix that should be appended to the completion.
:param kwargs: (Optional) Additional provider-specific parameters to pass through as extra_body.
:returns: An OpenAICompletion.
"""
...
@@ -1106,6 +1108,7 @@ class InferenceProvider(Protocol):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
"""Create chat completions.
@@ -1134,6 +1137,7 @@ class InferenceProvider(Protocol):
:param top_logprobs: (Optional) The top log probabilities to use.
:param top_p: (Optional) The top p to use.
:param user: (Optional) The user to use.
:param kwargs: (Optional) Additional provider-specific parameters to pass through as extra_body (e.g., chat_template_kwargs for vLLM).
:returns: An OpenAIChatCompletion.
"""
...
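
For context, a hedged sketch of what the pass-through enables at the call site. The model id and the `enable_thinking` value are illustrative assumptions; `chat_template_kwargs` is the vLLM example named in the docstring above. Keywords that are not part of the typed signature are captured by `**kwargs` and forwarded to the backend as `extra_body`:

```python
async def ask(provider) -> None:
    # "provider" is any implementation of the InferenceProvider protocol above.
    # chat_template_kwargs is not a typed parameter; it is captured by **kwargs
    # and sent to the backend inside extra_body.
    response = await provider.openai_chat_completion(
        model="vllm/qwen3-8b",  # illustrative model id
        messages=[{"role": "user", "content": "Hello"}],
        chat_template_kwargs={"enable_thinking": False},  # provider-specific pass-through
    )
    print(response)
```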


@@ -201,6 +201,7 @@ class InferenceRouter(Inference):
guided_choice: list[str] | None = None,
prompt_logprobs: int | None = None,
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
logger.debug(
f"InferenceRouter.openai_completion: {model=}, {stream=}, {prompt=}",
@@ -227,6 +228,7 @@ class InferenceRouter(Inference):
guided_choice=guided_choice,
prompt_logprobs=prompt_logprobs,
suffix=suffix,
**kwargs,
)
provider = await self.routing_table.get_provider_impl(model_obj.identifier)
if stream:
@@ -277,6 +279,7 @@ class InferenceRouter(Inference):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
logger.debug(
f"InferenceRouter.openai_chat_completion: {model=}, {stream=}, {messages=}",
@@ -323,6 +326,7 @@ class InferenceRouter(Inference):
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
**kwargs,
)
provider = await self.routing_table.get_provider_impl(model_obj.identifier)
if stream:


@@ -173,5 +173,6 @@ class MetaReferenceInferenceImpl(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
raise NotImplementedError("OpenAI chat completion not supported by meta-reference inference provider")


@@ -96,6 +96,7 @@ class SentenceTransformersInferenceImpl(
prompt_logprobs: int | None = None,
# for fill-in-the-middle type completion
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
raise NotImplementedError("OpenAI completion not supported by sentence transformers provider")
@@ -124,5 +125,6 @@ class SentenceTransformersInferenceImpl(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
raise NotImplementedError("OpenAI chat completion not supported by sentence transformers provider")


@@ -158,6 +158,7 @@ class BedrockInferenceAdapter(
prompt_logprobs: int | None = None,
# for fill-in-the-middle type completion
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
raise NotImplementedError("OpenAI completion not supported by the Bedrock provider")
@@ -186,5 +187,6 @@ class BedrockInferenceAdapter(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
raise NotImplementedError("OpenAI chat completion not supported by the Bedrock provider")


@@ -63,5 +63,6 @@ class DatabricksInferenceAdapter(OpenAIMixin):
guided_choice: list[str] | None = None,
prompt_logprobs: int | None = None,
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
raise NotImplementedError()


@@ -54,6 +54,7 @@ class LlamaCompatInferenceAdapter(OpenAIMixin):
guided_choice: list[str] | None = None,
prompt_logprobs: int | None = None,
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
raise NotImplementedError()


@@ -100,6 +100,7 @@ class PassthroughInferenceAdapter(Inference):
guided_choice: list[str] | None = None,
prompt_logprobs: int | None = None,
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
client = self._get_client()
model_obj = await self.model_store.get_model(model)
@@ -124,6 +125,7 @@ class PassthroughInferenceAdapter(Inference):
user=user,
guided_choice=guided_choice,
prompt_logprobs=prompt_logprobs,
**kwargs,
)
return await client.inference.openai_completion(**params)
@@ -153,6 +155,7 @@ class PassthroughInferenceAdapter(Inference):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
client = self._get_client()
model_obj = await self.model_store.get_model(model)
@@ -181,6 +184,7 @@ class PassthroughInferenceAdapter(Inference):
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
**kwargs,
)
return await client.inference.openai_chat_completion(**params)


@@ -57,6 +57,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
):
"""Override to add RunPod-specific stream_options requirement."""
if stream and not stream_options:
@@ -86,4 +87,5 @@ class RunpodInferenceAdapter(OpenAIMixin):
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
**kwargs,
)


@@ -102,6 +102,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
max_tokens = max_tokens or self.config.max_tokens
@@ -136,4 +137,5 @@ class VLLMInferenceAdapter(OpenAIMixin):
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
**kwargs,
)
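
As a reference point, a minimal sketch of the equivalent raw request the adapter ends up making through the OpenAI client once the pass-through kwargs reach `extra_body` (the endpoint URL, model name, and `enable_thinking` flag are placeholders for a local vLLM OpenAI-compatible server):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    # Placeholder endpoint and model for a local vLLM OpenAI-compatible server.
    client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")
    resp = await client.chat.completions.create(
        model="my-model",
        messages=[{"role": "user", "content": "Hi"}],
        # extra_body keys are merged into the JSON request body, which is how
        # pass-through kwargs reach vLLM-specific options such as chat_template_kwargs.
        extra_body={"chat_template_kwargs": {"enable_thinking": False}},
    )
    print(resp.choices[0].message.content)


asyncio.run(main())
```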


@@ -247,6 +247,7 @@ class LiteLLMOpenAIMixin(
guided_choice: list[str] | None = None,
prompt_logprobs: int | None = None,
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
model_obj = await self.model_store.get_model(model)
params = await prepare_openai_completion_params(
@@ -271,6 +272,7 @@ class LiteLLMOpenAIMixin(
prompt_logprobs=prompt_logprobs,
api_key=self.get_api_key(),
api_base=self.api_base,
**kwargs,
)
return await litellm.atext_completion(**params)
@@ -299,6 +301,7 @@ class LiteLLMOpenAIMixin(
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
# Add usage tracking for streaming when telemetry is active
from llama_stack.providers.utils.telemetry.tracing import get_current_span
@@ -335,6 +338,7 @@ class LiteLLMOpenAIMixin(
user=user,
api_key=self.get_api_key(),
api_base=self.api_base,
**kwargs,
)
return await litellm.acompletion(**params)
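
The LiteLLM path relies on `prepare_openai_completion_params` flattening the explicit arguments and the pass-through `**kwargs` into a single params dict. A hypothetical stand-in (not the real helper from llama_stack's provider utils) showing the merge behavior assumed here:

```python
from typing import Any


def prepare_params_sketch(**params: Any) -> dict[str, Any]:
    # Hypothetical stand-in: explicit arguments and pass-through kwargs arrive
    # in one flat mapping, and unset (None) values are dropped so they are not
    # sent to the backend.
    return {k: v for k, v in params.items() if v is not None}


# The pass-through key survives alongside the typed parameters.
merged = prepare_params_sketch(
    model="m",
    temperature=None,
    chat_template_kwargs={"enable_thinking": False},
)
assert merged == {"model": "m", "chat_template_kwargs": {"enable_thinking": False}}
```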


@@ -247,6 +247,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
guided_choice: list[str] | None = None,
prompt_logprobs: int | None = None,
suffix: str | None = None,
**kwargs: Any,
) -> OpenAICompletion:
"""
Direct OpenAI completion API call.
@@ -261,6 +262,9 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
if guided_choice:
extra_body["guided_choice"] = guided_choice
# Merge any additional kwargs into extra_body
extra_body.update(kwargs)
# TODO: fix openai_completion to return type compatible with OpenAI's API response
resp = await self.client.completions.create(
**await prepare_openai_completion_params(
@@ -313,6 +317,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
top_logprobs: int | None = None,
top_p: float | None = None,
user: str | None = None,
**kwargs: Any,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
"""
Direct OpenAI chat completion API call.
@@ -361,7 +366,10 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
user=user,
)
resp = await self.client.chat.completions.create(**params)
# Pass any additional provider-specific parameters as extra_body
extra_body = kwargs if kwargs else {}
resp = await self.client.chat.completions.create(**params, extra_body=extra_body)
return await self._maybe_overwrite_id(resp, stream) # type: ignore[no-any-return]
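
To make the merge semantics of the completion path explicit (values below are illustrative): explicitly handled options such as `guided_choice` and the pass-through kwargs land in the same `extra_body` dict, and because `dict.update` runs last, a pass-through key with the same name would overwrite the explicitly set one.

```python
# Illustrative values only.
guided_choice = ["yes", "no"]
kwargs = {"chat_template_kwargs": {"enable_thinking": False}}

extra_body: dict = {}
if guided_choice:
    extra_body["guided_choice"] = guided_choice
# Pass-through kwargs are merged last, so they can override the keys set above.
extra_body.update(kwargs)

assert extra_body == {
    "guided_choice": ["yes", "no"],
    "chat_template_kwargs": {"enable_thinking": False},
}
```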