feat: support passing "extra body" through to providers

# What does this PR do?
Allows extra_body parameters (request fields outside the standard OpenAI schema, such as vLLM's guided_choice and prompt_logprobs) to be passed through to inference providers.


closes #2720
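
Request models now accept unknown fields (Pydantic `extra="allow"`), and the router forwards those fields to the selected provider as `extra_body`. As a rough illustration of the client-side experience (the base URL, model id, and `guided_choice` values below are placeholders, not taken from this PR):

```python
# Illustrative sketch only: base_url, model, and the guided_choice values are
# placeholders, and a running Llama Stack OpenAI-compatible endpoint is assumed.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

# guided_choice is not part of the OpenAI schema; with this change it is kept
# as an extra field on the request model and passed through to the provider.
resp = client.chat.completions.create(
    model="my-vllm-model",
    messages=[{"role": "user", "content": "Is this review positive or negative?"}],
    extra_body={"guided_choice": ["positive", "negative"]},
)
print(resp.choices[0].message.content)
```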

## Test Plan
CI, plus a new test added in this PR.
Eric Huang 2025-10-10 14:41:09 -07:00
parent cb7fb0705b
commit 70d341c385
18 changed files with 1725 additions and 93 deletions


@@ -1058,8 +1058,6 @@ class OpenAICompletionRequest(BaseModel):
     :param top_p: (Optional) The top p to use.
     :param user: (Optional) The user to use.
     :param suffix: (Optional) The suffix that should be appended to the completion.
-    :param guided_choice: (Optional) vLLM-specific parameter for guided generation with a list of choices.
-    :param prompt_logprobs: (Optional) vLLM-specific parameter for number of log probabilities to return for prompt tokens.
     """
 
     model_config = ConfigDict(extra="allow")
@@ -1082,12 +1080,6 @@ class OpenAICompletionRequest(BaseModel):
     temperature: float | None = None
     top_p: float | None = None
     user: str | None = None
-    # vLLM-specific parameters (documented here but also allowed via extra fields)
-    guided_choice: list[str] | None = None
-    prompt_logprobs: int | None = None
     # for fill-in-the-middle type completion
     suffix: str | None = None
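
The passthrough hinges on the `extra="allow"` config above: fields not declared on the request model are retained and exposed via `__pydantic_extra__`. A minimal standalone sketch of that Pydantic behavior (the class here is illustrative, not the real request model):

```python
# Standalone sketch of the Pydantic behavior relied on above; `Request` is an
# illustrative stand-in, not the actual OpenAICompletionRequest model.
from pydantic import BaseModel, ConfigDict


class Request(BaseModel):
    model_config = ConfigDict(extra="allow")
    prompt: str


req = Request(prompt="hi", guided_choice=["yes", "no"])
print(req.__pydantic_extra__)  # {'guided_choice': ['yes', 'no']}
```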


@@ -246,7 +246,8 @@ class InferenceRouter(Inference):
         provider = await self.routing_table.get_provider_impl(model_obj.identifier)
         if params.stream:
-            response_stream = await provider.openai_chat_completion(params)
+            extra_body = dict(params.__pydantic_extra__ or {})
+            response_stream = await provider.openai_chat_completion(params, **extra_body)
             # For streaming, the provider returns AsyncIterator[OpenAIChatCompletionChunk]
             # We need to add metrics to each chunk and store the final completion
@@ -319,7 +320,8 @@ class InferenceRouter(Inference):
     async def _nonstream_openai_chat_completion(
         self, provider: Inference, params: OpenAIChatCompletionRequest
     ) -> OpenAIChatCompletion:
-        response = await provider.openai_chat_completion(params)
+        extra_body = dict(params.__pydantic_extra__ or {})
+        response = await provider.openai_chat_completion(params, **extra_body)
         for choice in response.choices:
             # some providers return an empty list for no tool calls in non-streaming responses
             # but the OpenAI API returns None. So, set tool_calls to None if it's empty
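
Both router paths use the same two-line pattern: read the undeclared fields off the validated request and spread them into the provider call as keyword arguments. Condensed as a sketch (`provider` and `params` stand in for the real adapter and request objects):

```python
# Condensed sketch of the router-side forwarding pattern shown in the diff;
# `provider` and `params` stand in for the real adapter and request objects.
async def call_with_extras(provider, params):
    # Undeclared request fields (e.g. vLLM's guided_choice) end up in
    # __pydantic_extra__ because the request model sets extra="allow".
    extra_body = dict(params.__pydantic_extra__ or {})
    return await provider.openai_chat_completion(params, **extra_body)
```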


@@ -15,6 +15,8 @@ from pydantic import ConfigDict
 from llama_stack.apis.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionRequest,
+    OpenAICompletion,
+    OpenAICompletionRequest,
     ToolChoice,
 )
 from llama_stack.log import get_logger
@@ -91,6 +93,14 @@ class VLLMInferenceAdapter(OpenAIMixin):
             log.warning(f"Not checking model availability for {model} as API token may trigger OAuth workflow")
             return True
 
+    async def openai_completion(
+        self,
+        params: OpenAICompletionRequest,
+    ) -> OpenAICompletion:
+        # Extract vLLM-specific parameters from extra fields and pass as kwargs
+        extra_body = dict(params.__pydantic_extra__ or {})
+        return await super().openai_completion(params, **extra_body)
+
     async def openai_chat_completion(
         self,
         params: OpenAIChatCompletionRequest,
@@ -108,4 +118,6 @@ class VLLMInferenceAdapter(OpenAIMixin):
         if not params.tools and params.tool_choice is not None:
             params.tool_choice = ToolChoice.none.value
-        return await super().openai_chat_completion(params)
+        # Extract vLLM-specific parameters from extra fields and pass as kwargs
+        extra_body = dict(params.__pydantic_extra__ or {})
+        return await super().openai_chat_completion(params, **extra_body)
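
With these overrides, vLLM-only options such as `prompt_logprobs` can ride along on an otherwise standard request. A hedged sketch of where the extra field lands (the model id is a placeholder and no live server is called):

```python
# Hedged sketch: the model id is a placeholder; this only shows where the
# undeclared field ends up, it does not contact a vLLM server.
from llama_stack.apis.inference import OpenAICompletionRequest

params = OpenAICompletionRequest(
    model="my-vllm-model",
    prompt="The capital of France is",
    prompt_logprobs=1,  # not a declared field; captured via extra="allow"
)
# The adapter reads this dict and forwards it as **kwargs -> extra_body.
print(params.__pydantic_extra__)  # {'prompt_logprobs': 1}
```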


@@ -248,8 +248,6 @@ class LiteLLMOpenAIMixin(
             temperature=params.temperature,
             top_p=params.top_p,
             user=params.user,
-            guided_choice=params.guided_choice,
-            prompt_logprobs=params.prompt_logprobs,
             suffix=params.suffix,
             api_key=self.get_api_key(),
             api_base=self.api_base,


@@ -224,19 +224,11 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
     async def openai_completion(
         self,
         params: OpenAICompletionRequest,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         """
         Direct OpenAI completion API call.
         """
-        # Handle parameters that are not supported by OpenAI API, but may be by the provider
-        # prompt_logprobs is supported by vLLM
-        # guided_choice is supported by vLLM
-        # TODO: test coverage
-        extra_body: dict[str, Any] = {}
-        if params.prompt_logprobs is not None and params.prompt_logprobs >= 0:
-            extra_body["prompt_logprobs"] = params.prompt_logprobs
-        if params.guided_choice:
-            extra_body["guided_choice"] = params.guided_choice
 
         # TODO: fix openai_completion to return type compatible with OpenAI's API response
         completion_kwargs = await prepare_openai_completion_params(
@@ -259,13 +251,16 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=params.user,
             suffix=params.suffix,
         )
-        resp = await self.client.completions.create(**completion_kwargs, extra_body=extra_body)
+        if extra_body := kwargs:
+            completion_kwargs["extra_body"] = extra_body
+        resp = await self.client.completions.create(**completion_kwargs)
         return await self._maybe_overwrite_id(resp, params.stream)  # type: ignore[no-any-return]
 
     async def openai_chat_completion(
         self,
         params: OpenAIChatCompletionRequest,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """
         Direct OpenAI chat completion API call.
@@ -316,6 +311,8 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=params.user,
         )
+        if extra_body := kwargs:
+            request_params["extra_body"] = extra_body
         resp = await self.client.chat.completions.create(**request_params)
         return await self._maybe_overwrite_id(resp, params.stream)  # type: ignore[no-any-return]
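
The mixin only attaches `extra_body` when extras are present, so requests without provider-specific fields are unchanged. On the wire, the OpenAI Python client merges `extra_body` into the JSON payload, which is how a backend like vLLM ultimately receives keys such as `guided_choice`. A minimal illustration of that client behavior (base URL, model, and values are placeholders):

```python
# Minimal illustration of the OpenAI client behavior the mixin relies on;
# base_url, model, and the guided_choice values are placeholders.
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="none")
    # extra_body keys are merged into the JSON request body, so the backend
    # sees guided_choice even though it is not an OpenAI API field.
    resp = await client.completions.create(
        model="my-vllm-model",
        prompt="Answer yes or no:",
        extra_body={"guided_choice": ["yes", "no"]},
    )
    print(resp.choices[0].text)


asyncio.run(main())
```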