feat: support passing "extra body" through to providers

# What does this PR do?
Allows passing through `extra_body` parameters to inference providers.


closes #2720
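
For example, a caller can now attach provider-specific knobs that are not part of the OpenAI API surface. A minimal sketch (not taken from this PR; the endpoint URL, model name, and the vLLM-specific `guided_choice` parameter are illustrative placeholders):

```python
# Sketch: exercising the pass-through via the OpenAI-compatible endpoint.
# `guided_choice` is a vLLM extension, not an OpenAI API parameter; it now
# travels via extra_body instead of a dedicated request field.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1", api_key="none")
resp = client.chat.completions.create(
    model="my-vllm-model",
    messages=[{"role": "user", "content": "Answer with one word: four or five?"}],
    extra_body={"guided_choice": ["four", "five"]},  # forwarded to the provider untouched
)
print(resp.choices[0].message.content)
```

Any field the request model does not declare is captured and forwarded verbatim as `extra_body` to the downstream provider.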

## Test Plan
CI, plus a newly added test.
Eric Huang 2025-10-10 15:46:56 -07:00
parent 80d58ab519
commit 10c7e67fca
35 changed files with 1893 additions and 200 deletions

@@ -49,7 +49,7 @@ from llama_stack.apis.inference import (
     Inference,
     Message,
     OpenAIAssistantMessageParam,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     OpenAIDeveloperMessageParam,
     OpenAIMessageParam,
     OpenAISystemMessageParam,
@@ -583,7 +583,7 @@ class ChatAgent(ShieldRunnerMixin):
         max_tokens = getattr(sampling_params, "max_tokens", None)

         # Use OpenAI chat completion
-        params = OpenAIChatCompletionRequest(
+        params = OpenAIChatCompletionRequestWithExtraBody(
            model=self.agent_config.model,
            messages=openai_messages,
            tools=openai_tools if openai_tools else None,

@@ -49,7 +49,7 @@ from llama_stack.apis.inference import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     OpenAIChatCompletionToolCall,
     OpenAIChoice,
     OpenAIMessageParam,
@@ -169,7 +169,7 @@ class StreamingResponseOrchestrator:
         # (some providers don't support non-empty response_format when tools are present)
         response_format = None if self.ctx.response_format.type == "text" else self.ctx.response_format
         logger.debug(f"calling openai_chat_completion with tools: {self.ctx.chat_tools}")
-        params = OpenAIChatCompletionRequest(
+        params = OpenAIChatCompletionRequestWithExtraBody(
            model=self.ctx.model,
            messages=messages,
            tools=self.ctx.chat_tools,

@@ -22,8 +22,8 @@ from llama_stack.apis.files import Files, OpenAIFilePurpose
 from llama_stack.apis.inference import (
     Inference,
     OpenAIAssistantMessageParam,
-    OpenAIChatCompletionRequest,
-    OpenAICompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAICompletionRequestWithExtraBody,
     OpenAIDeveloperMessageParam,
     OpenAIMessageParam,
     OpenAISystemMessageParam,
@@ -608,7 +608,7 @@ class ReferenceBatchesImpl(Batches):
         # TODO(SECURITY): review body for security issues
         if request.url == "/v1/chat/completions":
             request.body["messages"] = [convert_to_openai_message_param(msg) for msg in request.body["messages"]]
-            chat_params = OpenAIChatCompletionRequest(**request.body)
+            chat_params = OpenAIChatCompletionRequestWithExtraBody(**request.body)
             chat_response = await self.inference_api.openai_chat_completion(chat_params)

             # this is for mypy, we don't allow streaming so we'll get the right type
@@ -623,7 +623,7 @@
                 },
             }
         elif request.url == "/v1/completions":
-            completion_params = OpenAICompletionRequest(**request.body)
+            completion_params = OpenAICompletionRequestWithExtraBody(**request.body)
             completion_response = await self.inference_api.openai_completion(completion_params)

             # this is for mypy, we don't allow streaming so we'll get the right type

@@ -14,8 +14,8 @@ from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.inference import (
     Inference,
-    OpenAIChatCompletionRequest,
-    OpenAICompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAICompletionRequestWithExtraBody,
     OpenAISystemMessageParam,
     OpenAIUserMessageParam,
     UserMessage,
@@ -175,7 +175,7 @@ class MetaReferenceEvalImpl(
                 sampling_params["stop"] = candidate.sampling_params.stop

             input_content = json.loads(x[ColumnName.completion_input.value])
-            params = OpenAICompletionRequest(
+            params = OpenAICompletionRequestWithExtraBody(
                model=candidate.model,
                prompt=input_content,
                **sampling_params,
@@ -195,7 +195,7 @@ class MetaReferenceEvalImpl(
             messages += [OpenAISystemMessageParam(**x) for x in chat_completion_input_json if x["role"] == "system"]

             messages += input_messages
-            params = OpenAIChatCompletionRequest(
+            params = OpenAIChatCompletionRequestWithExtraBody(
                model=candidate.model,
                messages=messages,
                **sampling_params,

@@ -9,8 +9,8 @@ from collections.abc import AsyncIterator

 from llama_stack.apis.inference import (
     InferenceProvider,
-    OpenAIChatCompletionRequest,
-    OpenAICompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAICompletionRequestWithExtraBody,
 )
 from llama_stack.apis.inference.inference import (
     OpenAIChatCompletion,
@@ -67,7 +67,7 @@ class MetaReferenceInferenceImpl(
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         raise NotImplementedError("OpenAI completion not supported by meta reference provider")
@@ -153,6 +153,6 @@ class MetaReferenceInferenceImpl(
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by meta-reference inference provider")

@@ -8,8 +8,8 @@ from collections.abc import AsyncIterator

 from llama_stack.apis.inference import (
     InferenceProvider,
-    OpenAIChatCompletionRequest,
-    OpenAICompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAICompletionRequestWithExtraBody,
 )
 from llama_stack.apis.inference.inference import (
     OpenAIChatCompletion,
@@ -72,12 +72,12 @@ class SentenceTransformersInferenceImpl(
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         raise NotImplementedError("OpenAI completion not supported by sentence transformers provider")

     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by sentence transformers provider")

@@ -13,7 +13,7 @@ from llama_stack.apis.common.content_types import ImageContentItem, TextContentItem
 from llama_stack.apis.inference import (
     Inference,
     Message,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     OpenAIUserMessageParam,
     UserMessage,
 )
@@ -296,7 +296,7 @@ class LlamaGuardShield:
         else:
             shield_input_message = self.build_text_shield_input(messages)

-        params = OpenAIChatCompletionRequest(
+        params = OpenAIChatCompletionRequestWithExtraBody(
            model=self.model,
            messages=[shield_input_message],
            stream=False,
@@ -384,7 +384,7 @@
         # TODO: Add Image based support for OpenAI Moderations
         shield_input_message = self.build_text_shield_input(messages)

-        params = OpenAIChatCompletionRequest(
+        params = OpenAIChatCompletionRequestWithExtraBody(
            model=self.model,
            messages=[shield_input_message],
            stream=False,

@@ -6,7 +6,7 @@
 import re
 from typing import Any

-from llama_stack.apis.inference import Inference, OpenAIChatCompletionRequest
+from llama_stack.apis.inference import Inference, OpenAIChatCompletionRequestWithExtraBody
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
@@ -55,7 +55,7 @@ class LlmAsJudgeScoringFn(RegisteredBaseScoringFn):
             generated_answer=generated_answer,
         )

-        params = OpenAIChatCompletionRequest(
+        params = OpenAIChatCompletionRequestWithExtraBody(
            model=fn_def.params.judge_model,
            messages=[
                {

@@ -8,7 +8,7 @@
 from jinja2 import Template

 from llama_stack.apis.common.content_types import InterleavedContent
-from llama_stack.apis.inference import OpenAIChatCompletionRequest, OpenAIUserMessageParam
+from llama_stack.apis.inference import OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
 from llama_stack.apis.tools.rag_tool import (
     DefaultRAGQueryGeneratorConfig,
     LLMRAGQueryGeneratorConfig,
@@ -65,7 +65,7 @@ async def llm_rag_query_generator(
         model = config.model
         message = OpenAIUserMessageParam(content=rendered_content)

-        params = OpenAIChatCompletionRequest(
+        params = OpenAIChatCompletionRequestWithExtraBody(
            model=model,
            messages=[message],
            stream=False,

@@ -12,8 +12,8 @@ from botocore.client import BaseClient

 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     Inference,
-    OpenAIChatCompletionRequest,
-    OpenAICompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAICompletionRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
 from llama_stack.apis.inference.inference import (
@@ -134,12 +134,12 @@ class BedrockInferenceAdapter(
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         raise NotImplementedError("OpenAI completion not supported by the Bedrock provider")

     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by the Bedrock provider")

@@ -8,7 +8,7 @@ from collections.abc import Iterable

 from databricks.sdk import WorkspaceClient

-from llama_stack.apis.inference import OpenAICompletion, OpenAICompletionRequest
+from llama_stack.apis.inference import OpenAICompletion, OpenAICompletionRequestWithExtraBody
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -39,6 +39,6 @@ class DatabricksInferenceAdapter(OpenAIMixin):
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         raise NotImplementedError()

@@ -3,7 +3,12 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from llama_stack.apis.inference.inference import OpenAICompletion, OpenAICompletionRequest, OpenAIEmbeddingsResponse
+from llama_stack.apis.inference.inference import (
+    OpenAICompletion,
+    OpenAICompletionRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+)
 from llama_stack.log import get_logger
 from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -29,7 +34,7 @@ class LlamaCompatInferenceAdapter(OpenAIMixin):
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         raise NotImplementedError()

@@ -13,9 +13,9 @@ from llama_stack.apis.inference import (
     Inference,
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     OpenAICompletion,
-    OpenAICompletionRequest,
+    OpenAICompletionRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
 from llama_stack.apis.models import Model
@@ -79,7 +79,7 @@ class PassthroughInferenceAdapter(Inference):
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         client = self._get_client()
         model_obj = await self.model_store.get_model(params.model)
@@ -93,7 +93,7 @@ class PassthroughInferenceAdapter(Inference):
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         client = self._get_client()
         model_obj = await self.model_store.get_model(params.model)

@@ -9,7 +9,7 @@ from collections.abc import AsyncIterator

 from llama_stack.apis.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -31,7 +31,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """Override to add RunPod-specific stream_options requirement."""
         params = params.model_copy()

@@ -14,7 +14,7 @@ from pydantic import ConfigDict

 from llama_stack.apis.inference import (
     OpenAIChatCompletion,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     ToolChoice,
 )
 from llama_stack.log import get_logger
@@ -93,7 +93,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         params = params.model_copy()

@@ -16,9 +16,9 @@ from llama_stack.apis.inference import (
     JsonSchemaResponseFormat,
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     OpenAICompletion,
-    OpenAICompletionRequest,
+    OpenAICompletionRequestWithExtraBody,
     OpenAIEmbeddingData,
     OpenAIEmbeddingsResponse,
     OpenAIEmbeddingUsage,
@@ -226,7 +226,7 @@ class LiteLLMOpenAIMixin(
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         model_obj = await self.model_store.get_model(params.model)
@@ -248,8 +248,6 @@
             temperature=params.temperature,
             top_p=params.top_p,
             user=params.user,
-            guided_choice=params.guided_choice,
-            prompt_logprobs=params.prompt_logprobs,
             suffix=params.suffix,
             api_key=self.get_api_key(),
             api_base=self.api_base,
@@ -258,7 +256,7 @@
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         # Add usage tracking for streaming when telemetry is active
         from llama_stack.providers.utils.telemetry.tracing import get_current_span

@@ -17,9 +17,9 @@ from llama_stack.apis.inference import (
     Model,
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     OpenAICompletion,
-    OpenAICompletionRequest,
+    OpenAICompletionRequestWithExtraBody,
     OpenAIEmbeddingData,
     OpenAIEmbeddingsResponse,
     OpenAIEmbeddingUsage,
@@ -223,21 +223,11 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         """
         Direct OpenAI completion API call.
         """
-        # Handle parameters that are not supported by OpenAI API, but may be by the provider
-        # prompt_logprobs is supported by vLLM
-        # guided_choice is supported by vLLM
-        # TODO: test coverage
-        extra_body: dict[str, Any] = {}
-        if params.prompt_logprobs is not None and params.prompt_logprobs >= 0:
-            extra_body["prompt_logprobs"] = params.prompt_logprobs
-        if params.guided_choice:
-            extra_body["guided_choice"] = params.guided_choice
         # TODO: fix openai_completion to return type compatible with OpenAI's API response
         completion_kwargs = await prepare_openai_completion_params(
             model=await self._get_provider_model_id(params.model),
@@ -259,13 +249,15 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=params.user,
             suffix=params.suffix,
         )
-        resp = await self.client.completions.create(**completion_kwargs, extra_body=extra_body)
+        if extra_body := params.model_extra:
+            completion_kwargs["extra_body"] = extra_body
+        resp = await self.client.completions.create(**completion_kwargs)
         return await self._maybe_overwrite_id(resp, params.stream)  # type: ignore[no-any-return]

     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """
         Direct OpenAI chat completion API call.
@@ -316,6 +308,8 @@
             user=params.user,
         )
+        if extra_body := params.model_extra:
+            request_params["extra_body"] = extra_body
         resp = await self.client.chat.completions.create(**request_params)
         return await self._maybe_overwrite_id(resp, params.stream)  # type: ignore[no-any-return]
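
For readers unfamiliar with `model_extra`: when a pydantic model is configured with `extra="allow"`, fields not declared on the model are retained and exposed via the `model_extra` property, which is what the `if extra_body := params.model_extra:` lines above rely on. A minimal sketch of that mechanism, assuming the new request models are configured this way (the class below is a simplified stand-in, not the actual definition from this PR):

```python
# Sketch of the capture mechanism behind the ...WithExtraBody request types.
from pydantic import BaseModel, ConfigDict


class ChatCompletionRequestSketch(BaseModel):
    """Simplified stand-in for OpenAIChatCompletionRequestWithExtraBody."""

    # Keep undeclared fields instead of rejecting them with a validation error.
    model_config = ConfigDict(extra="allow")

    model: str
    messages: list[dict]


req = ChatCompletionRequestSketch(
    model="my-model",
    messages=[{"role": "user", "content": "hi"}],
    guided_choice=["four", "five"],  # undeclared field: lands in model_extra
)

# Mirrors the pass-through in openai_chat_completion above.
if extra_body := req.model_extra:
    print(extra_body)  # {'guided_choice': ['four', 'five']}
```

This replaces the old approach of hard-coding individual provider extensions (e.g. `guided_choice`, `prompt_logprobs`) as first-class request fields: anything the caller sends beyond the declared schema now flows through generically.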