feat: support passing "extra body" through to providers

# What does this PR do?
Allows passing through extra_body parameters to inference providers.
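For example, with the OpenAI-compatible endpoint a client can attach provider-specific parameters via the SDK's `extra_body` argument and have them forwarded to the backing provider instead of dropped. A minimal sketch, assuming a local Llama Stack server; the base URL, model id, and the vLLM-specific `guided_choice` key are illustrative:

```python
from openai import OpenAI

# Point the OpenAI SDK at a Llama Stack server (URL and model id are assumptions).
client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

response = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Is Rome older than Paris?"}],
    # extra_body keys are outside the OpenAI schema; with this PR they pass
    # through to the provider (guided_choice is a vLLM guided-decoding knob).
    extra_body={"guided_choice": ["yes", "no"]},
)
print(response.choices[0].message.content)
```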


closes #2720

## Test Plan
CI, plus a newly added test.
Author: Eric Huang, 2025-10-10 16:00:30 -07:00
Parent: 80d58ab519
Commit: c4dbaa9d4c
41 changed files with 3145 additions and 200 deletions


```diff
@@ -12,8 +12,8 @@ from botocore.client import BaseClient
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     Inference,
-    OpenAIChatCompletionRequest,
-    OpenAICompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAICompletionRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
 from llama_stack.apis.inference.inference import (
@@ -134,12 +134,12 @@ class BedrockInferenceAdapter(
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         raise NotImplementedError("OpenAI completion not supported by the Bedrock provider")
 
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by the Bedrock provider")
```
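The same mechanical rename repeats across every adapter below; the interesting part is the request type itself. A hedged sketch of the pattern (not the PR's exact definition), assuming a Pydantic v2 model configured to retain undeclared fields:

```python
from pydantic import BaseModel, ConfigDict


class OpenAIChatCompletionRequestWithExtraBody(BaseModel):
    """Chat request that keeps unrecognized fields instead of rejecting them."""

    # extra="allow" makes Pydantic store undeclared fields on the instance
    # (readable later via .model_extra), so provider-specific parameters
    # survive validation on their way to the adapter.
    model_config = ConfigDict(extra="allow")

    model: str
    messages: list[dict]  # simplified; the real API uses typed message models
    stream: bool | None = None
```

Adapters can then read the surplus fields from `params.model_extra` and forward them downstream.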


```diff
@@ -8,7 +8,7 @@ from collections.abc import Iterable
 from databricks.sdk import WorkspaceClient
-from llama_stack.apis.inference import OpenAICompletion, OpenAICompletionRequest
+from llama_stack.apis.inference import OpenAICompletion, OpenAICompletionRequestWithExtraBody
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -39,6 +39,6 @@ class DatabricksInferenceAdapter(OpenAIMixin):
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         raise NotImplementedError()
```


```diff
@@ -3,7 +3,12 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.inference.inference import OpenAICompletion, OpenAICompletionRequest, OpenAIEmbeddingsResponse
+from llama_stack.apis.inference.inference import (
+    OpenAICompletion,
+    OpenAICompletionRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+)
 from llama_stack.log import get_logger
 from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -29,7 +34,7 @@ class LlamaCompatInferenceAdapter(OpenAIMixin):
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         raise NotImplementedError()
```


```diff
@@ -13,9 +13,9 @@ from llama_stack.apis.inference import (
     Inference,
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     OpenAICompletion,
-    OpenAICompletionRequest,
+    OpenAICompletionRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
 from llama_stack.apis.models import Model
@@ -79,7 +79,7 @@ class PassthroughInferenceAdapter(Inference):
     async def openai_completion(
         self,
-        params: OpenAICompletionRequest,
+        params: OpenAICompletionRequestWithExtraBody,
     ) -> OpenAICompletion:
         client = self._get_client()
         model_obj = await self.model_store.get_model(params.model)
@@ -93,7 +93,7 @@ class PassthroughInferenceAdapter(Inference):
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         client = self._get_client()
         model_obj = await self.model_store.get_model(params.model)
```


```diff
@@ -9,7 +9,7 @@ from collections.abc import AsyncIterator
 from llama_stack.apis.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -31,7 +31,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """Override to add RunPod-specific stream_options requirement."""
         params = params.model_copy()
```
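`model_copy()` here gives the RunPod adapter a mutable copy of the validated request before it injects its streaming requirement. A sketch of the likely shape of that override body; the injected `stream_options` value is an assumption, not the PR's code:

```python
from llama_stack.apis.inference import OpenAIChatCompletionRequestWithExtraBody


def ensure_stream_options(
    params: OpenAIChatCompletionRequestWithExtraBody,
) -> OpenAIChatCompletionRequestWithExtraBody:
    # Copy-and-update avoids mutating the caller's request object.
    if params.stream and getattr(params, "stream_options", None) is None:
        return params.model_copy(update={"stream_options": {"include_usage": True}})
    return params
```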


```diff
@@ -14,7 +14,7 @@ from pydantic import ConfigDict
 from llama_stack.apis.inference import (
     OpenAIChatCompletion,
-    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionRequestWithExtraBody,
     ToolChoice,
 )
 from llama_stack.log import get_logger
@@ -93,7 +93,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
     async def openai_chat_completion(
         self,
-        params: OpenAIChatCompletionRequest,
+        params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         params = params.model_copy()
```
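On the way out, an adapter built on `OpenAIMixin` can splat whatever the request model captured beyond its declared fields into the downstream client call. A hedged sketch of that forwarding step; apart from Pydantic's `model_extra`, the names here are assumptions rather than the PR's code:

```python
from typing import Any


async def forward_with_extras(client: Any, params: Any, provider_model_id: str) -> Any:
    # model_extra holds fields accepted via extra="allow" (Pydantic v2).
    extras: dict[str, Any] = params.model_extra or {}
    return await client.chat.completions.create(
        model=provider_model_id,
        messages=params.messages,
        stream=params.stream,
        **extras,  # provider-specific parameters pass straight through
    )
```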