feat: support passing "extra body" through to providers
# What does this PR do?

Allows passing through extra_body parameters to inference providers. Closes #2720.

## Test Plan

CI and added a new test.
parent cb7fb0705b
commit dbaaeea255
21 changed files with 1799 additions and 96 deletions
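The user-facing effect, as a hedged sketch: an OpenAI-compatible client pointed at a Llama Stack server can now send provider-specific fields through `extra_body`, and the stack forwards them to the backing provider instead of dropping them. The base URL, API key, and model name below are placeholders, not values from this PR.

```python
# Hypothetical usage sketch; base_url, api_key, and model are placeholders.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

# guided_choice is a vLLM-specific parameter, not part of the OpenAI spec;
# with this change it rides along in extra_body all the way to the provider.
resp = client.completions.create(
    model="my-vllm-model",
    prompt="Is Paris a city or a country? Answer in one word.",
    extra_body={"guided_choice": ["city", "country"]},
)
print(resp.choices[0].text)
```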
```diff
@@ -1058,8 +1058,6 @@ class OpenAICompletionRequest(BaseModel):
     :param top_p: (Optional) The top p to use.
     :param user: (Optional) The user to use.
     :param suffix: (Optional) The suffix that should be appended to the completion.
-    :param guided_choice: (Optional) vLLM-specific parameter for guided generation with a list of choices.
-    :param prompt_logprobs: (Optional) vLLM-specific parameter for number of log probabilities to return for prompt tokens.
     """
 
     model_config = ConfigDict(extra="allow")
@@ -1082,12 +1080,6 @@ class OpenAICompletionRequest(BaseModel):
     temperature: float | None = None
     top_p: float | None = None
     user: str | None = None
 
-    # vLLM-specific parameters (documented here but also allowed via extra fields)
-    guided_choice: list[str] | None = None
-    prompt_logprobs: int | None = None
-
     # for fill-in-the-middle type completion
     suffix: str | None = None
```
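The removed `guided_choice`/`prompt_logprobs` fields are not lost: `model_config = ConfigDict(extra="allow")` makes Pydantic keep any undeclared fields in `__pydantic_extra__`. A standalone sketch of that mechanism (class and field names are illustrative):

```python
from pydantic import BaseModel, ConfigDict

class Req(BaseModel):
    model_config = ConfigDict(extra="allow")

    model: str
    temperature: float | None = None

# Undeclared fields are accepted instead of raising a validation error,
# and land in __pydantic_extra__ for later retrieval.
r = Req(model="m", temperature=0.1, guided_choice=["yes", "no"])
print(r.__pydantic_extra__)  # {'guided_choice': ['yes', 'no']}
```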
```diff
@@ -194,12 +194,13 @@ class InferenceRouter(Inference):
         params.model = model_obj.identifier
 
         provider = await self.routing_table.get_provider_impl(model_obj.identifier)
+        extra_body = dict(params.__pydantic_extra__ or {})
         if params.stream:
-            return await provider.openai_completion(params)
+            return await provider.openai_completion(params, **extra_body)
         # TODO: Metrics do NOT work with openai_completion stream=True due to the fact
         # that we do not return an AsyncIterator, our tests expect a stream of chunks we cannot intercept currently.
 
-        response = await provider.openai_completion(params)
+        response = await provider.openai_completion(params, **extra_body)
         if self.telemetry:
             metrics = self._construct_metrics(
                 prompt_tokens=response.usage.prompt_tokens,
@@ -246,7 +247,8 @@ class InferenceRouter(Inference):
 
         provider = await self.routing_table.get_provider_impl(model_obj.identifier)
         if params.stream:
-            response_stream = await provider.openai_chat_completion(params)
+            extra_body = dict(params.__pydantic_extra__ or {})
+            response_stream = await provider.openai_chat_completion(params, **extra_body)
 
             # For streaming, the provider returns AsyncIterator[OpenAIChatCompletionChunk]
             # We need to add metrics to each chunk and store the final completion
@@ -319,7 +321,8 @@ class InferenceRouter(Inference):
     async def _nonstream_openai_chat_completion(
         self, provider: Inference, params: OpenAIChatCompletionRequest
     ) -> OpenAIChatCompletion:
-        response = await provider.openai_chat_completion(params)
+        extra_body = dict(params.__pydantic_extra__ or {})
+        response = await provider.openai_chat_completion(params, **extra_body)
         for choice in response.choices:
             # some providers return an empty list for no tool calls in non-streaming responses
             # but the OpenAI API returns None. So, set tool_calls to None if it's empty
```
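The router pattern in these hunks, reduced to its essentials: extras captured by the request model are unpacked as keyword arguments, so any provider method that accepts `**kwargs` receives them unchanged. A minimal, self-contained sketch with a stand-in provider class (not the real `InferenceRouter`):

```python
import asyncio
from typing import Any

from pydantic import BaseModel, ConfigDict

class Params(BaseModel):
    model_config = ConfigDict(extra="allow")

    model: str

class StubProvider:
    async def openai_completion(self, params: Params, **kwargs: Any) -> dict[str, Any]:
        # kwargs carries whatever the caller sent beyond the declared schema
        return {"model": params.model, "extra_body": kwargs}

async def route(provider: StubProvider, params: Params) -> dict[str, Any]:
    # Mirrors the router: pull extras off the model, fan them out as kwargs.
    extra_body = dict(params.__pydantic_extra__ or {})
    return await provider.openai_completion(params, **extra_body)

print(asyncio.run(route(StubProvider(), Params(model="m", prompt_logprobs=1))))
# {'model': 'm', 'extra_body': {'prompt_logprobs': 1}}
```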
```diff
@@ -5,6 +5,7 @@
 # the root directory of this source tree.
 
 from collections.abc import Iterable
+from typing import Any
 
 from databricks.sdk import WorkspaceClient
 
@@ -40,5 +41,6 @@ class DatabricksInferenceAdapter(OpenAIMixin):
     async def openai_completion(
         self,
         params: OpenAICompletionRequest,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError()
```
```diff
@@ -3,6 +3,8 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+from typing import Any
+
 from llama_stack.apis.inference.inference import OpenAICompletion, OpenAICompletionRequest, OpenAIEmbeddingsResponse
 from llama_stack.log import get_logger
 from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
@@ -30,6 +32,7 @@ class LlamaCompatInferenceAdapter(OpenAIMixin):
     async def openai_completion(
         self,
         params: OpenAICompletionRequest,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError()
 
@@ -40,5 +43,6 @@ class LlamaCompatInferenceAdapter(OpenAIMixin):
         encoding_format: str | None = "float",
         dimensions: int | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIEmbeddingsResponse:
         raise NotImplementedError()
```
```diff
@@ -4,6 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 from collections.abc import AsyncIterator
+from typing import Any
 from urllib.parse import urljoin
 
 import httpx
@@ -94,6 +95,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
     async def openai_chat_completion(
         self,
         params: OpenAIChatCompletionRequest,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         params = params.model_copy()
 
@@ -108,4 +110,4 @@ class VLLMInferenceAdapter(OpenAIMixin):
         if not params.tools and params.tool_choice is not None:
             params.tool_choice = ToolChoice.none.value
 
-        return await super().openai_chat_completion(params)
+        return await super().openai_chat_completion(params, **kwargs)
```
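A side note on the `params.model_copy()` line visible above: the adapter mutates `tool_choice` on a copy, so the caller's request object stays intact. A small illustration of that Pydantic behavior, with illustrative names:

```python
from pydantic import BaseModel

class P(BaseModel):
    tool_choice: str | None = "auto"

orig = P()
local = orig.model_copy()
local.tool_choice = "none"  # only the copy changes
print(orig.tool_choice, local.tool_choice)  # auto none
```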
```diff
@@ -248,8 +248,6 @@ class LiteLLMOpenAIMixin(
             temperature=params.temperature,
             top_p=params.top_p,
             user=params.user,
-            guided_choice=params.guided_choice,
-            prompt_logprobs=params.prompt_logprobs,
             suffix=params.suffix,
             api_key=self.get_api_key(),
             api_base=self.api_base,
```
```diff
@@ -224,20 +224,11 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
     async def openai_completion(
         self,
         params: OpenAICompletionRequest,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         """
         Direct OpenAI completion API call.
         """
-        # Handle parameters that are not supported by OpenAI API, but may be by the provider
-        # prompt_logprobs is supported by vLLM
-        # guided_choice is supported by vLLM
-        # TODO: test coverage
-        extra_body: dict[str, Any] = {}
-        if params.prompt_logprobs is not None and params.prompt_logprobs >= 0:
-            extra_body["prompt_logprobs"] = params.prompt_logprobs
-        if params.guided_choice:
-            extra_body["guided_choice"] = params.guided_choice
-
         # TODO: fix openai_completion to return type compatible with OpenAI's API response
         completion_kwargs = await prepare_openai_completion_params(
             model=await self._get_provider_model_id(params.model),
@@ -259,13 +250,16 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=params.user,
             suffix=params.suffix,
         )
-        resp = await self.client.completions.create(**completion_kwargs, extra_body=extra_body)
+        if extra_body := kwargs:
+            completion_kwargs["extra_body"] = extra_body
+        resp = await self.client.completions.create(**completion_kwargs)
 
         return await self._maybe_overwrite_id(resp, params.stream)  # type: ignore[no-any-return]
 
     async def openai_chat_completion(
         self,
         params: OpenAIChatCompletionRequest,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """
         Direct OpenAI chat completion API call.
@@ -316,6 +310,8 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=params.user,
         )
 
+        if extra_body := kwargs:
+            request_params["extra_body"] = extra_body
         resp = await self.client.chat.completions.create(**request_params)
 
         return await self._maybe_overwrite_id(resp, params.stream)  # type: ignore[no-any-return]
```
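The `if extra_body := kwargs:` guard means plain requests are sent exactly as before, while forwarded extras are attached under the openai SDK's `extra_body` parameter, which the SDK merges into the request JSON. A hedged sketch of just that guard (the helper name is invented for illustration):

```python
from typing import Any

def attach_extra_body(request_params: dict[str, Any], kwargs: dict[str, Any]) -> dict[str, Any]:
    # Mirrors the mixin: attach extra_body only when the router forwarded extras,
    # leaving ordinary requests untouched.
    if extra_body := kwargs:
        request_params["extra_body"] = extra_body
    return request_params

print(attach_extra_body({"model": "m", "prompt": "hi"}, {}))
# {'model': 'm', 'prompt': 'hi'}
print(attach_extra_body({"model": "m", "prompt": "hi"}, {"guided_choice": ["a", "b"]}))
# {'model': 'm', 'prompt': 'hi', 'extra_body': {'guided_choice': ['a', 'b']}}
```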