Merge 1e891489a8 into sapling-pr-archive-ehhuang

ehhuang 2025-10-08 13:23:35 -07:00 committed by GitHub
commit 5025e02d81
13 changed files with 170 additions and 4 deletions


@@ -7372,12 +7372,36 @@
         "user": {
           "type": "string",
           "description": "(Optional) The user to use."
+        },
+        "kwargs": {
+          "oneOf": [
+            {
+              "type": "null"
+            },
+            {
+              "type": "boolean"
+            },
+            {
+              "type": "number"
+            },
+            {
+              "type": "string"
+            },
+            {
+              "type": "array"
+            },
+            {
+              "type": "object"
+            }
+          ],
+          "description": "(Optional) Additional provider-specific parameters to pass through as extra_body (e.g., chat_template_kwargs for vLLM)."
         }
       },
       "additionalProperties": false,
       "required": [
         "model",
-        "messages"
+        "messages",
+        "kwargs"
       ],
       "title": "OpenaiChatCompletionRequest"
     },
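
Taken at face value, the schema change above adds a `kwargs` property that accepts any JSON type and is now listed under `required`. A minimal sketch of a conforming request body — the model id and the vLLM-oriented `chat_template_kwargs` value are illustrative assumptions, not taken from this diff:

```python
import json

# Illustrative payload for the updated OpenaiChatCompletionRequest schema.
request_body = {
    "model": "meta-llama/Llama-3.1-8B-Instruct",  # placeholder model id
    "messages": [{"role": "user", "content": "Hello"}],
    # New in this change: provider-specific parameters that the server
    # forwards to the backend as extra_body (e.g., for vLLM).
    "kwargs": {"chat_template_kwargs": {"thinking": True}},
}

print(json.dumps(request_body, indent=2))
```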


@@ -5434,10 +5434,22 @@ components:
       user:
         type: string
         description: (Optional) The user to use.
+      kwargs:
+        oneOf:
+          - type: 'null'
+          - type: boolean
+          - type: number
+          - type: string
+          - type: array
+          - type: object
+        description: >-
+          (Optional) Additional provider-specific parameters to pass through as
+          extra_body (e.g., chat_template_kwargs for vLLM).
     additionalProperties: false
     required:
       - model
       - messages
+      - kwargs
     title: OpenaiChatCompletionRequest
   OpenAIChatCompletion:
     type: object


@@ -5167,12 +5167,36 @@
         "user": {
           "type": "string",
           "description": "(Optional) The user to use."
+        },
+        "kwargs": {
+          "oneOf": [
+            {
+              "type": "null"
+            },
+            {
+              "type": "boolean"
+            },
+            {
+              "type": "number"
+            },
+            {
+              "type": "string"
+            },
+            {
+              "type": "array"
+            },
+            {
+              "type": "object"
+            }
+          ],
+          "description": "(Optional) Additional provider-specific parameters to pass through as extra_body (e.g., chat_template_kwargs for vLLM)."
         }
       },
       "additionalProperties": false,
       "required": [
         "model",
-        "messages"
+        "messages",
+        "kwargs"
       ],
       "title": "OpenaiChatCompletionRequest"
     },


@@ -3888,10 +3888,22 @@ components:
       user:
         type: string
         description: (Optional) The user to use.
+      kwargs:
+        oneOf:
+          - type: 'null'
+          - type: boolean
+          - type: number
+          - type: string
+          - type: array
+          - type: object
+        description: >-
+          (Optional) Additional provider-specific parameters to pass through as
+          extra_body (e.g., chat_template_kwargs for vLLM).
     additionalProperties: false
     required:
       - model
       - messages
+      - kwargs
     title: OpenaiChatCompletionRequest
   OpenAIChatCompletion:
     type: object


@@ -6839,12 +6839,36 @@
         "user": {
           "type": "string",
           "description": "(Optional) The user to use."
+        },
+        "kwargs": {
+          "oneOf": [
+            {
+              "type": "null"
+            },
+            {
+              "type": "boolean"
+            },
+            {
+              "type": "number"
+            },
+            {
+              "type": "string"
+            },
+            {
+              "type": "array"
+            },
+            {
+              "type": "object"
+            }
+          ],
+          "description": "(Optional) Additional provider-specific parameters to pass through as extra_body (e.g., chat_template_kwargs for vLLM)."
         }
       },
       "additionalProperties": false,
       "required": [
         "model",
-        "messages"
+        "messages",
+        "kwargs"
       ],
       "title": "OpenaiChatCompletionRequest"
     },


@@ -5101,10 +5101,22 @@ components:
       user:
         type: string
         description: (Optional) The user to use.
+      kwargs:
+        oneOf:
+          - type: 'null'
+          - type: boolean
+          - type: number
+          - type: string
+          - type: array
+          - type: object
+        description: >-
+          (Optional) Additional provider-specific parameters to pass through as
+          extra_body (e.g., chat_template_kwargs for vLLM).
     additionalProperties: false
     required:
       - model
       - messages
+      - kwargs
     title: OpenaiChatCompletionRequest
   OpenAIChatCompletion:
     type: object


@@ -1106,6 +1106,7 @@ class InferenceProvider(Protocol):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """Create chat completions.
@@ -1134,6 +1135,7 @@ class InferenceProvider(Protocol):
         :param top_logprobs: (Optional) The top log probabilities to use.
         :param top_p: (Optional) The top p to use.
         :param user: (Optional) The user to use.
+        :param kwargs: (Optional) Additional provider-specific parameters to pass through as extra_body (e.g., chat_template_kwargs for vLLM).
         :returns: An OpenAIChatCompletion.
         """
         ...
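
The `**kwargs: Any` catch-all means any keyword argument not named in the signature is collected and handed through to the provider. A toy sketch of that mechanism — `EchoProvider` is a made-up stub, not the real protocol implementation (which returns `OpenAIChatCompletion` objects rather than dicts):

```python
import asyncio
from typing import Any


class EchoProvider:
    """Stand-in for an InferenceProvider implementation (illustrative only)."""

    async def openai_chat_completion(self, model: str, messages: list, **kwargs: Any) -> dict:
        # Anything not named in the signature lands in **kwargs; the real
        # adapters forward it to the backend as extra_body.
        return {"model": model, "extra_body": kwargs}


async def main() -> None:
    provider = EchoProvider()
    resp = await provider.openai_chat_completion(
        "my-model",
        [{"role": "user", "content": "Hi"}],
        chat_template_kwargs={"thinking": True},  # collected into **kwargs
    )
    print(resp)
    # {'model': 'my-model', 'extra_body': {'chat_template_kwargs': {'thinking': True}}}


asyncio.run(main())
```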


@@ -277,6 +277,7 @@ class InferenceRouter(Inference):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         logger.debug(
             f"InferenceRouter.openai_chat_completion: {model=}, {stream=}, {messages=}",
@@ -323,6 +324,7 @@ class InferenceRouter(Inference):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )
         provider = await self.routing_table.get_provider_impl(model_obj.identifier)
         if stream:


@@ -153,6 +153,7 @@ class PassthroughInferenceAdapter(Inference):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         client = self._get_client()
         model_obj = await self.model_store.get_model(model)
@@ -181,6 +182,7 @@ class PassthroughInferenceAdapter(Inference):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )
         return await client.inference.openai_chat_completion(**params)


@@ -57,6 +57,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ):
         """Override to add RunPod-specific stream_options requirement."""
         if stream and not stream_options:
@@ -86,4 +87,5 @@ class RunpodInferenceAdapter(OpenAIMixin):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )


@@ -102,6 +102,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         max_tokens = max_tokens or self.config.max_tokens
@@ -136,4 +137,5 @@ class VLLMInferenceAdapter(OpenAIMixin):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )


@@ -313,6 +313,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """
         Direct OpenAI chat completion API call.
@@ -361,7 +362,10 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=user,
         )

-        resp = await self.client.chat.completions.create(**params)
+        # Pass any additional provider-specific parameters as extra_body
+        extra_body = kwargs if kwargs else {}
+        resp = await self.client.chat.completions.create(**params, extra_body=extra_body)

         return await self._maybe_overwrite_id(resp, stream)  # type: ignore[no-any-return]
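
The forwarding relies on the official `openai` Python client, whose `create(...)` methods accept an `extra_body` argument that is merged into the request JSON at the top level; vLLM then picks up non-standard fields such as `chat_template_kwargs` from the body. A minimal sketch against a vLLM-style endpoint — the base URL, API key, and model name are placeholders:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")


async def demo() -> None:
    resp = await client.chat.completions.create(
        model="my-served-model",  # placeholder
        messages=[{"role": "user", "content": "Hello"}],
        # extra_body keys are merged verbatim into the JSON payload, so the
        # server receives a top-level "chat_template_kwargs" field.
        extra_body={"chat_template_kwargs": {"thinking": True}},
    )
    print(resp.choices[0].message.content)


# asyncio.run(demo())  # requires a running vLLM-compatible server
```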


@@ -186,3 +186,47 @@ async def test_openai_chat_completion_is_async(vllm_inference_adapter):
     assert mock_create_client.call_count == 4  # no cheating
     assert total_time < (sleep_time * 2), f"Total time taken: {total_time}s exceeded expected max"
+
+
+async def test_extra_body_forwarding(vllm_inference_adapter):
+    """
+    Test that extra_body parameters (e.g., chat_template_kwargs) are correctly
+    forwarded to the underlying OpenAI client.
+    """
+    mock_model = Model(identifier="mock-model", provider_resource_id="mock-model", provider_id="vllm-inference")
+    vllm_inference_adapter.model_store.get_model.return_value = mock_model
+
+    with patch.object(VLLMInferenceAdapter, "client", new_callable=PropertyMock) as mock_client_property:
+        mock_client = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(
+            return_value=OpenAIChatCompletion(
+                id="chatcmpl-abc123",
+                created=1,
+                model="mock-model",
+                choices=[
+                    OpenAIChoice(
+                        message=OpenAIAssistantMessageParam(
+                            content="test response",
+                        ),
+                        finish_reason="stop",
+                        index=0,
+                    )
+                ],
+            )
+        )
+        mock_client_property.return_value = mock_client
+
+        # Test with chat_template_kwargs for Granite thinking mode
+        await vllm_inference_adapter.openai_chat_completion(
+            "mock-model",
+            messages=[],
+            stream=False,
+            chat_template_kwargs={"thinking": True},
+        )
+
+        # Verify that the client was called with extra_body containing chat_template_kwargs
+        mock_client.chat.completions.create.assert_called_once()
+        call_kwargs = mock_client.chat.completions.create.call_args.kwargs
+        assert "extra_body" in call_kwargs
+        assert "chat_template_kwargs" in call_kwargs["extra_body"]
+        assert call_kwargs["extra_body"]["chat_template_kwargs"] == {"thinking": True}