feat: support passing "extra body" through to providers

# What does this PR do?
Allows passing through extra_body parameters to inference providers.
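
For example, a client can pass vLLM's `chat_template_kwargs` through the standard OpenAI client's `extra_body` escape hatch. A minimal sketch (the base URL and model name here are assumptions for illustration, not part of this PR):

```python
from openai import OpenAI

# Hypothetical Llama Stack endpoint and model name, for illustration only.
client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

# Fields the OpenAI API does not define ride along in extra_body and are
# now forwarded to the underlying inference provider.
response = client.chat.completions.create(
    model="granite-3.0-8b-instruct",
    messages=[{"role": "user", "content": "hello"}],
    extra_body={"chat_template_kwargs": {"thinking": True}},
)
```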


closes #2720

## Test Plan
CI, plus a new unit test covering extra_body forwarding (included below).
Eric Huang 2025-10-10 13:47:36 -07:00
parent 361fcaf2f3
commit c40d8dcbee
3 changed files with 59 additions and 0 deletions


@@ -230,6 +230,9 @@ class LiteLLMOpenAIMixin(
    ) -> OpenAICompletion:
        model_obj = await self.model_store.get_model(params.model)
        # Extract extra fields
        extra_body = dict(params.__pydantic_extra__ or {})
        request_params = await prepare_openai_completion_params(
            model=self.get_litellm_model_name(model_obj.provider_resource_id),
            prompt=params.prompt,
@@ -253,6 +256,7 @@ class LiteLLMOpenAIMixin(
            suffix=params.suffix,
            api_key=self.get_api_key(),
            api_base=self.api_base,
            **extra_body,
        )
        return await litellm.atext_completion(**request_params)
@@ -272,6 +276,9 @@ class LiteLLMOpenAIMixin(
        model_obj = await self.model_store.get_model(params.model)
        # Extract extra fields
        extra_body = dict(params.__pydantic_extra__ or {})
        request_params = await prepare_openai_completion_params(
            model=self.get_litellm_model_name(model_obj.provider_resource_id),
            messages=params.messages,
@@ -298,6 +305,7 @@ class LiteLLMOpenAIMixin(
            user=params.user,
            api_key=self.get_api_key(),
            api_base=self.api_base,
            **extra_body,
        )
        return await litellm.acompletion(**request_params)
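
Both call sites rely on Pydantic v2's `__pydantic_extra__`, which collects any fields not declared on the request model. A minimal sketch of the mechanism, assuming the request model is configured with `extra="allow"` (the `Request` class is a hypothetical stand-in):

```python
from pydantic import BaseModel, ConfigDict

class Request(BaseModel):
    # extra="allow" retains unknown fields instead of rejecting them
    model_config = ConfigDict(extra="allow")
    model: str

req = Request(model="m", chat_template_kwargs={"thinking": True})
# Undeclared fields land in __pydantic_extra__, ready to forward
assert req.__pydantic_extra__ == {"chat_template_kwargs": {"thinking": True}}
```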


@@ -228,6 +228,9 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
        """
        Direct OpenAI completion API call.
        """
        # Extract extra fields using Pydantic's built-in __pydantic_extra__
        extra_body = dict(params.__pydantic_extra__ or {})
        # Handle parameters that are not supported by OpenAI API, but may be by the provider
        # prompt_logprobs is supported by vLLM
        # guided_choice is supported by vLLM
@@ -316,6 +319,9 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
            user=params.user,
        )
        # Extract any additional provider-specific parameters using Pydantic's __pydantic_extra__
        if extra_body := dict(params.__pydantic_extra__ or {}):
            request_params["extra_body"] = extra_body
        resp = await self.client.chat.completions.create(**request_params)
        return await self._maybe_overwrite_id(resp, params.stream)  # type: ignore[no-any-return]
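
Note the asymmetry with the LiteLLM path: there the extra fields are spread directly into the call's kwargs, while here they are handed to the OpenAI client's `extra_body` argument, which openai-python merges into the JSON request body. The walrus guard also means plain requests are untouched; a sketch of that step in isolation (`attach_extra_body` is a hypothetical helper, not part of the diff):

```python
from typing import Any

def attach_extra_body(request_params: dict[str, Any], pydantic_extra: dict[str, Any] | None) -> dict[str, Any]:
    # Only attach extra_body when there is something to forward,
    # so requests without extra fields are sent unchanged.
    if extra_body := dict(pydantic_extra or {}):
        request_params["extra_body"] = extra_body
    return request_params

params = attach_extra_body({"model": "m"}, {"chat_template_kwargs": {"thinking": True}})
assert params["extra_body"]["chat_template_kwargs"] == {"thinking": True}
```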


@@ -191,3 +191,48 @@ async def test_openai_chat_completion_is_async(vllm_inference_adapter):
    assert mock_create_client.call_count == 4  # no cheating
    assert total_time < (sleep_time * 2), f"Total time taken: {total_time}s exceeded expected max"


async def test_extra_body_forwarding(vllm_inference_adapter):
    """
    Test that extra_body parameters (e.g., chat_template_kwargs) are correctly
    forwarded to the underlying OpenAI client.
    """
    mock_model = Model(identifier="mock-model", provider_resource_id="mock-model", provider_id="vllm-inference")
    vllm_inference_adapter.model_store.get_model.return_value = mock_model

    with patch.object(VLLMInferenceAdapter, "client", new_callable=PropertyMock) as mock_client_property:
        mock_client = MagicMock()
        mock_client.chat.completions.create = AsyncMock(
            return_value=OpenAIChatCompletion(
                id="chatcmpl-abc123",
                created=1,
                model="mock-model",
                choices=[
                    OpenAIChoice(
                        message=OpenAIAssistantMessageParam(
                            content="test response",
                        ),
                        finish_reason="stop",
                        index=0,
                    )
                ],
            )
        )
        mock_client_property.return_value = mock_client

        # Test with chat_template_kwargs for Granite thinking mode
        params = OpenaiChatCompletionRequest(
            model="mock-model",
            messages=[{"role": "user", "content": "test"}],
            stream=False,
            chat_template_kwargs={"thinking": True},
        )
        await vllm_inference_adapter.openai_chat_completion(params)

        # Verify that the client was called with extra_body containing chat_template_kwargs
        mock_client.chat.completions.create.assert_called_once()
        call_kwargs = mock_client.chat.completions.create.call_args.kwargs
        assert "extra_body" in call_kwargs
        assert "chat_template_kwargs" in call_kwargs["extra_body"]
        assert call_kwargs["extra_body"]["chat_template_kwargs"] == {"thinking": True}