Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-09 13:14:39 +00:00)

test

# What does this PR do?

## Test Plan

Commit f229c433fe (parent 96886afaca)
17 changed files with 175 additions and 4 deletions
docs/static/deprecated-llama-stack-spec.html (vendored): 26 changes

```diff
@@ -7709,12 +7709,36 @@
                 "user": {
                     "type": "string",
                     "description": "(Optional) The user to use."
+                },
+                "kwargs": {
+                    "oneOf": [
+                        {
+                            "type": "null"
+                        },
+                        {
+                            "type": "boolean"
+                        },
+                        {
+                            "type": "number"
+                        },
+                        {
+                            "type": "string"
+                        },
+                        {
+                            "type": "array"
+                        },
+                        {
+                            "type": "object"
+                        }
+                    ],
+                    "description": "(Optional) Additional provider-specific parameters to pass through as extra_body (e.g., chat_template_kwargs for vLLM)."
                 }
             },
             "additionalProperties": false,
             "required": [
                 "model",
-                "messages"
+                "messages",
+                "kwargs"
             ],
             "title": "OpenaiChatCompletionRequest"
         },
```
docs/static/deprecated-llama-stack-spec.yaml (vendored): 12 changes

```diff
@@ -5666,10 +5666,22 @@ components:
       user:
         type: string
         description: (Optional) The user to use.
+      kwargs:
+        oneOf:
+          - type: 'null'
+          - type: boolean
+          - type: number
+          - type: string
+          - type: array
+          - type: object
+        description: >-
+          (Optional) Additional provider-specific parameters to pass through as
+          extra_body (e.g., chat_template_kwargs for vLLM).
     additionalProperties: false
     required:
       - model
       - messages
+      - kwargs
     title: OpenaiChatCompletionRequest
   OpenAIChatCompletion:
     type: object
```
docs/static/llama-stack-spec.html (vendored): 26 changes

Hunk @@ -5205,12 +5205,36 @@: identical to the `kwargs` property, description, and `required` entry added in docs/static/deprecated-llama-stack-spec.html above, at this file's offsets.
docs/static/llama-stack-spec.yaml (vendored): 12 changes

Hunk @@ -3915,10 +3915,22 @@ components: identical to the `kwargs` schema addition in docs/static/deprecated-llama-stack-spec.yaml above.
docs/static/stainless-llama-stack-spec.html (vendored): 26 changes

Hunk @@ -7214,12 +7214,36 @@: identical to the `kwargs` property, description, and `required` entry added in docs/static/deprecated-llama-stack-spec.html above.
docs/static/stainless-llama-stack-spec.yaml (vendored): 12 changes

Hunk @@ -5360,10 +5360,22 @@ components: identical to the `kwargs` schema addition in docs/static/deprecated-llama-stack-spec.yaml above.
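Across all six generated spec files the change is the same: `OpenaiChatCompletionRequest` gains a free-form `kwargs` property (any JSON type) and `kwargs` joins the `required` list. For illustration only, a request body that validates against the updated schema could look like this (the model name and parameter values are hypothetical):

```json
{
  "model": "my-vllm-model",
  "messages": [
    { "role": "user", "content": "Hello" }
  ],
  "kwargs": {
    "chat_template_kwargs": { "thinking": true }
  }
}
```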
```diff
@@ -1106,6 +1106,7 @@ class InferenceProvider(Protocol):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """Create chat completions.

@@ -1134,6 +1135,7 @@ class InferenceProvider(Protocol):
         :param top_logprobs: (Optional) The top log probabilities to use.
         :param top_p: (Optional) The top p to use.
         :param user: (Optional) The user to use.
+        :param kwargs: (Optional) Additional provider-specific parameters to pass through as extra_body (e.g., chat_template_kwargs for vLLM).
         :returns: An OpenAIChatCompletion.
         """
         ...
```
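Because the new parameter is a `**kwargs` catch-all, any keyword argument that is not one of the named parameters is collected into it and passed through to the provider. A minimal caller sketch, assuming `inference` is some object implementing this protocol (the model id is hypothetical):

```python
# Sketch only: chat_template_kwargs is not a named parameter of
# openai_chat_completion, so it lands in **kwargs and is forwarded
# to the provider as extra_body.
response = await inference.openai_chat_completion(
    model="my-vllm-model",
    messages=[{"role": "user", "content": "Hello"}],
    chat_template_kwargs={"thinking": True},
)
```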
```diff
@@ -277,6 +277,7 @@ class InferenceRouter(Inference):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         logger.debug(
             f"InferenceRouter.openai_chat_completion: {model=}, {stream=}, {messages=}",
@@ -323,6 +324,7 @@ class InferenceRouter(Inference):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )
         provider = await self.routing_table.get_provider_impl(model_obj.identifier)
         if stream:
```
```diff
@@ -173,5 +173,6 @@ class MetaReferenceInferenceImpl(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by meta-reference inference provider")
```
```diff
@@ -124,5 +124,6 @@ class SentenceTransformersInferenceImpl(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by sentence transformers provider")
```
```diff
@@ -186,5 +186,6 @@ class BedrockInferenceAdapter(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by the Bedrock provider")
```
```diff
@@ -153,6 +153,7 @@ class PassthroughInferenceAdapter(Inference):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         client = self._get_client()
         model_obj = await self.model_store.get_model(model)
@@ -181,6 +182,7 @@ class PassthroughInferenceAdapter(Inference):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )

         return await client.inference.openai_chat_completion(**params)
```
```diff
@@ -57,6 +57,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ):
         """Override to add RunPod-specific stream_options requirement."""
         if stream and not stream_options:
@@ -86,4 +87,5 @@ class RunpodInferenceAdapter(OpenAIMixin):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )
```
```diff
@@ -102,6 +102,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         max_tokens = max_tokens or self.config.max_tokens

@@ -136,4 +137,5 @@ class VLLMInferenceAdapter(OpenAIMixin):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )
```
```diff
@@ -299,6 +299,7 @@ class LiteLLMOpenAIMixin(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         # Add usage tracking for streaming when telemetry is active
         from llama_stack.providers.utils.telemetry.tracing import get_current_span
@@ -335,6 +336,7 @@ class LiteLLMOpenAIMixin(
             user=user,
             api_key=self.get_api_key(),
             api_base=self.api_base,
+            **kwargs,
         )
         return await litellm.acompletion(**params)
```
```diff
@@ -313,6 +313,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """
         Direct OpenAI chat completion API call.
@@ -361,7 +362,10 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=user,
         )

-        resp = await self.client.chat.completions.create(**params)
+        # Pass any additional provider-specific parameters as extra_body
+        extra_body = kwargs if kwargs else {}
+
+        resp = await self.client.chat.completions.create(**params, extra_body=extra_body)

         return await self._maybe_overwrite_id(resp, stream)  # type: ignore[no-any-return]
```
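The `extra_body` argument used here is the openai-python client's standard escape hatch: keys placed in `extra_body` are merged into the outgoing JSON request even though they are not named parameters of `chat.completions.create()`. A minimal sketch of the same call made directly against a vLLM server (base URL, model name, and api_key value are hypothetical placeholders):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    # vLLM exposes an OpenAI-compatible endpoint; the api_key is a placeholder.
    client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="none")
    resp = await client.chat.completions.create(
        model="my-vllm-model",
        messages=[{"role": "user", "content": "Hello"}],
        # Merged into the JSON request body by the client, so it reaches vLLM
        # even though it is not a named parameter of create().
        extra_body={"chat_template_kwargs": {"thinking": True}},
    )
    print(resp.choices[0].message.content)


asyncio.run(main())
```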
```diff
@@ -186,3 +186,47 @@ async def test_openai_chat_completion_is_async(vllm_inference_adapter):

     assert mock_create_client.call_count == 4  # no cheating
     assert total_time < (sleep_time * 2), f"Total time taken: {total_time}s exceeded expected max"
+
+
+async def test_extra_body_forwarding(vllm_inference_adapter):
+    """
+    Test that extra_body parameters (e.g., chat_template_kwargs) are correctly
+    forwarded to the underlying OpenAI client.
+    """
+    mock_model = Model(identifier="mock-model", provider_resource_id="mock-model", provider_id="vllm-inference")
+    vllm_inference_adapter.model_store.get_model.return_value = mock_model
+
+    with patch.object(VLLMInferenceAdapter, "client", new_callable=PropertyMock) as mock_client_property:
+        mock_client = MagicMock()
+        mock_client.chat.completions.create = AsyncMock(
+            return_value=OpenAIChatCompletion(
+                id="chatcmpl-abc123",
+                created=1,
+                model="mock-model",
+                choices=[
+                    OpenAIChoice(
+                        message=OpenAIAssistantMessageParam(
+                            content="test response",
+                        ),
+                        finish_reason="stop",
+                        index=0,
+                    )
+                ],
+            )
+        )
+        mock_client_property.return_value = mock_client
+
+        # Test with chat_template_kwargs for Granite thinking mode
+        await vllm_inference_adapter.openai_chat_completion(
+            "mock-model",
+            messages=[],
+            stream=False,
+            chat_template_kwargs={"thinking": True},
+        )
+
+        # Verify that the client was called with extra_body containing chat_template_kwargs
+        mock_client.chat.completions.create.assert_called_once()
+        call_kwargs = mock_client.chat.completions.create.call_args.kwargs
+        assert "extra_body" in call_kwargs
+        assert "chat_template_kwargs" in call_kwargs["extra_body"]
+        assert call_kwargs["extra_body"]["chat_template_kwargs"] == {"thinking": True}
```
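The new test can be run in isolation by name, e.g. `pytest -k test_extra_body_forwarding`, since this diff does not show the test file's path.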