Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-12 05:54:38 +00:00)
chore: refactor (chat)completions endpoints to use shared params struct (#3761)
# What does this PR do?

Converts the openai_completion and openai_chat_completion params to a pydantic BaseModel to reduce code duplication across all providers.

## Test Plan

CI

---
Stack created with [Sapling](https://sapling-scm.com). Best reviewed with [ReviewStack](https://reviewstack.dev/llamastack/llama-stack/pull/3761).
* #3777
* __->__ #3761
Parent: 6954fe2274
Commit: 80d58ab519

33 changed files with 599 additions and 890 deletions
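A minimal sketch of the shared-params pattern this PR introduces. The names `OpenAIChatCompletionRequest` and `openai_chat_completion` come from the diffs below; the field list is abridged and illustrative, not the PR's exact class:

```python
from typing import Any

from pydantic import BaseModel


class OpenAIChatCompletionRequest(BaseModel):
    """Request parameters for OpenAI-compatible chat completion endpoint."""

    model: str
    messages: list[dict[str, Any]]  # abridged: the real model uses typed message unions
    temperature: float | None = None
    max_tokens: int | None = None
    stream: bool | None = None


class ExampleProvider:
    async def openai_chat_completion(self, params: OpenAIChatCompletionRequest):
        # Each provider receives one struct instead of re-declaring the same
        # long list of keyword arguments, which is the duplication being removed.
        ...
```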
@@ -23,6 +23,7 @@ from llama_stack.strong_typing.inspection import (
     is_generic_list,
     is_type_optional,
     is_type_union,
+    is_unwrapped_body_param,
     unwrap_generic_list,
     unwrap_optional_type,
     unwrap_union_types,
@@ -769,24 +770,30 @@ class Generator:
             first = next(iter(op.request_params))
             request_name, request_type = first

-            op_name = "".join(word.capitalize() for word in op.name.split("_"))
-            request_name = f"{op_name}Request"
-            fields = [
-                (
-                    name,
-                    type_,
-                )
-                for name, type_ in op.request_params
-            ]
-            request_type = make_dataclass(
-                request_name,
-                fields,
-                namespace={
-                    "__doc__": create_docstring_for_request(
-                        request_name, fields, doc_params
-                    )
-                },
-            )
+            # Special case: if there's a single parameter with Body(embed=False) that's a BaseModel,
+            # unwrap it to show the flat structure in the OpenAPI spec
+            # Example: openai_chat_completion()
+            if (len(op.request_params) == 1 and is_unwrapped_body_param(request_type)):
+                pass
+            else:
+                op_name = "".join(word.capitalize() for word in op.name.split("_"))
+                request_name = f"{op_name}Request"
+                fields = [
+                    (
+                        name,
+                        type_,
+                    )
+                    for name, type_ in op.request_params
+                ]
+                request_type = make_dataclass(
+                    request_name,
+                    fields,
+                    namespace={
+                        "__doc__": create_docstring_for_request(
+                            request_name, fields, doc_params
+                        )
+                    },
+                )

             requestBody = RequestBody(
                 content={
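Why the generator can skip synthesizing a dataclass in the single-parameter case: in FastAPI, a lone pydantic parameter annotated with `Body(embed=False)` is the entire request body, so its schema can be referenced directly instead of being wrapped in a generated `*Request` dataclass. A hedged sketch of the endpoint shape being special-cased (illustrative, not the PR's exact signature):

```python
from typing import Annotated

from fastapi import Body
from pydantic import BaseModel


class OpenAIChatCompletionRequest(BaseModel):
    model: str
    messages: list[dict]


async def openai_chat_completion(
    # embed=False: the body IS the model ({"model": ..., "messages": ...}),
    # not nested under a "params" key, so the OpenAPI spec stays flat
    params: Annotated[OpenAIChatCompletionRequest, Body(embed=False)],
):
    ...
```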
@@ -8,10 +8,11 @@ import json
 import typing
+import inspect
 from pathlib import Path
-from typing import TextIO
-from typing import Any, List, Optional, Union, get_type_hints, get_origin, get_args
+from typing import Any, List, Optional, TextIO, Union, get_type_hints, get_origin, get_args

 from pydantic import BaseModel
 from llama_stack.strong_typing.schema import object_to_json, StrictJsonType
+from llama_stack.strong_typing.inspection import is_unwrapped_body_param
 from llama_stack.core.resolver import api_protocol_map

 from .generator import Generator
@@ -205,6 +206,14 @@ def _validate_has_return_in_docstring(method) -> str | None:
 def _validate_has_params_in_docstring(method) -> str | None:
     source = inspect.getsource(method)
     sig = inspect.signature(method)
+
+    params_list = [p for p in sig.parameters.values() if p.name != "self"]
+    if len(params_list) == 1:
+        param = params_list[0]
+        param_type = param.annotation
+        if is_unwrapped_body_param(param_type):
+            return
+
     # Only check if the method has more than one parameter
     if len(sig.parameters) > 1 and ":param" not in source:
         return "does not have a ':param' in its docstring"
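For readers without the repo handy: both files lean on the `is_unwrapped_body_param` predicate from `llama_stack.strong_typing.inspection`. A rough sketch of the semantics it plausibly checks (an assumption based on the comments above, not the actual implementation):

```python
from typing import Annotated, get_args, get_origin

from pydantic import BaseModel


def is_unwrapped_body_param_sketch(annotation: object) -> bool:
    """Assumed semantics: Annotated[SomeBaseModel, Body(embed=False)]."""
    if get_origin(annotation) is not Annotated:
        return False
    base, *extras = get_args(annotation)
    is_model = isinstance(base, type) and issubclass(base, BaseModel)
    # FastAPI's Body(...) returns a FieldInfo-like object carrying an `embed`
    # attribute; embed=False means "this model IS the whole request body".
    unembedded = any(getattr(extra, "embed", None) is False for extra in extras)
    return is_model and unembedded
```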
docs/static/deprecated-llama-stack-spec.html (vendored, 20 lines changed)

@@ -1527,7 +1527,7 @@
           "content": {
             "application/json": {
               "schema": {
-                "$ref": "#/components/schemas/OpenaiChatCompletionRequest"
+                "$ref": "#/components/schemas/OpenAIChatCompletionRequest"
               }
             }
           },
@@ -1617,7 +1617,7 @@
           "content": {
             "application/json": {
               "schema": {
-                "$ref": "#/components/schemas/OpenaiCompletionRequest"
+                "$ref": "#/components/schemas/OpenAICompletionRequest"
               }
             }
           },
@@ -7522,7 +7522,7 @@
       "title": "OpenAIResponseFormatText",
       "description": "Text response format for OpenAI-compatible chat completion requests."
     },
-    "OpenaiChatCompletionRequest": {
+    "OpenAIChatCompletionRequest": {
       "type": "object",
       "properties": {
         "model": {
@@ -7769,7 +7769,8 @@
         "model",
         "messages"
       ],
-      "title": "OpenaiChatCompletionRequest"
+      "title": "OpenAIChatCompletionRequest",
+      "description": "Request parameters for OpenAI-compatible chat completion endpoint."
     },
     "OpenAIChatCompletion": {
       "type": "object",
@@ -7965,7 +7966,7 @@
       ],
       "title": "OpenAICompletionWithInputMessages"
     },
-    "OpenaiCompletionRequest": {
+    "OpenAICompletionRequest": {
       "type": "object",
       "properties": {
         "model": {
@@ -8100,10 +8101,12 @@
           "type": "array",
           "items": {
             "type": "string"
-          }
+          },
+          "description": "(Optional) vLLM-specific parameter for guided generation with a list of choices."
         },
         "prompt_logprobs": {
-          "type": "integer"
+          "type": "integer",
+          "description": "(Optional) vLLM-specific parameter for number of log probabilities to return for prompt tokens."
         },
         "suffix": {
           "type": "string",
@@ -8115,7 +8118,8 @@
         "model",
         "prompt"
       ],
-      "title": "OpenaiCompletionRequest"
+      "title": "OpenAICompletionRequest",
+      "description": "Request parameters for OpenAI-compatible completion endpoint."
     },
     "OpenAICompletion": {
       "type": "object",
docs/static/deprecated-llama-stack-spec.yaml (vendored, 22 lines changed)

@@ -1098,7 +1098,7 @@ paths:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/OpenaiChatCompletionRequest'
+              $ref: '#/components/schemas/OpenAIChatCompletionRequest'
         required: true
       deprecated: true
   /v1/openai/v1/chat/completions/{completion_id}:
@@ -1167,7 +1167,7 @@ paths:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/OpenaiCompletionRequest'
+              $ref: '#/components/schemas/OpenAICompletionRequest'
         required: true
       deprecated: true
   /v1/openai/v1/embeddings:
@@ -5575,7 +5575,7 @@ components:
       title: OpenAIResponseFormatText
       description: >-
         Text response format for OpenAI-compatible chat completion requests.
-    OpenaiChatCompletionRequest:
+    OpenAIChatCompletionRequest:
       type: object
       properties:
         model:
@@ -5717,7 +5717,9 @@ components:
       required:
         - model
         - messages
-      title: OpenaiChatCompletionRequest
+      title: OpenAIChatCompletionRequest
+      description: >-
+        Request parameters for OpenAI-compatible chat completion endpoint.
     OpenAIChatCompletion:
       type: object
       properties:
@@ -5883,7 +5885,7 @@ components:
         - model
         - input_messages
       title: OpenAICompletionWithInputMessages
-    OpenaiCompletionRequest:
+    OpenAICompletionRequest:
       type: object
       properties:
         model:
@@ -5975,8 +5977,14 @@ components:
           type: array
           items:
             type: string
+          description: >-
+            (Optional) vLLM-specific parameter for guided generation with a list of
+            choices.
         prompt_logprobs:
           type: integer
+          description: >-
+            (Optional) vLLM-specific parameter for number of log probabilities to
+            return for prompt tokens.
         suffix:
           type: string
           description: >-
@@ -5985,7 +5993,9 @@ components:
       required:
         - model
         - prompt
-      title: OpenaiCompletionRequest
+      title: OpenAICompletionRequest
+      description: >-
+        Request parameters for OpenAI-compatible completion endpoint.
     OpenAICompletion:
       type: object
       properties:
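The `guided_choice` and `prompt_logprobs` fields documented above are vLLM pass-through parameters on the completion request; they ride in the same flat body as `model` and `prompt`. An illustrative request dict (model id and values made up):

```python
completion_body = {
    "model": "llama3.2:3b",                # illustrative model id
    "prompt": "The capital of France is",
    "guided_choice": ["Paris", "London"],  # vLLM: constrain output to one of these
    "prompt_logprobs": 1,                  # vLLM: log probabilities for prompt tokens
}
```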
docs/static/llama-stack-spec.html (vendored, 20 lines changed)

@@ -153,7 +153,7 @@
           "content": {
             "application/json": {
               "schema": {
-                "$ref": "#/components/schemas/OpenaiChatCompletionRequest"
+                "$ref": "#/components/schemas/OpenAIChatCompletionRequest"
               }
             }
           },
@@ -243,7 +243,7 @@
           "content": {
             "application/json": {
               "schema": {
-                "$ref": "#/components/schemas/OpenaiCompletionRequest"
+                "$ref": "#/components/schemas/OpenAICompletionRequest"
               }
             }
           },
@@ -5018,7 +5018,7 @@
       "title": "OpenAIResponseFormatText",
       "description": "Text response format for OpenAI-compatible chat completion requests."
     },
-    "OpenaiChatCompletionRequest": {
+    "OpenAIChatCompletionRequest": {
       "type": "object",
       "properties": {
         "model": {
@@ -5265,7 +5265,8 @@
         "model",
         "messages"
       ],
-      "title": "OpenaiChatCompletionRequest"
+      "title": "OpenAIChatCompletionRequest",
+      "description": "Request parameters for OpenAI-compatible chat completion endpoint."
     },
     "OpenAIChatCompletion": {
       "type": "object",
@@ -5461,7 +5462,7 @@
       ],
       "title": "OpenAICompletionWithInputMessages"
     },
-    "OpenaiCompletionRequest": {
+    "OpenAICompletionRequest": {
       "type": "object",
       "properties": {
         "model": {
@@ -5596,10 +5597,12 @@
           "type": "array",
           "items": {
             "type": "string"
-          }
+          },
+          "description": "(Optional) vLLM-specific parameter for guided generation with a list of choices."
         },
         "prompt_logprobs": {
-          "type": "integer"
+          "type": "integer",
+          "description": "(Optional) vLLM-specific parameter for number of log probabilities to return for prompt tokens."
         },
         "suffix": {
           "type": "string",
@@ -5611,7 +5614,8 @@
         "model",
         "prompt"
       ],
-      "title": "OpenaiCompletionRequest"
+      "title": "OpenAICompletionRequest",
+      "description": "Request parameters for OpenAI-compatible completion endpoint."
     },
     "OpenAICompletion": {
       "type": "object",
docs/static/llama-stack-spec.yaml (vendored, 22 lines changed)

@@ -98,7 +98,7 @@ paths:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/OpenaiChatCompletionRequest'
+              $ref: '#/components/schemas/OpenAIChatCompletionRequest'
         required: true
       deprecated: false
   /v1/chat/completions/{completion_id}:
@@ -167,7 +167,7 @@ paths:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/OpenaiCompletionRequest'
+              $ref: '#/components/schemas/OpenAICompletionRequest'
         required: true
       deprecated: false
   /v1/conversations:
@@ -3824,7 +3824,7 @@ components:
       title: OpenAIResponseFormatText
       description: >-
         Text response format for OpenAI-compatible chat completion requests.
-    OpenaiChatCompletionRequest:
+    OpenAIChatCompletionRequest:
       type: object
       properties:
         model:
@@ -3966,7 +3966,9 @@ components:
       required:
         - model
         - messages
-      title: OpenaiChatCompletionRequest
+      title: OpenAIChatCompletionRequest
+      description: >-
+        Request parameters for OpenAI-compatible chat completion endpoint.
     OpenAIChatCompletion:
       type: object
       properties:
@@ -4132,7 +4134,7 @@ components:
         - model
         - input_messages
       title: OpenAICompletionWithInputMessages
-    OpenaiCompletionRequest:
+    OpenAICompletionRequest:
       type: object
       properties:
         model:
@@ -4224,8 +4226,14 @@ components:
           type: array
           items:
             type: string
+          description: >-
+            (Optional) vLLM-specific parameter for guided generation with a list of
+            choices.
         prompt_logprobs:
           type: integer
+          description: >-
+            (Optional) vLLM-specific parameter for number of log probabilities to
+            return for prompt tokens.
         suffix:
           type: string
           description: >-
@@ -4234,7 +4242,9 @@ components:
       required:
         - model
         - prompt
-      title: OpenaiCompletionRequest
+      title: OpenAICompletionRequest
+      description: >-
+        Request parameters for OpenAI-compatible completion endpoint.
     OpenAICompletion:
       type: object
       properties:
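Since the renames only touch schema component names and descriptions, the wire format is unchanged; a request against the non-deprecated path above still posts the same flat body. A minimal sketch (base URL, port, and model id are illustrative):

```python
import json
from urllib.request import Request, urlopen

body = {
    "model": "llama3.2:3b",  # illustrative
    "messages": [{"role": "user", "content": "Hello"}],
}
req = Request(
    "http://localhost:8321/v1/chat/completions",  # host/port are illustrative
    data=json.dumps(body).encode(),
    headers={"Content-Type": "application/json"},
)
with urlopen(req) as resp:
    print(resp.read().decode())
```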
docs/static/stainless-llama-stack-spec.html (vendored, 20 lines changed)

@@ -153,7 +153,7 @@
           "content": {
             "application/json": {
               "schema": {
-                "$ref": "#/components/schemas/OpenaiChatCompletionRequest"
+                "$ref": "#/components/schemas/OpenAIChatCompletionRequest"
               }
             }
           },
@@ -243,7 +243,7 @@
           "content": {
             "application/json": {
               "schema": {
-                "$ref": "#/components/schemas/OpenaiCompletionRequest"
+                "$ref": "#/components/schemas/OpenAICompletionRequest"
               }
             }
           },
@@ -7027,7 +7027,7 @@
       "title": "OpenAIResponseFormatText",
       "description": "Text response format for OpenAI-compatible chat completion requests."
     },
-    "OpenaiChatCompletionRequest": {
+    "OpenAIChatCompletionRequest": {
       "type": "object",
       "properties": {
         "model": {
@@ -7274,7 +7274,8 @@
         "model",
         "messages"
       ],
-      "title": "OpenaiChatCompletionRequest"
+      "title": "OpenAIChatCompletionRequest",
+      "description": "Request parameters for OpenAI-compatible chat completion endpoint."
     },
     "OpenAIChatCompletion": {
       "type": "object",
@@ -7470,7 +7471,7 @@
       ],
       "title": "OpenAICompletionWithInputMessages"
     },
-    "OpenaiCompletionRequest": {
+    "OpenAICompletionRequest": {
       "type": "object",
       "properties": {
         "model": {
@@ -7605,10 +7606,12 @@
           "type": "array",
           "items": {
             "type": "string"
-          }
+          },
+          "description": "(Optional) vLLM-specific parameter for guided generation with a list of choices."
         },
         "prompt_logprobs": {
-          "type": "integer"
+          "type": "integer",
+          "description": "(Optional) vLLM-specific parameter for number of log probabilities to return for prompt tokens."
         },
         "suffix": {
           "type": "string",
@@ -7620,7 +7623,8 @@
         "model",
         "prompt"
       ],
-      "title": "OpenaiCompletionRequest"
+      "title": "OpenAICompletionRequest",
+      "description": "Request parameters for OpenAI-compatible completion endpoint."
     },
     "OpenAICompletion": {
       "type": "object",
docs/static/stainless-llama-stack-spec.yaml (vendored, 22 lines changed)

@@ -101,7 +101,7 @@ paths:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/OpenaiChatCompletionRequest'
+              $ref: '#/components/schemas/OpenAIChatCompletionRequest'
         required: true
       deprecated: false
   /v1/chat/completions/{completion_id}:
@@ -170,7 +170,7 @@ paths:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/OpenaiCompletionRequest'
+              $ref: '#/components/schemas/OpenAICompletionRequest'
         required: true
       deprecated: false
   /v1/conversations:
@@ -5269,7 +5269,7 @@ components:
       title: OpenAIResponseFormatText
       description: >-
         Text response format for OpenAI-compatible chat completion requests.
-    OpenaiChatCompletionRequest:
+    OpenAIChatCompletionRequest:
      type: object
      properties:
        model:
@@ -5411,7 +5411,9 @@ components:
       required:
         - model
         - messages
-      title: OpenaiChatCompletionRequest
+      title: OpenAIChatCompletionRequest
+      description: >-
+        Request parameters for OpenAI-compatible chat completion endpoint.
     OpenAIChatCompletion:
       type: object
       properties:
@@ -5577,7 +5579,7 @@ components:
         - model
         - input_messages
       title: OpenAICompletionWithInputMessages
-    OpenaiCompletionRequest:
+    OpenAICompletionRequest:
       type: object
       properties:
         model:
@@ -5669,8 +5671,14 @@ components:
           type: array
           items:
             type: string
+          description: >-
+            (Optional) vLLM-specific parameter for guided generation with a list of
+            choices.
         prompt_logprobs:
           type: integer
+          description: >-
+            (Optional) vLLM-specific parameter for number of log probabilities to
+            return for prompt tokens.
         suffix:
           type: string
           description: >-
@@ -5679,7 +5687,9 @@ components:
       required:
         - model
         - prompt
-      title: OpenaiCompletionRequest
+      title: OpenAICompletionRequest
+      description: >-
+        Request parameters for OpenAI-compatible completion endpoint.
     OpenAICompletion:
       type: object
       properties: