mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-12 13:57:57 +00:00)
test
# What does this PR do?

## Test Plan
parent 548ccff368
commit 82314c686c
32 changed files with 624 additions and 882 deletions
docs/static/stainless-llama-stack-spec.html (vendored): 16 changed lines
@@ -243,7 +243,7 @@
         "content": {
           "application/json": {
             "schema": {
-              "$ref": "#/components/schemas/OpenaiCompletionRequest"
+              "$ref": "#/components/schemas/OpenAICompletionRequest"
             }
           }
         },
@@ -7274,7 +7274,8 @@
           "model",
           "messages"
         ],
-        "title": "OpenaiChatCompletionRequest"
+        "title": "OpenaiChatCompletionRequest",
+        "description": "Request parameters for OpenAI-compatible chat completion endpoint."
       },
       "OpenAIChatCompletion": {
         "type": "object",
@@ -7470,7 +7471,7 @@
         ],
         "title": "OpenAICompletionWithInputMessages"
       },
-      "OpenaiCompletionRequest": {
+      "OpenAICompletionRequest": {
         "type": "object",
         "properties": {
           "model": {
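Note: the two hunks above rename the schema in both places it appears, the request-body `$ref` in the first hunk (around line 243) and the definition key under `components/schemas` here. A rename like this only stays consistent if every reference is updated together with the definition. The sketch below is not part of this commit; it assumes a plain JSON copy of the spec (the file in this diff embeds the same JSON inside an HTML page) and shows one way to check for dangling `$ref`s after such a rename.

```python
import json

def find_dangling_refs(spec: dict) -> list[str]:
    """Return $refs that point at schemas missing from components/schemas."""
    defined = set(spec.get("components", {}).get("schemas", {}))
    dangling: list[str] = []

    def walk(node) -> None:
        if isinstance(node, dict):
            ref = node.get("$ref")
            if isinstance(ref, str) and ref.startswith("#/components/schemas/"):
                # e.g. "#/components/schemas/OpenAICompletionRequest"
                if ref.rsplit("/", 1)[-1] not in defined:
                    dangling.append(ref)
            for value in node.values():
                walk(value)
        elif isinstance(node, list):
            for item in node:
                walk(item)

    walk(spec)
    return dangling

if __name__ == "__main__":
    # Hypothetical path: point this at a JSON rendering of the spec.
    with open("llama-stack-spec.json") as f:
        print(find_dangling_refs(json.load(f)))
```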
@@ -7605,10 +7606,12 @@
             "type": "array",
             "items": {
               "type": "string"
-            }
+            },
+            "description": "(Optional) vLLM-specific parameter for guided generation with a list of choices."
           },
           "prompt_logprobs": {
-            "type": "integer"
+            "type": "integer",
+            "description": "(Optional) vLLM-specific parameter for number of log probabilities to return for prompt tokens."
           },
           "suffix": {
             "type": "string",
@@ -7620,7 +7623,8 @@
           "model",
           "prompt"
         ],
-        "title": "OpenaiCompletionRequest"
+        "title": "OpenAICompletionRequest",
+        "description": "Request parameters for OpenAI-compatible completion endpoint."
       },
       "OpenAICompletion": {
         "type": "object",
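The added `description` strings and the title change in the last three hunks look like the output of regenerating the spec rather than hand edits. If the schemas are produced from Python request models (a common workflow, and consistent with these mechanical changes), descriptions like these typically come from the models' docstrings and field metadata. A generic Pydantic-style sketch of that pattern follows; the class and field names mirror the spec but are not copied from this repository.

```python
from pydantic import BaseModel, Field

# Illustrative only: field metadata that would surface as JSON Schema
# "description" strings like the ones added in the hunks above.
class OpenAICompletionRequest(BaseModel):
    """Request parameters for OpenAI-compatible completion endpoint."""

    model: str
    prompt: str
    guided_choice: list[str] | None = Field(
        default=None,
        description="(Optional) vLLM-specific parameter for guided generation with a list of choices.",
    )
    prompt_logprobs: int | None = Field(
        default=None,
        description="(Optional) vLLM-specific parameter for number of log probabilities to return for prompt tokens.",
    )

if __name__ == "__main__":
    schema = OpenAICompletionRequest.model_json_schema()
    print(schema["title"])        # OpenAICompletionRequest
    print(schema["description"])  # the class docstring
    print(schema["properties"]["prompt_logprobs"]["description"])
```

Running the snippet prints the docstring and field description, which is the same text that ends up in the generated `components/schemas` entries.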