feat: OpenAI-Compatible models, completions, chat/completions (#1894)

# What does this PR do?

This stubs in some OpenAI server-side compatibility with three new
endpoints:

/v1/openai/v1/models
/v1/openai/v1/completions
/v1/openai/v1/chat/completions

This gives common inference apps using OpenAI clients the ability to
talk to Llama Stack using an endpoint like
http://localhost:8321/v1/openai/v1 .

The two "v1" instances in there isn't awesome, but the thinking is that
Llama Stack's API is v1 and then our OpenAI compatibility layer is
compatible with OpenAI V1. And, some OpenAI clients implicitly assume
the URL ends with "v1", so this gives maximum compatibility.
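
For example, with the official `openai` Python client (a sketch; the
model ID and the placeholder API key are assumptions, so use whatever
model your distribution has registered):

```python
# Sketch: point an unmodified OpenAI client at Llama Stack.
# The model ID below is an assumption; use one your stack serves.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8321/v1/openai/v1",
    api_key="none",  # placeholder; assumes the server doesn't require auth
)

response = client.chat.completions.create(
    model="meta-llama/Llama-3.2-3B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)
```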

The OpenAI models endpoint is implemented in the routing layer and
simply returns all the models Llama Stack knows about.
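
Since it's the standard OpenAI models API, an OpenAI client can
enumerate Llama Stack's model registry directly, e.g. (reusing the
client from the sketch above):

```python
# Lists every model registered with Llama Stack,
# via the OpenAI-compatible endpoint.
for model in client.models.list():
    print(model.id, model.owned_by)
```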

The following providers should work with the new OpenAI completions and
chat/completions APIs:
* remote::anthropic (untested)
* remote::cerebras-openai-compat (untested)
* remote::fireworks (tested)
* remote::fireworks-openai-compat (untested)
* remote::gemini (untested)
* remote::groq-openai-compat (untested)
* remote::nvidia (tested)
* remote::ollama (tested)
* remote::openai (untested)
* remote::passthrough (untested)
* remote::sambanova-openai-compat (untested)
* remote::together (tested)
* remote::together-openai-compat (untested)
* remote::vllm (tested)

The goal is to support this for every inference provider: for
OpenAI-compatible providers, we proxy directly to the provider's OpenAI
endpoint. For providers that don't have an OpenAI-compatible API, we'll
add a mixin that translates incoming OpenAI requests into Llama Stack
inference requests and translates the Llama Stack inference responses
back into OpenAI responses.
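
To make that translation idea concrete, here's a rough sketch of what
such a mixin could look like. Everything here is hypothetical: the class
name, the `chat_completion` method it delegates to, and the response
shapes are illustrative assumptions, not the actual implementation in
this PR.

```python
# Hypothetical sketch of an OpenAI-to-Llama-Stack translation mixin.
# All names and shapes here are illustrative assumptions, not this PR's code.
import time
import uuid


class OpenAIChatCompletionToLlamaStackMixin:
    async def openai_chat_completion(self, model: str, messages: list[dict], **params):
        # Translate the OpenAI-style messages into Llama Stack messages.
        ls_messages = [{"role": m["role"], "content": m["content"]} for m in messages]

        # Delegate to the provider's native Llama Stack inference method
        # (assumed to exist on the class this mixin is combined with).
        ls_response = await self.chat_completion(model_id=model, messages=ls_messages)

        # Translate the Llama Stack response back into OpenAI's response shape.
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": ls_response.completion_message.content,
                    },
                    "finish_reason": "stop",
                }
            ],
        }
```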

This is related to #1817 but is a bit larger in scope than just chat
completions, as I have real use cases that need the older completions
API as well.

## Test Plan

### vLLM

```
VLLM_URL="http://localhost:8000/v1" INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct" llama stack build --template remote-vllm --image-type venv --run

LLAMA_STACK_CONFIG=http://localhost:8321 INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct" python -m pytest -v tests/integration/inference/test_openai_completion.py --text-model "meta-llama/Llama-3.2-3B-Instruct"
```

### ollama
```
INFERENCE_MODEL="llama3.2:3b-instruct-q8_0" llama stack build --template ollama --image-type venv --run

LLAMA_STACK_CONFIG=http://localhost:8321 INFERENCE_MODEL="llama3.2:3b-instruct-q8_0" python -m pytest -v tests/integration/inference/test_openai_completion.py --text-model "llama3.2:3b-instruct-q8_0"
```



## Documentation

Run a Llama Stack distribution that uses one of the providers mentioned
in the list above. Then, use your favorite OpenAI client to send
completion or chat completion requests with the base_url set to
http://localhost:8321/v1/openai/v1 . Replace "localhost:8321" with the
host and port of your Llama Stack server, if different.
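
For instance, the older completions API can be exercised with the stock
OpenAI client like this (a sketch; the model ID is an assumption, so
substitute one your distribution serves):

```python
# Sketch: exercise the OpenAI-compatible completions endpoint.
# The model ID is an assumption; use one registered with your stack.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8321/v1/openai/v1",
    api_key="none",  # placeholder; assumes the server doesn't require auth
)

completion = client.completions.create(
    model="meta-llama/Llama-3.2-3B-Instruct",
    prompt="The capital of France is",
    max_tokens=16,
)
print(completion.choices[0].text)
```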

---------

Signed-off-by: Ben Browning <bbrownin@redhat.com>
Commit 2b2db5fbda (parent 24d70cedca), authored by Ben Browning on 2025-04-11 16:14:17 -04:00 and committed by GitHub.
27 changed files with 3265 additions and 20 deletions


@@ -3092,6 +3092,125 @@
}
}
},
"/v1/openai/v1/chat/completions": {
"post": {
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/OpenAIChatCompletion"
}
}
}
},
"400": {
"$ref": "#/components/responses/BadRequest400"
},
"429": {
"$ref": "#/components/responses/TooManyRequests429"
},
"500": {
"$ref": "#/components/responses/InternalServerError500"
},
"default": {
"$ref": "#/components/responses/DefaultError"
}
},
"tags": [
"Inference"
],
"description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/OpenaiChatCompletionRequest"
}
}
},
"required": true
}
}
},
"/v1/openai/v1/completions": {
"post": {
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/OpenAICompletion"
}
}
}
},
"400": {
"$ref": "#/components/responses/BadRequest400"
},
"429": {
"$ref": "#/components/responses/TooManyRequests429"
},
"500": {
"$ref": "#/components/responses/InternalServerError500"
},
"default": {
"$ref": "#/components/responses/DefaultError"
}
},
"tags": [
"Inference"
],
"description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/OpenaiCompletionRequest"
}
}
},
"required": true
}
}
},
"/v1/openai/v1/models": {
"get": {
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/OpenAIListModelsResponse"
}
}
}
},
"400": {
"$ref": "#/components/responses/BadRequest400"
},
"429": {
"$ref": "#/components/responses/TooManyRequests429"
},
"500": {
"$ref": "#/components/responses/InternalServerError500"
},
"default": {
"$ref": "#/components/responses/DefaultError"
}
},
"tags": [
"Models"
],
"description": "",
"parameters": []
}
},
"/v1/post-training/preference-optimize": {
"post": {
"responses": {
@@ -8713,6 +8832,819 @@
],
"title": "LogEventRequest"
},
"OpenAIAssistantMessageParam": {
"type": "object",
"properties": {
"role": {
"type": "string",
"const": "assistant",
"default": "assistant",
"description": "Must be \"assistant\" to identify this as the model's response"
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The content of the model's response"
},
"name": {
"type": "string",
"description": "(Optional) The name of the assistant message participant."
},
"tool_calls": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ToolCall"
},
"description": "List of tool calls. Each tool call is a ToolCall object."
}
},
"additionalProperties": false,
"required": [
"role",
"content"
],
"title": "OpenAIAssistantMessageParam",
"description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request."
},
"OpenAIDeveloperMessageParam": {
"type": "object",
"properties": {
"role": {
"type": "string",
"const": "developer",
"default": "developer",
"description": "Must be \"developer\" to identify this as a developer message"
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The content of the developer message"
},
"name": {
"type": "string",
"description": "(Optional) The name of the developer message participant."
}
},
"additionalProperties": false,
"required": [
"role",
"content"
],
"title": "OpenAIDeveloperMessageParam",
"description": "A message from the developer in an OpenAI-compatible chat completion request."
},
"OpenAIMessageParam": {
"oneOf": [
{
"$ref": "#/components/schemas/OpenAIUserMessageParam"
},
{
"$ref": "#/components/schemas/OpenAISystemMessageParam"
},
{
"$ref": "#/components/schemas/OpenAIAssistantMessageParam"
},
{
"$ref": "#/components/schemas/OpenAIToolMessageParam"
},
{
"$ref": "#/components/schemas/OpenAIDeveloperMessageParam"
}
],
"discriminator": {
"propertyName": "role",
"mapping": {
"user": "#/components/schemas/OpenAIUserMessageParam",
"system": "#/components/schemas/OpenAISystemMessageParam",
"assistant": "#/components/schemas/OpenAIAssistantMessageParam",
"tool": "#/components/schemas/OpenAIToolMessageParam",
"developer": "#/components/schemas/OpenAIDeveloperMessageParam"
}
}
},
"OpenAISystemMessageParam": {
"type": "object",
"properties": {
"role": {
"type": "string",
"const": "system",
"default": "system",
"description": "Must be \"system\" to identify this as a system message"
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)."
},
"name": {
"type": "string",
"description": "(Optional) The name of the system message participant."
}
},
"additionalProperties": false,
"required": [
"role",
"content"
],
"title": "OpenAISystemMessageParam",
"description": "A system message providing instructions or context to the model."
},
"OpenAIToolMessageParam": {
"type": "object",
"properties": {
"role": {
"type": "string",
"const": "tool",
"default": "tool",
"description": "Must be \"tool\" to identify this as a tool response"
},
"tool_call_id": {
"type": "string",
"description": "Unique identifier for the tool call this response is for"
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The response content from the tool"
}
},
"additionalProperties": false,
"required": [
"role",
"tool_call_id",
"content"
],
"title": "OpenAIToolMessageParam",
"description": "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request."
},
"OpenAIUserMessageParam": {
"type": "object",
"properties": {
"role": {
"type": "string",
"const": "user",
"default": "user",
"description": "Must be \"user\" to identify this as a user message"
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The content of the message, which can include text and other media"
},
"name": {
"type": "string",
"description": "(Optional) The name of the user message participant."
}
},
"additionalProperties": false,
"required": [
"role",
"content"
],
"title": "OpenAIUserMessageParam",
"description": "A message from the user in an OpenAI-compatible chat completion request."
},
"OpenaiChatCompletionRequest": {
"type": "object",
"properties": {
"model": {
"type": "string",
"description": "The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint."
},
"messages": {
"type": "array",
"items": {
"$ref": "#/components/schemas/OpenAIMessageParam"
},
"description": "List of messages in the conversation"
},
"frequency_penalty": {
"type": "number",
"description": "(Optional) The penalty for repeated tokens"
},
"function_call": {
"oneOf": [
{
"type": "string"
},
{
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
}
}
],
"description": "(Optional) The function call to use"
},
"functions": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
}
},
"description": "(Optional) List of functions to use"
},
"logit_bias": {
"type": "object",
"additionalProperties": {
"type": "number"
},
"description": "(Optional) The logit bias to use"
},
"logprobs": {
"type": "boolean",
"description": "(Optional) The log probabilities to use"
},
"max_completion_tokens": {
"type": "integer",
"description": "(Optional) The maximum number of tokens to generate"
},
"max_tokens": {
"type": "integer",
"description": "(Optional) The maximum number of tokens to generate"
},
"n": {
"type": "integer",
"description": "(Optional) The number of completions to generate"
},
"parallel_tool_calls": {
"type": "boolean",
"description": "(Optional) Whether to parallelize tool calls"
},
"presence_penalty": {
"type": "number",
"description": "(Optional) The penalty for repeated tokens"
},
"response_format": {
"type": "object",
"additionalProperties": {
"type": "string"
},
"description": "(Optional) The response format to use"
},
"seed": {
"type": "integer",
"description": "(Optional) The seed to use"
},
"stop": {
"oneOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
],
"description": "(Optional) The stop tokens to use"
},
"stream": {
"type": "boolean",
"description": "(Optional) Whether to stream the response"
},
"stream_options": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
},
"description": "(Optional) The stream options to use"
},
"temperature": {
"type": "number",
"description": "(Optional) The temperature to use"
},
"tool_choice": {
"oneOf": [
{
"type": "string"
},
{
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
}
}
],
"description": "(Optional) The tool choice to use"
},
"tools": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
}
},
"description": "(Optional) The tools to use"
},
"top_logprobs": {
"type": "integer",
"description": "(Optional) The top log probabilities to use"
},
"top_p": {
"type": "number",
"description": "(Optional) The top p to use"
},
"user": {
"type": "string",
"description": "(Optional) The user to use"
}
},
"additionalProperties": false,
"required": [
"model",
"messages"
],
"title": "OpenaiChatCompletionRequest"
},
"OpenAIChatCompletion": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "The ID of the chat completion"
},
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/OpenAIChoice"
},
"description": "List of choices"
},
"object": {
"type": "string",
"const": "chat.completion",
"default": "chat.completion",
"description": "The object type, which will be \"chat.completion\""
},
"created": {
"type": "integer",
"description": "The Unix timestamp in seconds when the chat completion was created"
},
"model": {
"type": "string",
"description": "The model that was used to generate the chat completion"
}
},
"additionalProperties": false,
"required": [
"id",
"choices",
"object",
"created",
"model"
],
"title": "OpenAIChatCompletion",
"description": "Response from an OpenAI-compatible chat completion request."
},
"OpenAIChoice": {
"type": "object",
"properties": {
"message": {
"$ref": "#/components/schemas/OpenAIMessageParam",
"description": "The message from the model"
},
"finish_reason": {
"type": "string",
"description": "The reason the model stopped generating"
},
"index": {
"type": "integer"
},
"logprobs": {
"$ref": "#/components/schemas/OpenAIChoiceLogprobs"
}
},
"additionalProperties": false,
"required": [
"message",
"finish_reason",
"index"
],
"title": "OpenAIChoice",
"description": "A choice from an OpenAI-compatible chat completion response."
},
"OpenAIChoiceLogprobs": {
"type": "object",
"properties": {
"content": {
"type": "array",
"items": {
"$ref": "#/components/schemas/OpenAITokenLogProb"
}
},
"refusal": {
"type": "array",
"items": {
"$ref": "#/components/schemas/OpenAITokenLogProb"
}
}
},
"additionalProperties": false,
"title": "OpenAIChoiceLogprobs",
"description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response."
},
"OpenAITokenLogProb": {
"type": "object",
"properties": {
"token": {
"type": "string"
},
"bytes": {
"type": "array",
"items": {
"type": "integer"
}
},
"logprob": {
"type": "number"
},
"top_logprobs": {
"type": "array",
"items": {
"$ref": "#/components/schemas/OpenAITopLogProb"
}
}
},
"additionalProperties": false,
"required": [
"token",
"logprob",
"top_logprobs"
],
"title": "OpenAITokenLogProb",
"description": "The log probability for a token from an OpenAI-compatible chat completion response."
},
"OpenAITopLogProb": {
"type": "object",
"properties": {
"token": {
"type": "string"
},
"bytes": {
"type": "array",
"items": {
"type": "integer"
}
},
"logprob": {
"type": "number"
}
},
"additionalProperties": false,
"required": [
"token",
"logprob"
],
"title": "OpenAITopLogProb",
"description": "The top log probability for a token from an OpenAI-compatible chat completion response."
},
"OpenaiCompletionRequest": {
"type": "object",
"properties": {
"model": {
"type": "string",
"description": "The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint."
},
"prompt": {
"oneOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
},
{
"type": "array",
"items": {
"type": "integer"
}
},
{
"type": "array",
"items": {
"type": "array",
"items": {
"type": "integer"
}
}
}
],
"description": "The prompt to generate a completion for"
},
"best_of": {
"type": "integer",
"description": "(Optional) The number of completions to generate"
},
"echo": {
"type": "boolean",
"description": "(Optional) Whether to echo the prompt"
},
"frequency_penalty": {
"type": "number",
"description": "(Optional) The penalty for repeated tokens"
},
"logit_bias": {
"type": "object",
"additionalProperties": {
"type": "number"
},
"description": "(Optional) The logit bias to use"
},
"logprobs": {
"type": "boolean",
"description": "(Optional) The log probabilities to use"
},
"max_tokens": {
"type": "integer",
"description": "(Optional) The maximum number of tokens to generate"
},
"n": {
"type": "integer",
"description": "(Optional) The number of completions to generate"
},
"presence_penalty": {
"type": "number",
"description": "(Optional) The penalty for repeated tokens"
},
"seed": {
"type": "integer",
"description": "(Optional) The seed to use"
},
"stop": {
"oneOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"type": "string"
}
}
],
"description": "(Optional) The stop tokens to use"
},
"stream": {
"type": "boolean",
"description": "(Optional) Whether to stream the response"
},
"stream_options": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
},
"description": "(Optional) The stream options to use"
},
"temperature": {
"type": "number",
"description": "(Optional) The temperature to use"
},
"top_p": {
"type": "number",
"description": "(Optional) The top p to use"
},
"user": {
"type": "string",
"description": "(Optional) The user to use"
},
"guided_choice": {
"type": "array",
"items": {
"type": "string"
}
},
"prompt_logprobs": {
"type": "integer"
}
},
"additionalProperties": false,
"required": [
"model",
"prompt"
],
"title": "OpenaiCompletionRequest"
},
"OpenAICompletion": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/OpenAICompletionChoice"
}
},
"created": {
"type": "integer"
},
"model": {
"type": "string"
},
"object": {
"type": "string",
"const": "text_completion",
"default": "text_completion"
}
},
"additionalProperties": false,
"required": [
"id",
"choices",
"created",
"model",
"object"
],
"title": "OpenAICompletion",
"description": "Response from an OpenAI-compatible completion request."
},
"OpenAICompletionChoice": {
"type": "object",
"properties": {
"finish_reason": {
"type": "string"
},
"text": {
"type": "string"
},
"index": {
"type": "integer"
},
"logprobs": {
"$ref": "#/components/schemas/OpenAIChoiceLogprobs"
}
},
"additionalProperties": false,
"required": [
"finish_reason",
"text",
"index"
],
"title": "OpenAICompletionChoice",
"description": "A choice from an OpenAI-compatible completion response."
},
"OpenAIModel": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"object": {
"type": "string",
"const": "model",
"default": "model"
},
"created": {
"type": "integer"
},
"owned_by": {
"type": "string"
}
},
"additionalProperties": false,
"required": [
"id",
"object",
"created",
"owned_by"
],
"title": "OpenAIModel",
"description": "A model from OpenAI."
},
"OpenAIListModelsResponse": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
"$ref": "#/components/schemas/OpenAIModel"
}
}
},
"additionalProperties": false,
"required": [
"data"
],
"title": "OpenAIListModelsResponse"
},
"DPOAlignmentConfig": {
"type": "object",
"properties": {