Add OpenAI-Compatible models, completions, chat/completions endpoints

This stubs in some OpenAI server-side compatibility with three new
endpoints:

/v1/openai/v1/models
/v1/openai/v1/completions
/v1/openai/v1/chat/completions

This gives common inference apps using OpenAI clients the ability to
talk to Llama Stack using an endpoint like
http://localhost:8321/v1/openai/v1 .
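
For example, a stock OpenAI Python client can be pointed at that base
URL as-is. A minimal sketch; the dummy API key is a placeholder, since
this compatibility layer does not require a real OpenAI key:

    from openai import OpenAI

    # Point an unmodified OpenAI client at Llama Stack's compatibility layer.
    client = OpenAI(
        base_url="http://localhost:8321/v1/openai/v1",
        api_key="none",  # placeholder; no real OpenAI key is needed here
    )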

The two "v1" instances in there isn't awesome, but the thinking is
that Llama Stack's API is v1 and then our OpenAI compatibility layer
is compatible with OpenAI V1. And, some OpenAI clients implicitly
assume the URL ends with "v1", so this gives maximum compatibility.

The OpenAI models endpoint is implemented in the routing layer and
just returns all the models Llama Stack knows about.
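
With a client configured as above, listing those models is the
standard OpenAI call (the returned entries follow the OpenAI Model
type):

    # List every model Llama Stack knows about, in OpenAI format.
    for model in client.models.list():
        print(model.id)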

The completion and chat completion endpoints are currently implemented
only for the remote-vllm provider, which simply proxies those requests
to the backend vLLM server (sketched below).
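
Since vLLM serves its own OpenAI-compatible API, proxying amounts to
forwarding the call. A minimal sketch, assuming the adapter holds an
AsyncOpenAI client; the class and attribute names are illustrative,
not the actual adapter code:

    from openai import AsyncOpenAI

    class VLLMAdapterSketch:  # simplified stand-in for the real adapter
        def __init__(self, base_url: str):
            # Reuse vLLM's own OpenAI-compatible server directly.
            self.client = AsyncOpenAI(base_url=base_url, api_key="none")

        async def openai_chat_completion(self, model, messages, **params):
            # Proxy the request to the backend vLLM server unchanged.
            return await self.client.chat.completions.create(
                model=model, messages=messages, **params
            )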

The goal is to support this for every inference provider: proxying
directly to the provider's OpenAI endpoint for OpenAI-compatible
providers. For providers that don't have an OpenAI-compatible API,
we'll add a mixin that translates incoming OpenAI requests into Llama
Stack inference requests and translates the Llama Stack inference
responses back into OpenAI responses (see the sketch below).
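
A rough sketch of what that mixin could look like. The class name and
the native-response shape (completion_message.content) are assumptions
for illustration, not the actual implementation:

    import time
    from typing import Any, Dict, List

    from openai.types.chat import ChatCompletion, ChatCompletionMessage
    from openai.types.chat.chat_completion import Choice


    class OpenAIChatCompletionMixin:  # hypothetical name
        async def openai_chat_completion(
            self, model: str, messages: List[Dict[str, Any]], **params: Any
        ) -> ChatCompletion:
            # Call the provider's existing, native Llama Stack inference
            # method (assumed available on the class using this mixin).
            response = await self.chat_completion(model_id=model, messages=messages)

            # Re-shape the native response as an OpenAI ChatCompletion;
            # completion_message.content is an assumption about the
            # native response type.
            return ChatCompletion(
                id=f"chatcmpl-{int(time.time())}",
                object="chat.completion",
                created=int(time.time()),
                model=model,
                choices=[
                    Choice(
                        index=0,
                        finish_reason="stop",
                        message=ChatCompletionMessage(
                            role="assistant",
                            content=response.completion_message.content,
                        ),
                    )
                ],
            )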
Author: Ben Browning, 2025-04-07 21:27:06 -04:00
Commit: a193c9fc3f (parent: e2299291c4)
12 changed files with 443 additions and 8 deletions


@@ -17,6 +17,9 @@ from typing import (
    runtime_checkable,
)

from openai.types.chat import ChatCompletion as OpenAIChatCompletion
from openai.types.chat import ChatCompletionMessageParam as OpenAIChatCompletionMessageParam
from openai.types.completion import Completion as OpenAICompletion
from pydantic import BaseModel, Field, field_validator
from typing_extensions import Annotated
@@ -564,3 +567,57 @@ class Inference(Protocol):
        :returns: An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}
        """
        ...

    @webmethod(route="/openai/v1/completions", method="POST")
    async def openai_completion(
        self,
        model: str,
        prompt: str,
        best_of: Optional[int] = None,
        echo: Optional[bool] = None,
        frequency_penalty: Optional[float] = None,
        logit_bias: Optional[Dict[str, float]] = None,
        logprobs: Optional[bool] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        seed: Optional[int] = None,
        stop: Optional[Union[str, List[str]]] = None,
        stream: Optional[bool] = None,
        stream_options: Optional[Dict[str, Any]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        user: Optional[str] = None,
    ) -> OpenAICompletion:
        """Generate an OpenAI-compatible completion for the given prompt using the specified model."""
        ...

    @webmethod(route="/openai/v1/chat/completions", method="POST")
    async def openai_chat_completion(
        self,
        model: str,
        messages: List[OpenAIChatCompletionMessageParam],
        frequency_penalty: Optional[float] = None,
        function_call: Optional[Union[str, Dict[str, Any]]] = None,
        functions: Optional[List[Dict[str, Any]]] = None,
        logit_bias: Optional[Dict[str, float]] = None,
        logprobs: Optional[bool] = None,
        max_completion_tokens: Optional[int] = None,
        max_tokens: Optional[int] = None,
        n: Optional[int] = None,
        parallel_tool_calls: Optional[bool] = None,
        presence_penalty: Optional[float] = None,
        response_format: Optional[Dict[str, str]] = None,
        seed: Optional[int] = None,
        stop: Optional[Union[str, List[str]]] = None,
        stream: Optional[bool] = None,
        stream_options: Optional[Dict[str, Any]] = None,
        temperature: Optional[float] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        top_logprobs: Optional[int] = None,
        top_p: Optional[float] = None,
        user: Optional[str] = None,
    ) -> OpenAIChatCompletion:
        """Generate an OpenAI-compatible chat completion for the given messages using the specified model."""
        ...
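
With the endpoints above in place, a stock OpenAI client exercises
them directly. A usage sketch, reusing the client configured earlier;
the model ID is a placeholder for whatever your distribution serves:

    # POST /v1/openai/v1/completions
    completion = client.completions.create(
        model="meta-llama/Llama-3.1-8B-Instruct",  # placeholder model ID
        prompt="Hello, world!",
        max_tokens=32,
    )
    print(completion.choices[0].text)

    # POST /v1/openai/v1/chat/completions
    chat = client.chat.completions.create(
        model="meta-llama/Llama-3.1-8B-Instruct",  # placeholder model ID
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(chat.choices[0].message.content)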


@@ -7,6 +7,7 @@
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Protocol, runtime_checkable

from openai.types.model import Model as OpenAIModel
from pydantic import BaseModel, ConfigDict, Field

from llama_stack.apis.resource import Resource, ResourceType

@@ -56,12 +57,19 @@ class ListModelsResponse(BaseModel):
    data: List[Model]


class OpenAIListModelsResponse(BaseModel):
    data: List[OpenAIModel]


@runtime_checkable
@trace_protocol
class Models(Protocol):
    @webmethod(route="/models", method="GET")
    async def list_models(self) -> ListModelsResponse: ...

    @webmethod(route="/openai/v1/models", method="GET")
    async def openai_list_models(self) -> OpenAIListModelsResponse: ...

    @webmethod(route="/models/{model_id:path}", method="GET")
    async def get_model(
        self,