Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-03 19:57:35 +00:00
Update runpod.py
This commit is contained in:
parent 42414a1a1b
commit 064602bc97
1 changed file with 188 additions and 48 deletions
@@ -3,52 +3,29 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from collections.abc import AsyncGenerator

from openai import OpenAI

from llama_stack.apis.inference import * # noqa: F403
from llama_stack.apis.inference import *
from llama_stack.apis.inference import OpenAIEmbeddingsResponse

# from llama_stack.providers.datatypes import ModelsProtocolPrivate
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, build_hf_repo_model_entry
from llama_stack.apis.models import Model, ModelType
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
from llama_stack.providers.utils.inference.openai_compat import (
    OpenAIChatCompletionToLlamaStackMixin,
    OpenAICompletionToLlamaStackMixin,
    get_sampling_options,
    process_chat_completion_response,
    process_chat_completion_stream_response,
    process_completion_response,
    process_completion_stream_response,
)
from llama_stack.providers.utils.inference.prompt_adapter import (
    chat_completion_request_to_prompt,
    completion_request_to_prompt,
    interleaved_content_as_str,
)

from .config import RunpodImplConfig

# https://docs.runpod.io/serverless/vllm/overview#compatible-models
# https://github.com/runpod-workers/worker-vllm/blob/main/README.md#compatible-model-architectures
RUNPOD_SUPPORTED_MODELS = {
    "Llama3.1-8B": "meta-llama/Llama-3.1-8B",
    "Llama3.1-70B": "meta-llama/Llama-3.1-70B",
    "Llama3.1-405B:bf16-mp8": "meta-llama/Llama-3.1-405B",
    "Llama3.1-405B": "meta-llama/Llama-3.1-405B-FP8",
    "Llama3.1-405B:bf16-mp16": "meta-llama/Llama-3.1-405B",
    "Llama3.1-8B-Instruct": "meta-llama/Llama-3.1-8B-Instruct",
    "Llama3.1-70B-Instruct": "meta-llama/Llama-3.1-70B-Instruct",
    "Llama3.1-405B-Instruct:bf16-mp8": "meta-llama/Llama-3.1-405B-Instruct",
    "Llama3.1-405B-Instruct": "meta-llama/Llama-3.1-405B-Instruct-FP8",
    "Llama3.1-405B-Instruct:bf16-mp16": "meta-llama/Llama-3.1-405B-Instruct",
    "Llama3.2-1B": "meta-llama/Llama-3.2-1B",
    "Llama3.2-3B": "meta-llama/Llama-3.2-3B",
}

SAFETY_MODELS_ENTRIES = []

# Create MODEL_ENTRIES from RUNPOD_SUPPORTED_MODELS for compatibility with starter template
MODEL_ENTRIES = [
    build_hf_repo_model_entry(provider_model_id, model_descriptor)
    for provider_model_id, model_descriptor in RUNPOD_SUPPORTED_MODELS.items()
] + SAFETY_MODELS_ENTRIES
MODEL_ENTRIES = []


class RunpodInferenceAdapter(
@@ -57,30 +34,83 @@ class RunpodInferenceAdapter(
    OpenAIChatCompletionToLlamaStackMixin,
    OpenAICompletionToLlamaStackMixin,
):
    """
    Adapter for RunPod's OpenAI-compatible API endpoints.
    Supports vLLM on serverless endpoints, whether self-hosted or public.
    Works with any RunPod endpoint that exposes an OpenAI-compatible API.
    """

    def __init__(self, config: RunpodImplConfig) -> None:
        ModelRegistryHelper.__init__(self, stack_to_provider_models_map=RUNPOD_SUPPORTED_MODELS)
        ModelRegistryHelper.__init__(self, MODEL_ENTRIES)
        self.config = config

    async def initialize(self) -> None:
        return
        pass

    async def shutdown(self) -> None:
        pass

    async def register_model(self, model: Model) -> Model:
        """
        Register any model with the runpod provider_id.

        Pass-through registration: accepts any model string that the RunPod endpoint serves.
        No static model validation since RunPod endpoints can serve arbitrary vLLM models.

        YAML configuration example:

        models:
          - metadata: {}
            model_id: runpod/qwen/qwen3-8b
            model_type: llm
            provider_id: runpod
            provider_model_id: qwen/qwen3-8b
          - metadata: {}
            model_id: runpod/deepcogito/cogito-v2-preview-llama-70B
            model_type: llm
            provider_id: runpod
            provider_model_id: deepcogito/cogito-v2-preview-llama-70B

        The provider strips the 'runpod/' prefix before API calls:
        "runpod/qwen/qwen3-8b" -> "qwen/qwen3-8b"
        """
        if model.provider_id == "runpod":
            logger.info(
                f"Registering model: {model.identifier} -> {model.provider_resource_id}"
            )
            return model
        return await super().register_model(model)

    async def completion(
        self,
        model: str,
        model_id: str,
        content: InterleavedContent,
        sampling_params: SamplingParams | None = None,
        response_format: ResponseFormat | None = None,
        stream: bool | None = False,
        logprobs: LogProbConfig | None = None,
    ) -> AsyncGenerator:
        raise NotImplementedError()
        if sampling_params is None:
            sampling_params = SamplingParams()

        request = CompletionRequest(
            model=model_id,
            content=content,
            sampling_params=sampling_params,
            response_format=response_format,
            stream=stream,
            logprobs=logprobs,
        )

        client = OpenAI(base_url=self.config.url, api_key=self.config.api_token)

        if stream:
            return self._stream_completion(request, client)
        else:
            return await self._nonstream_completion(request, client)

    async def chat_completion(
        self,
        model: str,
        model_id: str,
        messages: list[Message],
        sampling_params: SamplingParams | None = None,
        response_format: ResponseFormat | None = None,
@@ -91,10 +121,12 @@ class RunpodInferenceAdapter(
        logprobs: LogProbConfig | None = None,
        tool_config: ToolConfig | None = None,
    ) -> AsyncGenerator:
        """Process chat completion requests using RunPod's OpenAI-compatible API."""
        if sampling_params is None:
            sampling_params = SamplingParams()

        request = ChatCompletionRequest(
            model=model,
            model=model_id,
            messages=messages,
            sampling_params=sampling_params,
            tools=tools or [],
@@ -104,6 +136,7 @@ class RunpodInferenceAdapter(
        )

        client = OpenAI(base_url=self.config.url, api_key=self.config.api_token)

        if stream:
            return self._stream_chat_completion(request, client)
        else:
@@ -112,15 +145,17 @@ class RunpodInferenceAdapter(
    async def _nonstream_chat_completion(
        self, request: ChatCompletionRequest, client: OpenAI
    ) -> ChatCompletionResponse:
        params = self._get_params(request)
        r = client.completions.create(**params)
        params = await self._get_chat_params(request)
        r = client.chat.completions.create(**params)
        return process_chat_completion_response(r, request)

    async def _stream_chat_completion(self, request: ChatCompletionRequest, client: OpenAI) -> AsyncGenerator:
        params = self._get_params(request)
    async def _stream_chat_completion(
        self, request: ChatCompletionRequest, client: OpenAI
    ) -> AsyncGenerator:
        params = await self._get_chat_params(request)

        async def _to_async_generator():
            s = client.completions.create(**params)
            s = client.chat.completions.create(**params)
            for chunk in s:
                yield chunk

@@ -128,14 +163,102 @@ class RunpodInferenceAdapter(
        async for chunk in process_chat_completion_stream_response(stream, request):
            yield chunk

    def _get_params(self, request: ChatCompletionRequest) -> dict:
        return {
            "model": self.map_to_provider_model(request.model),
            "prompt": chat_completion_request_to_prompt(request),
    async def _get_chat_params(self, request: ChatCompletionRequest) -> dict:
        """Convert Llama Stack request to RunPod API parameters."""
        messages = [
            {"role": msg.role, "content": msg.content} for msg in request.messages
        ]

        # Resolve model_id to provider_resource_id
        model_obj = await self.model_store.get_model(request.model)
        model = model_obj.provider_resource_id or request.model

        if model.startswith("runpod/"):
            model = model.replace("runpod/", "", 1)

        params = {
            "model": model,
            "messages": messages,
            "stream": request.stream,
            **get_sampling_options(request.sampling_params),
        }

        if request.stream:
            params["stream_options"] = {"include_usage": True}

        return params

    async def _nonstream_completion(
        self, request: CompletionRequest, client: OpenAI
    ) -> CompletionResponse:
        params = await self._get_completion_params(request)
        r = client.completions.create(**params)
        return process_completion_response(r)

    async def _stream_completion(
        self, request: CompletionRequest, client: OpenAI
    ) -> AsyncGenerator:
        params = await self._get_completion_params(request)

        async def _to_async_generator():
            s = client.completions.create(**params)
            for chunk in s:
                yield chunk

        stream = _to_async_generator()
        async for chunk in process_completion_stream_response(stream):
            yield chunk

    async def _get_completion_params(self, request: CompletionRequest) -> dict:
        # Resolve model_id to provider_resource_id
        model_obj = await self.model_store.get_model(request.model)
        model = model_obj.provider_resource_id or request.model

        if model.startswith("runpod/"):
            model = model.replace("runpod/", "", 1)

        params = {
            "model": model,
            "prompt": completion_request_to_prompt(request),
            "stream": request.stream,
            **get_sampling_options(request.sampling_params),
        }

        if request.stream:
            params["stream_options"] = {"include_usage": True}

        return params

    async def embeddings(
        self,
        model_id: str,
        contents: list[str] | list[InterleavedContentItem],
        text_truncation: TextTruncation | None = TextTruncation.none,
        output_dimension: int | None = None,
        task_type: EmbeddingTaskType | None = None,
    ) -> EmbeddingsResponse:
        # Resolve model_id to provider_resource_id
        model_obj = await self.model_store.get_model(model_id)
        model = model_obj.provider_resource_id or model_id

        if model.startswith("runpod/"):
            model = model.replace("runpod/", "", 1)

        client = OpenAI(base_url=self.config.url, api_key=self.config.api_token)

        kwargs = {}
        if output_dimension:
            kwargs["dimensions"] = output_dimension

        response = client.embeddings.create(
            model=model,
            input=[interleaved_content_as_str(content) for content in contents],
            **kwargs,
        )

        embeddings = [data.embedding for data in response.data]
        return EmbeddingsResponse(embeddings=embeddings)

    async def openai_embeddings(
        self,
        model: str,
@@ -144,4 +267,21 @@ class RunpodInferenceAdapter(
        dimensions: int | None = None,
        user: str | None = None,
    ) -> OpenAIEmbeddingsResponse:
        raise NotImplementedError()
        # Resolve model_id to provider_resource_id
        model_obj = await self.model_store.get_model(model)
        model_stripped = model_obj.provider_resource_id or model

        if model_stripped.startswith("runpod/"):
            model_stripped = model_stripped.replace("runpod/", "", 1)

        client = OpenAI(base_url=self.config.url, api_key=self.config.api_token)

        response = client.embeddings.create(
            model=model_stripped,
            input=input,
            encoding_format=encoding_format,
            dimensions=dimensions,
            user=user,
        )

        return response
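For review context, below is a minimal sketch of the request the updated adapter ends up sending to RunPod, mirroring the new _get_chat_params and client.chat.completions.create(...) path in the diff. It is not part of the commit: the endpoint URL, API token, and model id are placeholder assumptions, and the base_url shape follows RunPod's documented OpenAI-compatible serverless route.

    # Hedged sketch, not part of the diff: what the adapter's chat path sends to a
    # RunPod OpenAI-compatible endpoint. URL, token, and model id are placeholders.
    from openai import OpenAI

    client = OpenAI(
        base_url="https://api.runpod.ai/v2/<endpoint-id>/openai/v1",  # assumed endpoint URL shape
        api_key="<RUNPOD_API_TOKEN>",
    )

    # After the "runpod/" prefix is stripped, the provider_model_id is sent as-is.
    params = {
        "model": "qwen/qwen3-8b",
        "messages": [{"role": "user", "content": "Hello from Llama Stack"}],
        "stream": True,
        "stream_options": {"include_usage": True},  # added by _get_chat_params when streaming
    }

    for chunk in client.chat.completions.create(**params):
        if chunk.choices:  # the final usage-only chunk has no choices
            print(chunk.choices[0].delta.content or "", end="")

The same client pattern applies to the new completion and embeddings paths, which call client.completions.create and client.embeddings.create with the stripped model id.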