# What does this PR do?

Rather than have a single `LLAMA_STACK_VERSION`, we need `_V1`, `_V1ALPHA`, and `_V1BETA` constants. This also necessitated adding `level` to `WebMethod` so that routing can be handled properly. For backwards compatibility, the `v1` routes are kept around and marked as `deprecated`; when used, the server logs a deprecation warning.

Deprecation log:

<img width="1224" height="134" alt="Screenshot 2025-09-25 at 2 43 36 PM" src="https://github.com/user-attachments/assets/0cc7c245-dafc-48f0-be99-269fb9a686f9" />

Moves:

1. post_training to `v1alpha`, as it is under heavy development and not near its final state.
2. eval: job scheduling is not implemented. It relies heavily on the datasetio API, which is under development and missing implementations of specific routes, indicating the structure of those routes might change. Additionally, eval depends on the `inference` API, which is going to be deprecated; eval will likely need a major API surface change to conform to using completions properly.

Implements leveling in #3317.

Note: integration tests will fail until the SDK is regenerated with v1alpha/inference as opposed to v1/inference.

## Test Plan

Existing tests should pass with the newly generated schema. Conformance will also pass, as these routes are not the ones we currently test for stability.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
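To make the leveling scheme concrete, here is a minimal sketch of how an API under heavy development might declare a `v1alpha` route alongside a deprecated `v1` route. This is illustrative, not the actual llama-stack source: it assumes `LLAMA_STACK_API_V1ALPHA` is exported from `llama_stack.apis.version` alongside the `LLAMA_STACK_API_V1` constant imported in the file below, assumes `webmethod` accepts a `deprecated` keyword (the PR only says v1 routes are "marked as `deprecated`"), and uses a deliberately simplified method signature.

```python
# A minimal sketch, not actual llama-stack source. Assumes
# LLAMA_STACK_API_V1ALPHA is exported from llama_stack.apis.version and
# that webmethod accepts a `deprecated` keyword, per the PR description.
from typing import Protocol

from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
from llama_stack.schema_utils import webmethod


class PostTraining(Protocol):
    # Served under /v1alpha/... because post_training is under heavy
    # development and not near its final state (per the PR description).
    # The signature is simplified for illustration.
    @webmethod(route="/post-training/supervised-fine-tune", method="POST", level=LLAMA_STACK_API_V1ALPHA)
    async def supervised_fine_tune(self, job_uuid: str) -> None: ...

    # Backwards-compat: the old v1 route is kept around and marked
    # deprecated, so the server logs a deprecation warning when it is hit.
    @webmethod(
        route="/post-training/supervised-fine-tune",
        method="POST",
        level=LLAMA_STACK_API_V1,
        deprecated=True,
    )
    async def supervised_fine_tune_v1(self, job_uuid: str) -> None: ...
```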
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Protocol, runtime_checkable

from llama_stack.apis.common.job_types import Job
from llama_stack.apis.inference import (
    InterleavedContent,
    LogProbConfig,
    Message,
    ResponseFormat,
    SamplingParams,
    ToolChoice,
    ToolDefinition,
    ToolPromptFormat,
)
from llama_stack.apis.version import LLAMA_STACK_API_V1
from llama_stack.schema_utils import webmethod


@runtime_checkable
class BatchInference(Protocol):
    """Batch inference API for generating completions and chat completions.

    This is an asynchronous API. If the request is successful, the response will be a job which can be polled for completion.

    NOTE: This API is not yet implemented and is subject to change in concert with other asynchronous APIs
    including (post-training, evals, etc).
    """

    @webmethod(route="/batch-inference/completion", method="POST", level=LLAMA_STACK_API_V1)
    async def completion(
        self,
        model: str,
        content_batch: list[InterleavedContent],
        sampling_params: SamplingParams | None = None,
        response_format: ResponseFormat | None = None,
        logprobs: LogProbConfig | None = None,
    ) -> Job:
        """Generate completions for a batch of content.

        :param model: The model to use for the completion.
        :param content_batch: The content to complete.
        :param sampling_params: The sampling parameters to use for the completion.
        :param response_format: The response format to use for the completion.
        :param logprobs: The logprobs to use for the completion.
        :returns: A job for the completion.
        """
        ...

    @webmethod(route="/batch-inference/chat-completion", method="POST", level=LLAMA_STACK_API_V1)
    async def chat_completion(
        self,
        model: str,
        messages_batch: list[list[Message]],
        sampling_params: SamplingParams | None = None,
        # zero-shot tool definitions as input to the model
        tools: list[ToolDefinition] | None = None,
        tool_choice: ToolChoice | None = ToolChoice.auto,
        tool_prompt_format: ToolPromptFormat | None = None,
        response_format: ResponseFormat | None = None,
        logprobs: LogProbConfig | None = None,
    ) -> Job:
        """Generate chat completions for a batch of messages.

        :param model: The model to use for the chat completion.
        :param messages_batch: The messages to complete.
        :param sampling_params: The sampling parameters to use for the completion.
        :param tools: The tools to use for the chat completion.
        :param tool_choice: The tool choice to use for the chat completion.
        :param tool_prompt_format: The tool prompt format to use for the chat completion.
        :param response_format: The response format to use for the chat completion.
        :param logprobs: The logprobs to use for the chat completion.
        :returns: A job for the chat completion.
        """
        ...
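Since both methods return a `Job` rather than inline results, a caller is expected to submit the batch and then poll until the job finishes. The following is a hypothetical usage sketch only: `client` is assumed to be an object implementing `BatchInference`, and the polling helper `get_job_status` and its string status values are illustrative, since this file defines only the submission side of the API.

```python
import asyncio

from llama_stack.apis.inference import SamplingParams, UserMessage


# Hypothetical sketch of driving the BatchInference protocol. `client` is
# assumed to implement BatchInference; get_job_status and the status
# strings below are illustrative, not part of this file.
async def run_batch(client) -> None:
    # Submit a batch of two single-turn conversations; the call returns
    # immediately with a Job handle rather than completions.
    job = await client.chat_completion(
        model="llama-3.1-8b-instruct",  # illustrative model id
        messages_batch=[
            [UserMessage(content="Summarize the v1alpha routing change.")],
            [UserMessage(content="Why was eval moved out of v1?")],
        ],
        sampling_params=SamplingParams(max_tokens=256),
    )

    # Poll until the asynchronous job finishes; the exact polling API is
    # an assumption, since this protocol only covers submission.
    while (status := await client.get_job_status(job.job_id)) not in ("completed", "failed"):
        await asyncio.sleep(5)
    print(f"batch job {job.job_id} finished with status {status}")
```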