llama-stack-mirror/llama_stack/apis/batch_inference/batch_inference.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Protocol, runtime_checkable

from llama_stack.apis.inference import (
    ChatCompletionResponse,
    CompletionResponse,
    InterleavedContent,
    LogProbConfig,
    Message,
    ResponseFormat,
    SamplingParams,
    ToolChoice,
    ToolDefinition,
    ToolPromptFormat,
)
from llama_stack.schema_utils import webmethod


@runtime_checkable
class BatchInference(Protocol):
    """Batch variants of the inference API: run completion or chat completion
    over a batch of inputs against a single model."""
@webmethod(route="/batch-inference/completion", method="POST")
async def batch_completion(
self,
model: str,
content_batch: List[InterleavedContent],
sampling_params: Optional[SamplingParams] = None,
response_format: Optional[ResponseFormat] = None,
logprobs: Optional[LogProbConfig] = None,
) -> list[CompletionResponse]: ...
@webmethod(route="/batch-inference/chat-completion", method="POST")
async def batch_chat_completion(
self,
model: str,
messages_batch: List[List[Message]],
sampling_params: Optional[SamplingParams] = None,
# zero-shot tool definitions as input to the model
tools: Optional[List[ToolDefinition]] = list,
tool_choice: Optional[ToolChoice] = ToolChoice.auto,
tool_prompt_format: Optional[ToolPromptFormat] = None,
response_format: Optional[ResponseFormat] = None,
logprobs: Optional[LogProbConfig] = None,
) -> list[ChatCompletionResponse]: ...
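

# ---------------------------------------------------------------------------
# Example (not part of this module): because BatchInference is a
# runtime_checkable Protocol, a provider satisfies it structurally just by
# implementing both methods; no inheritance is required. The sketch below
# fans each batch item out concurrently to a single-request client. The
# `inference_client` object and its async `completion` / `chat_completion`
# methods are hypothetical, assumed only for illustration.

import asyncio


class NaiveBatchInferenceProvider:
    def __init__(self, inference_client) -> None:
        # Hypothetical single-request client; assumed to expose async
        # `completion` and `chat_completion` methods returning the
        # corresponding response types.
        self.inference_client = inference_client

    async def batch_completion(
        self,
        model: str,
        content_batch: list[InterleavedContent],
        sampling_params: SamplingParams | None = None,
        response_format: ResponseFormat | None = None,
        logprobs: LogProbConfig | None = None,
    ) -> list[CompletionResponse]:
        # Issue all requests concurrently; gather preserves batch order.
        return await asyncio.gather(
            *(
                self.inference_client.completion(
                    model=model,
                    content=content,
                    sampling_params=sampling_params,
                    response_format=response_format,
                    logprobs=logprobs,
                )
                for content in content_batch
            )
        )

    async def batch_chat_completion(
        self,
        model: str,
        messages_batch: list[list[Message]],
        sampling_params: SamplingParams | None = None,
        tools: list[ToolDefinition] | None = None,
        tool_choice: ToolChoice | None = ToolChoice.auto,
        tool_prompt_format: ToolPromptFormat | None = None,
        response_format: ResponseFormat | None = None,
        logprobs: LogProbConfig | None = None,
    ) -> list[ChatCompletionResponse]:
        return await asyncio.gather(
            *(
                self.inference_client.chat_completion(
                    model=model,
                    messages=messages,
                    sampling_params=sampling_params,
                    tools=tools,
                    tool_choice=tool_choice,
                    tool_prompt_format=tool_prompt_format,
                    response_format=response_format,
                    logprobs=logprobs,
                )
                for messages in messages_batch
            )
        )


# Since the protocol is runtime_checkable, isinstance() performs a structural
# check (it verifies method names only, not signatures):
#     isinstance(NaiveBatchInferenceProvider(client), BatchInference)  # True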