mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-08-21 01:15:10 +00:00
Add complete batches API implementation with protocol, providers, and tests: Core Infrastructure: - Add batches API protocol using OpenAI Batch types directly - Add Api.batches enum value and protocol mapping in resolver - Add OpenAI "batch" file purpose support - Include proper error handling (ConflictError, ResourceNotFoundError) Reference Provider: - Add ReferenceBatchesImpl with full CRUD operations (create, retrieve, cancel, list) - Implement background batch processing with configurable concurrency - Add SQLite KVStore backend for persistence - Support /v1/chat/completions endpoint with request validation Comprehensive Test Suite: - Add unit tests for provider implementation with validation - Add integration tests for end-to-end batch processing workflows - Add error handling tests for validation, malformed inputs, and edge cases Configuration: - Add max_concurrent_batches and max_concurrent_requests_per_batch options - Add provider documentation with sample configurations Test with - ``` $ uv run llama stack build --image-type venv --providers inference=YOU_PICK,files=inline::localfs,batches=inline::reference --run & $ LLAMA_STACK_CONFIG=http://localhost:8321 uv run pytest tests/unit/providers/batches tests/integration/batches --text-model YOU_PICK ``` addresses #3066 --------- Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com> Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
81 lines
3.2 KiB
Python
81 lines
3.2 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
# Custom Llama Stack Exception classes should follow the following schema
|
|
# 1. All classes should inherit from an existing Built-In Exception class: https://docs.python.org/3/library/exceptions.html
|
|
# 2. All classes should have a custom error message with the goal of informing the Llama Stack user specifically
|
|
# 3. All classes should propagate the inherited __init__ function via 'super().__init__(message)'
|
|
|
|
|
|
class ResourceNotFoundError(ValueError):
    """Generic exception raised when a named Llama Stack resource cannot be found.

    Subclasses supply the resource type and the client call that lists
    available resources of that type.
    """

    def __init__(self, resource_name: str, resource_type: str, client_list: str) -> None:
        # Point the user at the listing API so they can discover valid names.
        super().__init__(
            f"{resource_type} '{resource_name}' not found. Use '{client_list}' to list available {resource_type}s."
        )
|
|
|
|
|
|
class UnsupportedModelError(ValueError):
    """Raised when a requested model is not in the list of supported models.

    :param model_name: name of the model that was requested
    :param supported_models_list: names of the models that are supported
    """

    # Fix: added the `-> None` return annotation for consistency with the
    # other exception classes in this module.
    def __init__(self, model_name: str, supported_models_list: list[str]) -> None:
        message = f"'{model_name}' model is not supported. Supported models are: {', '.join(supported_models_list)}"
        super().__init__(message)
|
|
|
|
|
|
class ModelNotFoundError(ResourceNotFoundError):
    """Raised when a referenced model does not exist in Llama Stack."""

    def __init__(self, model_name: str) -> None:
        # Delegate message construction to the shared base class.
        super().__init__(model_name, "Model", "client.models.list()")
|
|
|
|
|
|
class VectorStoreNotFoundError(ResourceNotFoundError):
    """Raised when a referenced vector store does not exist in Llama Stack."""

    def __init__(self, vector_store_name: str) -> None:
        # Delegate message construction to the shared base class.
        super().__init__(vector_store_name, "Vector Store", "client.vector_dbs.list()")
|
|
|
|
|
|
class DatasetNotFoundError(ResourceNotFoundError):
    """Raised when a referenced dataset does not exist in Llama Stack."""

    def __init__(self, dataset_name: str) -> None:
        # Delegate message construction to the shared base class.
        super().__init__(dataset_name, "Dataset", "client.datasets.list()")
|
|
|
|
|
|
class ToolGroupNotFoundError(ResourceNotFoundError):
    """Raised when a referenced tool group does not exist in Llama Stack."""

    def __init__(self, toolgroup_name: str) -> None:
        # Delegate message construction to the shared base class.
        super().__init__(toolgroup_name, "Tool Group", "client.toolgroups.list()")
|
|
|
|
|
|
class SessionNotFoundError(ValueError):
    """Raised when a referenced session is missing or the caller lacks access.

    Deliberately does not distinguish "missing" from "forbidden" in the
    message, so callers cannot probe for the existence of other users'
    sessions.
    """

    def __init__(self, session_name: str) -> None:
        super().__init__(f"Session '{session_name}' not found or access denied.")
|
|
|
|
|
|
class ModelTypeError(TypeError):
    """Raised when a model exists but has a different type than the caller expected.

    :param model_name: name of the model that was looked up
    :param model_type: the type the model actually has
    :param expected_model_type: the type the caller required
    """

    def __init__(self, model_name: str, model_type: str, expected_model_type: str) -> None:
        msg = f"Model '{model_name}' is of type '{model_type}' rather than the expected type '{expected_model_type}'"
        super().__init__(msg)
|
|
|
|
|
|
class ConflictError(ValueError):
    """Raised when an operation conflicts with the current state of a resource.

    The caller supplies the full, context-specific message.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)
|