chore: extract the protocol into its own file

The protocol lives in api.py now.

Signed-off-by: Sébastien Han <seb@redhat.com>
Sébastien Han 2025-12-02 15:19:41 +01:00
parent 1ffaa04f09
commit 7b93964a16
2 changed files with 59 additions and 39 deletions
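
Since __init__.py keeps re-exporting the protocol from its new module, both import paths below should resolve to the same class object; a minimal sketch, using only the module paths visible in the diff:

from llama_stack_api.batches.api import Batches as CanonicalBatches  # new home of the protocol
from llama_stack_api.batches import Batches as ReexportedBatches  # still available via the package

assert CanonicalBatches is ReexportedBatches  # the re-export aliases the same class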

llama_stack_api/batches/__init__.py

@@ -11,13 +11,14 @@ Pydantic models are defined in llama_stack_api.batches.models.
 The FastAPI router is defined in llama_stack_api.batches.fastapi_routes.
 """

-from typing import Protocol, runtime_checkable
-
 try:
     from openai.types import Batch as BatchObject
 except ImportError as e:
     raise ImportError("OpenAI package is required for batches API. Please install it with: pip install openai") from e

+# Import protocol for re-export
+from llama_stack_api.batches.api import Batches
+
 # Import models for re-export
 from llama_stack_api.batches.models import (
     CancelBatchRequest,
@@ -27,43 +28,6 @@ from llama_stack_api.batches.models import (
     RetrieveBatchRequest,
 )

-
-@runtime_checkable
-class Batches(Protocol):
-    """
-    The Batches API enables efficient processing of multiple requests in a single operation,
-    particularly useful for processing large datasets, batch evaluation workflows, and
-    cost-effective inference at scale.
-
-    The API is designed to allow use of openai client libraries for seamless integration.
-
-    This API provides the following extensions:
-     - idempotent batch creation
-
-    Note: This API is currently under active development and may undergo changes.
-    """
-
-    async def create_batch(
-        self,
-        request: CreateBatchRequest,
-    ) -> BatchObject: ...
-
-    async def retrieve_batch(
-        self,
-        request: RetrieveBatchRequest,
-    ) -> BatchObject: ...
-
-    async def cancel_batch(
-        self,
-        request: CancelBatchRequest,
-    ) -> BatchObject: ...
-
-    async def list_batches(
-        self,
-        request: ListBatchesRequest,
-    ) -> ListBatchesResponse: ...
-
 __all__ = [
     "Batches",
     "BatchObject",

llama_stack_api/batches/api.py

@@ -0,0 +1,56 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Protocol, runtime_checkable
+
+try:
+    from openai.types import Batch as BatchObject
+except ImportError as e:
+    raise ImportError("OpenAI package is required for batches API. Please install it with: pip install openai") from e
+
+from llama_stack_api.batches.models import (
+    CancelBatchRequest,
+    CreateBatchRequest,
+    ListBatchesRequest,
+    ListBatchesResponse,
+    RetrieveBatchRequest,
+)
+
+
+@runtime_checkable
+class Batches(Protocol):
+    """
+    The Batches API enables efficient processing of multiple requests in a single operation,
+    particularly useful for processing large datasets, batch evaluation workflows, and
+    cost-effective inference at scale.
+
+    The API is designed to allow use of openai client libraries for seamless integration.
+
+    This API provides the following extensions:
+     - idempotent batch creation
+
+    Note: This API is currently under active development and may undergo changes.
+    """
+
+    async def create_batch(
+        self,
+        request: CreateBatchRequest,
+    ) -> BatchObject: ...
+
+    async def retrieve_batch(
+        self,
+        request: RetrieveBatchRequest,
+    ) -> BatchObject: ...
+
+    async def cancel_batch(
+        self,
+        request: CancelBatchRequest,
+    ) -> BatchObject: ...
+
+    async def list_batches(
+        self,
+        request: ListBatchesRequest,
+    ) -> ListBatchesResponse: ...
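
Since the class is decorated with @runtime_checkable, isinstance() checks against it work at runtime; note that such checks only confirm the method names exist, not their signatures. A minimal sketch with a hypothetical stub provider:

from llama_stack_api.batches.api import Batches

class StubBatches:
    # Hypothetical stand-in: satisfies the protocol structurally, without inheriting from it.
    async def create_batch(self, request): ...
    async def retrieve_batch(self, request): ...
    async def cancel_batch(self, request): ...
    async def list_batches(self, request): ...

assert isinstance(StubBatches(), Batches)  # passes: all four method names are present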