# What does this PR do?

This commit introduces a new FastAPI router-based system for defining API endpoints, enabling a migration path away from the legacy @webmethod decorator system. The implementation includes the router infrastructure, migration of the Batches API as the first example, and updates to the server, OpenAPI generation, and inspection systems to support both routing approaches.

The router infrastructure consists of a router registry that allows APIs to register FastAPI router factories, which are then automatically discovered and included in the server application. Standard error responses are centralized in router_utils to ensure consistent OpenAPI specification generation with proper $ref references to component responses.

The Batches API has been migrated to demonstrate the new pattern. The protocol definition and models remain in llama_stack_api/batches, maintaining a clear separation between API contracts and server implementation. The FastAPI router implementation lives in llama_stack/core/server/routers/batches, following the established pattern where API contracts are defined in llama_stack_api and server routing logic lives in llama_stack/core/server.

The server now checks for registered routers before falling back to the legacy webmethod-based route discovery, ensuring backward compatibility during the migration period. The OpenAPI generator has been updated to handle both router-based and webmethod-based routes, correctly extracting metadata from FastAPI route decorators and Pydantic Field descriptions. The inspect endpoint now includes routes from both systems, with proper filtering for deprecated routes and API levels.

Response descriptions are now explicitly defined in router decorators, ensuring the generated OpenAPI specification matches the previous format. Error responses use $ref references to component responses (BadRequest400, TooManyRequests429, etc.) as required by the specification. This is neat and will allow us to remove a lot of boilerplate code from our generator once the migration is done.

This implementation provides a foundation for incrementally migrating other APIs to the router system while maintaining full backward compatibility with existing webmethod-based APIs. A minimal sketch of the registration pattern is shown below.

Closes: https://github.com/llamastack/llama-stack/issues/4188
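For illustration, here is a minimal sketch of the router-factory registration pattern described above. The names `ROUTER_REGISTRY`, `register_router`, and `build_batches_router` are hypothetical; the actual registry API in this PR may differ:

```python
# Sketch only: illustrates the registry/factory pattern, not the exact PR API.
from collections.abc import Callable

from fastapi import APIRouter, FastAPI

# Hypothetical registry mapping an API name to a factory that builds its router.
ROUTER_REGISTRY: dict[str, Callable[[], APIRouter]] = {}


def register_router(api_name: str, factory: Callable[[], APIRouter]) -> None:
    ROUTER_REGISTRY[api_name] = factory


def build_batches_router() -> APIRouter:
    router = APIRouter(prefix="/v1", tags=["Batches"])

    @router.post(
        "/batches",
        # Response descriptions live on the decorator so the generated spec
        # matches the previous format; standard error responses would come
        # from a shared table so they emit $ref component references.
        response_description="The created batch object.",
    )
    async def create_batch() -> dict:  # request/response models elided
        return {}

    return router


register_router("batches", build_batches_router)

# At startup the server includes every registered router and falls back to
# legacy @webmethod discovery for APIs that have not been migrated yet.
app = FastAPI()
for factory in ROUTER_REGISTRY.values():
    app.include_router(factory())
```

Because migrated routers are plain FastAPI objects, the server can include them directly and leave the legacy webmethod discovery for everything else, which is what gives the incremental migration path.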
## Test Plan

CI; the server should start and the same routes should be visible.

```
curl http://localhost:8321/v1/inspect/routes | jq '.data[] | select(.route | contains("batches"))'
```

Also:

```
uv run pytest tests/integration/batches/ -vv --stack-config=http://localhost:8321
================================================== test session starts ==================================================
platform darwin -- Python 3.12.8, pytest-8.4.2, pluggy-1.6.0 -- /Users/leseb/Documents/AI/llama-stack/.venv/bin/python3
cachedir: .pytest_cache
metadata: {'Python': '3.12.8', 'Platform': 'macOS-26.0.1-arm64-arm-64bit', 'Packages': {'pytest': '8.4.2', 'pluggy': '1.6.0'}, 'Plugins': {'anyio': '4.9.0', 'html': '4.1.1', 'socket': '0.7.0', 'asyncio': '1.1.0', 'json-report': '1.5.0', 'timeout': '2.4.0', 'metadata': '3.1.1', 'cov': '6.2.1', 'nbval': '0.11.0'}}
rootdir: /Users/leseb/Documents/AI/llama-stack
configfile: pyproject.toml
plugins: anyio-4.9.0, html-4.1.1, socket-0.7.0, asyncio-1.1.0, json-report-1.5.0, timeout-2.4.0, metadata-3.1.1, cov-6.2.1, nbval-0.11.0
asyncio: mode=Mode.AUTO, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function
collected 24 items

tests/integration/batches/test_batches.py::TestBatchesIntegration::test_batch_creation_and_retrieval[None] SKIPPED [ 4%]
tests/integration/batches/test_batches.py::TestBatchesIntegration::test_batch_listing[None] SKIPPED [ 8%]
tests/integration/batches/test_batches.py::TestBatchesIntegration::test_batch_immediate_cancellation[None] SKIPPED [ 12%]
tests/integration/batches/test_batches.py::TestBatchesIntegration::test_batch_e2e_chat_completions[None] SKIPPED [ 16%]
tests/integration/batches/test_batches.py::TestBatchesIntegration::test_batch_e2e_completions[None] SKIPPED [ 20%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_invalid_endpoint[None] SKIPPED [ 25%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_cancel_completed[None] SKIPPED [ 29%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_missing_required_fields[None] SKIPPED [ 33%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_invalid_completion_window[None] SKIPPED [ 37%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_streaming_not_supported[None] SKIPPED [ 41%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_mixed_streaming_requests[None] SKIPPED [ 45%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_endpoint_mismatch[None] SKIPPED [ 50%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_missing_required_body_fields[None] SKIPPED [ 54%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_invalid_metadata_types[None] SKIPPED [ 58%]
tests/integration/batches/test_batches.py::TestBatchesIntegration::test_batch_e2e_embeddings[None] SKIPPED [ 62%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_nonexistent_file_id PASSED [ 66%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_malformed_jsonl PASSED [ 70%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_file_malformed_batch_file[empty] XFAIL [ 75%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_file_malformed_batch_file[malformed] XFAIL [ 79%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_retrieve_nonexistent PASSED [ 83%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_cancel_nonexistent PASSED [ 87%]
tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_error_handling_invalid_model PASSED [ 91%]
tests/integration/batches/test_batches_idempotency.py::TestBatchesIdempotencyIntegration::test_idempotent_batch_creation_successful PASSED [ 95%]
tests/integration/batches/test_batches_idempotency.py::TestBatchesIdempotencyIntegration::test_idempotency_conflict_with_different_params PASSED [100%]

================================================= slowest 10 durations ==================================================
1.01s call tests/integration/batches/test_batches_idempotency.py::TestBatchesIdempotencyIntegration::test_idempotent_batch_creation_successful
0.21s call tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_nonexistent_file_id
0.17s call tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_malformed_jsonl
0.12s call tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_error_handling_invalid_model
0.05s setup tests/integration/batches/test_batches.py::TestBatchesIntegration::test_batch_creation_and_retrieval[None]
0.02s call tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_file_malformed_batch_file[empty]
0.01s call tests/integration/batches/test_batches_idempotency.py::TestBatchesIdempotencyIntegration::test_idempotency_conflict_with_different_params
0.01s call tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_file_malformed_batch_file[malformed]
0.01s call tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_retrieve_nonexistent
0.00s call tests/integration/batches/test_batches_errors.py::TestBatchesErrorHandling::test_batch_cancel_nonexistent
======================================= 7 passed, 15 skipped, 2 xfailed in 1.78s ========================================
```

---------

Signed-off-by: Sébastien Han <seb@redhat.com>
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

"""
|
|
Tests for idempotency functionality in the reference batches provider.
|
|
|
|
This module tests the optional idempotency feature that allows clients to provide
|
|
an idempotency key (idempotency_key) to ensure that repeated requests with the same key
|
|
and parameters return the same batch, while requests with the same key but different
|
|
parameters result in a conflict error.
|
|
|
|
Test Categories:
|
|
1. Core Idempotency: Same parameters with same key return same batch
|
|
2. Parameter Independence: Different parameters without keys create different batches
|
|
3. Conflict Detection: Same key with different parameters raises ConflictError
|
|
|
|
Tests by Category:
|
|
|
|
1. Core Idempotency:
|
|
- test_idempotent_batch_creation_same_params
|
|
- test_idempotent_batch_creation_metadata_order_independence
|
|
|
|
2. Parameter Independence:
|
|
- test_non_idempotent_behavior_without_key
|
|
- test_different_idempotency_keys_create_different_batches
|
|
|
|
3. Conflict Detection:
|
|
- test_same_idempotency_key_different_params_conflict (parametrized: input_file_id, metadata values, metadata None vs {})
|
|
|
|
Key Behaviors Tested:
|
|
- Idempotent batch creation when idempotency_key provided with identical parameters
|
|
- Metadata order independence for consistent batch ID generation
|
|
- Non-idempotent behavior when no idempotency_key provided (random UUIDs)
|
|
- Conflict detection for parameter mismatches with same idempotency key
|
|
- Deterministic ID generation based solely on idempotency key
|
|
- Proper error handling with detailed conflict messages including key and error codes
|
|
- Protection against idempotency key reuse with different request parameters
|
|
"""
|
|
|
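# For intuition on "deterministic ID generation based solely on the idempotency
# key" above, a sketch of how a provider could derive a stable batch ID
# (illustrative assumption only -- the actual implementation may differ):
#
#     import hashlib
#     batch_id = "batch-" + hashlib.sha256(idempotency_key.encode()).hexdigest()[:24]
#
# With the ID a pure function of the key, reusing a key with different request
# parameters cannot silently create a second batch, so the provider must raise
# ConflictError instead.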

import asyncio

import pytest

from llama_stack_api import ConflictError
from llama_stack_api.batches.models import CreateBatchRequest, RetrieveBatchRequest

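# NOTE: the `provider` and `sample_batch_data` fixtures used below are assumed
# to be supplied by the suite's conftest (not shown in this file).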
class TestReferenceBatchesIdempotency:
    """Test suite for idempotency functionality in the reference implementation."""

    async def test_idempotent_batch_creation_same_params(self, provider, sample_batch_data):
        """Test that creating batches with identical parameters returns the same batch when idempotency_key is provided."""

        del sample_batch_data["metadata"]

        batch1 = await provider.create_batch(
            CreateBatchRequest(
                **sample_batch_data,
                metadata={"test": "value1", "other": "value2"},
                idempotency_key="unique-token-1",
            )
        )

        # sleep for 1 second so created_at timestamps would differ if the
        # second call actually created a new batch
        await asyncio.sleep(1)

        batch2 = await provider.create_batch(
            CreateBatchRequest(
                **sample_batch_data,
                metadata={"other": "value2", "test": "value1"},  # Different order
                idempotency_key="unique-token-1",
            )
        )

        assert batch1.id == batch2.id
        assert batch1.input_file_id == batch2.input_file_id
        assert batch1.metadata == batch2.metadata
        assert batch1.created_at == batch2.created_at

    async def test_different_idempotency_keys_create_different_batches(self, provider, sample_batch_data):
        """Test that different idempotency keys create different batches even with the same params."""
        batch1 = await provider.create_batch(CreateBatchRequest(**sample_batch_data, idempotency_key="token-A"))

        batch2 = await provider.create_batch(CreateBatchRequest(**sample_batch_data, idempotency_key="token-B"))

        assert batch1.id != batch2.id

    async def test_non_idempotent_behavior_without_key(self, provider, sample_batch_data):
        """Test that batches without an idempotency key are unique even with identical parameters."""
        batch1 = await provider.create_batch(CreateBatchRequest(**sample_batch_data))

        batch2 = await provider.create_batch(CreateBatchRequest(**sample_batch_data))

        assert batch1.id != batch2.id
        assert batch1.input_file_id == batch2.input_file_id
        assert batch1.endpoint == batch2.endpoint
        assert batch1.completion_window == batch2.completion_window
        assert batch1.metadata == batch2.metadata

    @pytest.mark.parametrize(
        "param_name,first_value,second_value",
        [
            ("input_file_id", "file_001", "file_002"),
            ("metadata", {"test": "value1"}, {"test": "value2"}),
            ("metadata", None, {}),
        ],
    )
    async def test_same_idempotency_key_different_params_conflict(
        self, provider, sample_batch_data, param_name, first_value, second_value
    ):
        """Test that the same idempotency_key with different parameters raises a conflict error."""
        sample_batch_data["idempotency_key"] = "same-token"

        sample_batch_data[param_name] = first_value

        batch1 = await provider.create_batch(CreateBatchRequest(**sample_batch_data))

        with pytest.raises(ConflictError, match="Idempotency key.*was previously used with different parameters"):
            sample_batch_data[param_name] = second_value
            await provider.create_batch(CreateBatchRequest(**sample_batch_data))

        # The original batch is unchanged by the rejected request.
        retrieved_batch = await provider.retrieve_batch(RetrieveBatchRequest(batch_id=batch1.id))
        assert retrieved_batch.id == batch1.id
        assert getattr(retrieved_batch, param_name) == first_value