From a078f089d9070d5618d185fb9dfdbc53f5e3c34f Mon Sep 17 00:00:00 2001
From: Charlie Doern
Date: Thu, 13 Nov 2025 18:04:36 -0500
Subject: [PATCH] fix: rename llama_stack_api dir (#4155)

# What does this PR do?

The directory structure was src/llama-stack-api/llama_stack_api; instead, it should just be src/llama_stack_api, to match the other packages. Update the directory structure and the pyproject/linting config accordingly.

---------

Signed-off-by: Charlie Doern
Co-authored-by: Ashwin Bharambe
---
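Note for reviewers (placed below the `---` marker, so `git am` drops it along with the diffstat): the bulk of the hunks below are mechanical import re-sorts. With the package now living at src/llama_stack_api and registered as an editable source under [tool.uv.sources], the linters appear to treat llama_stack_api as first-party, so its imports move out of the third-party block and into the first-party block after the llama_stack imports. A minimal sketch for sanity-checking the new flat layout locally follows; the editable-install command and the printed path are assumptions about a local checkout, not something this patch adds:

    # sanity check: does `llama_stack_api` resolve from the flat src/ layout?
    # (assumes this branch is checked out and `uv pip install -e src/llama_stack_api` has run)
    import importlib.util

    spec = importlib.util.find_spec("llama_stack_api")  # locate the installed package
    assert spec is not None and spec.origin is not None
    print(spec.origin)  # expected to end with src/llama_stack_api/__init__.py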
 .github/workflows/python-build-test.yml | 4 +-
 .pre-commit-config.yaml | 2 +-
 pyproject.toml | 12 +-
 scripts/generate_prompt_format.py | 2 +-
 src/llama_stack/cli/stack/_list_deps.py | 2 +-
 src/llama_stack/cli/stack/utils.py | 2 +-
 src/llama_stack/core/build.py | 2 +-
 src/llama_stack/core/client.py | 3 +-
 src/llama_stack/core/configure.py | 3 +-
 .../core/conversations/conversations.py | 14 +-
 src/llama_stack/core/datatypes.py | 18 +-
 src/llama_stack/core/distribution.py | 10 +-
 src/llama_stack/core/external.py | 2 +-
 src/llama_stack/core/inspect.py | 10 +-
 src/llama_stack/core/library_client.py | 1 +
 src/llama_stack/core/prompts/prompts.py | 2 +-
 src/llama_stack/core/providers.py | 2 +-
 src/llama_stack/core/resolver.py | 27 +-
 src/llama_stack/core/routers/__init__.py | 3 +-
 src/llama_stack/core/routers/datasets.py | 3 +-
 src/llama_stack/core/routers/eval_scoring.py | 3 +-
 src/llama_stack/core/routers/inference.py | 20 +-
 src/llama_stack/core/routers/safety.py | 3 +-
 src/llama_stack/core/routers/tool_runtime.py | 3 +-
 src/llama_stack/core/routers/vector_io.py | 6 +-
 .../core/routing_tables/benchmarks.py | 3 +-
 src/llama_stack/core/routing_tables/common.py | 3 +-
 .../core/routing_tables/datasets.py | 9 +-
 src/llama_stack/core/routing_tables/models.py | 15 +-
 .../core/routing_tables/scoring_functions.py | 9 +-
 .../core/routing_tables/shields.py | 3 +-
 .../core/routing_tables/toolgroups.py | 5 +-
 .../core/routing_tables/vector_stores.py | 10 +-
 src/llama_stack/core/server/auth_providers.py | 2 +-
 src/llama_stack/core/server/routes.py | 2 +-
 src/llama_stack/core/server/server.py | 2 +-
 src/llama_stack/core/stack.py | 44 +-
 src/llama_stack/core/telemetry/telemetry.py | 2 +-
 src/llama_stack/distributions/dell/dell.py | 3 +-
 .../meta-reference-gpu/meta_reference.py | 3 +-
 .../open-benchmark/open_benchmark.py | 3 +-
 .../distributions/starter/starter.py | 3 +-
 src/llama_stack/distributions/template.py | 2 +-
 .../inline/agents/meta_reference/agents.py | 9 +-
 .../responses/openai_responses.py | 14 +-
 .../meta_reference/responses/streaming.py | 9 +-
 .../meta_reference/responses/tool_executor.py | 5 +-
 .../agents/meta_reference/responses/types.py | 5 +-
 .../inline/agents/meta_reference/safety.py | 3 +-
 .../inline/batches/reference/__init__.py | 3 +-
 .../inline/batches/reference/batches.py | 10 +-
 .../inline/datasetio/localfs/datasetio.py | 3 +-
 .../inline/eval/meta_reference/eval.py | 8 +-
 .../providers/inline/files/localfs/files.py | 16 +-
 .../inline/inference/meta_reference/config.py | 2 +-
 .../inference/meta_reference/generators.py | 16 +-
 .../inference/meta_reference/inference.py | 38 +-
 .../sentence_transformers.py | 9 +-
 .../inline/post_training/common/validator.py | 3 +-
 .../huggingface/post_training.py | 11 +-
 .../recipes/finetune_single_device.py | 16 +-
 .../recipes/finetune_single_device_dpo.py | 14 +-
 .../inline/post_training/huggingface/utils.py | 3 +-
 .../post_training/torchtune/common/utils.py | 2 +-
 .../post_training/torchtune/post_training.py | 11 +-
 .../recipes/lora_finetuning_single_device.py | 22 +-
 .../safety/code_scanner/code_scanner.py | 9 +-
 .../inline/safety/llama_guard/llama_guard.py | 15 +-
 .../safety/prompt_guard/prompt_guard.py | 10 +-
 .../providers/inline/scoring/basic/scoring.py | 11 +-
 .../basic/scoring_fn/docvqa_scoring_fn.py | 3 +-
 .../basic/scoring_fn/equality_scoring_fn.py | 3 +-
 .../basic/scoring_fn/ifeval_scoring_fn.py | 3 +-
 .../regex_parser_math_response_scoring_fn.py | 3 +-
 .../scoring_fn/regex_parser_scoring_fn.py | 3 +-
 .../basic/scoring_fn/subset_of_scoring_fn.py | 3 +-
 .../inline/scoring/braintrust/braintrust.py | 20 +-
 .../inline/scoring/llm_as_judge/scoring.py | 11 +-
 .../scoring_fn/llm_as_judge_scoring_fn.py | 3 +-
 .../tool_runtime/rag/context_retriever.py | 8 +-
 .../inline/tool_runtime/rag/memory.py | 10 +-
 .../inline/vector_io/chroma/config.py | 2 +-
 .../inline/vector_io/faiss/config.py | 2 +-
 .../providers/inline/vector_io/faiss/faiss.py | 14 +-
 .../inline/vector_io/milvus/config.py | 2 +-
 .../inline/vector_io/qdrant/config.py | 2 +-
 .../inline/vector_io/sqlite_vec/sqlite_vec.py | 20 +-
 src/llama_stack/providers/registry/agents.py | 3 +-
 src/llama_stack/providers/registry/files.py | 3 +-
 .../providers/registry/tool_runtime.py | 3 +-
 .../datasetio/huggingface/huggingface.py | 3 +-
 .../remote/datasetio/nvidia/datasetio.py | 1 +
 .../providers/remote/eval/nvidia/eval.py | 4 +-
 .../providers/remote/files/openai/files.py | 12 +-
 .../providers/remote/files/s3/files.py | 13 +-
 .../remote/inference/anthropic/config.py | 2 +-
 .../remote/inference/azure/config.py | 2 +-
 .../remote/inference/bedrock/bedrock.py | 10 +-
 .../remote/inference/cerebras/cerebras.py | 3 +-
 .../remote/inference/cerebras/config.py | 2 +-
 .../remote/inference/databricks/config.py | 2 +-
 .../remote/inference/databricks/databricks.py | 2 +-
 .../remote/inference/fireworks/config.py | 2 +-
 .../remote/inference/gemini/config.py | 2 +-
 .../remote/inference/gemini/gemini.py | 3 +-
 .../providers/remote/inference/groq/config.py | 2 +-
 .../inference/llama_openai_compat/config.py | 2 +-
 .../inference/llama_openai_compat/llama.py | 7 +-
 .../remote/inference/nvidia/config.py | 2 +-
 .../remote/inference/nvidia/nvidia.py | 6 +-
 .../providers/remote/inference/oci/config.py | 2 +-
 .../providers/remote/inference/oci/oci.py | 10 +-
 .../remote/inference/ollama/ollama.py | 10 +-
 .../remote/inference/openai/config.py | 2 +-
 .../remote/inference/passthrough/config.py | 2 +-
 .../inference/passthrough/passthrough.py | 6 +-
 .../remote/inference/runpod/config.py | 2 +-
 .../remote/inference/runpod/runpod.py | 3 +-
 .../remote/inference/sambanova/config.py | 2 +-
 .../providers/remote/inference/tgi/config.py | 2 +-
 .../providers/remote/inference/tgi/tgi.py | 8 +-
 .../remote/inference/together/config.py | 2 +-
 .../remote/inference/together/together.py | 12 +-
 .../remote/inference/vertexai/config.py | 2 +-
 .../providers/remote/inference/vllm/config.py | 2 +-
 .../providers/remote/inference/vllm/vllm.py | 8 +-
 .../remote/inference/watsonx/config.py | 2 +-
 .../remote/inference/watsonx/watsonx.py | 15 +-
 .../post_training/nvidia/post_training.py | 10 +-
 .../remote/post_training/nvidia/utils.py | 2 +-
 .../remote/safety/bedrock/bedrock.py | 5 +-
 .../providers/remote/safety/bedrock/config.py | 3 +-
 .../providers/remote/safety/nvidia/config.py | 3 +-
 .../providers/remote/safety/nvidia/nvidia.py | 4 +-
 .../remote/safety/sambanova/config.py | 3 +-
 .../remote/safety/sambanova/sambanova.py | 6 +-
 .../tool_runtime/bing_search/bing_search.py | 4 +-
 .../tool_runtime/brave_search/brave_search.py | 6 +-
 .../model_context_protocol.py | 7 +-
 .../tavily_search/tavily_search.py | 4 +-
 .../wolfram_alpha/wolfram_alpha.py | 4 +-
 .../remote/vector_io/chroma/chroma.py | 16 +-
 .../remote/vector_io/chroma/config.py | 2 +-
 .../remote/vector_io/milvus/config.py | 2 +-
 .../remote/vector_io/milvus/milvus.py | 22 +-
 .../remote/vector_io/pgvector/config.py | 2 +-
 .../remote/vector_io/pgvector/pgvector.py | 22 +-
 .../remote/vector_io/qdrant/config.py | 2 +-
 .../remote/vector_io/qdrant/qdrant.py | 18 +-
 .../remote/vector_io/weaviate/config.py | 2 +-
 .../remote/vector_io/weaviate/weaviate.py | 22 +-
 .../utils/common/data_schema_validator.py | 3 +-
 .../providers/utils/files/form_data.py | 3 +-
 .../utils/inference/inference_store.py | 10 +-
 .../utils/inference/litellm_openai_mixin.py | 14 +-
 .../utils/inference/model_registry.py | 2 +-
 .../utils/inference/openai_compat.py | 24 +-
 .../providers/utils/inference/openai_mixin.py | 16 +-
 .../utils/inference/prompt_adapter.py | 36 +-
 .../providers/utils/kvstore/sqlite/config.py | 3 +-
 .../utils/memory/openai_vector_store_mixin.py | 20 +-
 .../providers/utils/memory/vector_store.py | 18 +-
 .../utils/responses/responses_store.py | 7 +-
 .../utils/scoring/base_scoring_fn.py | 3 +-
 .../providers/utils/sqlstore/api.py | 3 +-
 .../utils/sqlstore/sqlalchemy_sqlstore.py | 2 +-
 src/llama_stack/providers/utils/tools/mcp.py | 16 +-
 .../README.md | 2 +-
 .../llama_stack_api/__init__.py | 2 +-
 .../llama_stack_api/agents.py | 0
 .../llama_stack_api/batches.py | 0
 .../llama_stack_api/benchmarks.py | 0
 .../llama_stack_api/common/__init__.py | 0
 .../llama_stack_api/common/content_types.py | 0
 .../llama_stack_api/common/errors.py | 0
 .../llama_stack_api/common/job_types.py | 0
 .../llama_stack_api/common/responses.py | 0
 .../llama_stack_api/common/tracing.py | 0
 .../llama_stack_api/common/training_types.py | 0
 .../llama_stack_api/common/type_system.py | 0
 .../llama_stack_api/conversations.py | 0
 .../llama_stack_api/datasetio.py | 0
 .../llama_stack_api/datasets.py | 0
 .../llama_stack_api/datatypes.py | 0
 .../llama_stack_api/eval.py | 0
 .../llama_stack_api/files.py | 0
 .../llama_stack_api/inference.py | 0
 .../llama_stack_api/inspect.py | 0
 .../llama_stack_api/models.py | 0
 .../llama_stack_api/openai_responses.py | 0
 .../llama_stack_api/post_training.py | 0
 .../llama_stack_api/prompts.py | 0
 .../llama_stack_api/providers.py | 0
 .../llama_stack_api/py.typed | 0
 .../pyproject.toml | 2 +-
 .../llama_stack_api/rag_tool.py | 0
 .../llama_stack_api/resource.py | 0
 .../llama_stack_api/safety.py | 0
 .../llama_stack_api/schema_utils.py | 0
 .../llama_stack_api/scoring.py | 0
 .../llama_stack_api/scoring_functions.py | 0
 .../llama_stack_api/shields.py | 0
 .../llama_stack_api/strong_typing/__init__.py | 0
 .../strong_typing/auxiliary.py | 0
 .../llama_stack_api/strong_typing/classdef.py | 0
 .../llama_stack_api/strong_typing/core.py | 0
 .../strong_typing/deserializer.py | 0
 .../strong_typing/docstring.py | 0
 .../strong_typing/exception.py | 0
 .../strong_typing/inspection.py | 0
 .../llama_stack_api/strong_typing/mapping.py | 0
 .../llama_stack_api/strong_typing/name.py | 0
 .../llama_stack_api/strong_typing/py.typed | 0
 .../llama_stack_api/strong_typing/schema.py | 0
 .../strong_typing/serialization.py | 0
 .../strong_typing/serializer.py | 0
 .../llama_stack_api/strong_typing/slots.py | 0
 .../strong_typing/topological.py | 0
 .../llama_stack_api/tools.py | 0
 src/llama_stack_api/uv.lock | 498 ++++++++++++++++++
 .../llama_stack_api/vector_io.py | 0
 .../llama_stack_api/vector_stores.py | 0
 .../llama_stack_api/version.py | 0
 tests/integration/batches/conftest.py | 1 +
 tests/integration/files/test_files.py | 2 +-
 .../inference/test_provider_data_routing.py | 6 +-
 .../post_training/test_post_training.py | 4 +-
 tests/integration/safety/test_llama_guard.py | 2 +-
 tests/integration/safety/test_safety.py | 1 +
 .../integration/safety/test_vision_safety.py | 1 +
 .../tool_runtime/test_registration.py | 2 +-
 .../vector_io/test_openai_vector_stores.py | 2 +-
 tests/integration/vector_io/test_vector_io.py | 1 +
 .../unit/conversations/test_conversations.py | 2 +-
 tests/unit/core/routers/test_safety_router.py | 3 +-
 tests/unit/core/routers/test_vector_io.py | 7 +-
 tests/unit/core/test_stack_validation.py | 2 +-
 .../routers/test_routing_tables.py | 16 +-
 .../unit/distribution/test_api_recordings.py | 16 +-
 tests/unit/distribution/test_distribution.py | 20 +-
 tests/unit/files/test_files.py | 2 +-
 .../unit/providers/batches/test_reference.py | 1 +
 .../batches/test_reference_idempotency.py | 1 +
 tests/unit/providers/files/test_s3_files.py | 1 +
 .../providers/files/test_s3_files_auth.py | 2 +-
 .../inference/test_bedrock_adapter.py | 2 +-
 .../providers/inference/test_remote_vllm.py | 10 +-
 .../responses/test_streaming.py | 2 +-
 tests/unit/providers/nvidia/test_datastore.py | 2 +-
 tests/unit/providers/nvidia/test_eval.py | 8 +-
 .../unit/providers/nvidia/test_parameters.py | 12 +-
 .../providers/nvidia/test_rerank_inference.py | 2 +-
 tests/unit/providers/nvidia/test_safety.py | 6 +-
 .../nvidia/test_supervised_fine_tuning.py | 18 +-
 tests/unit/providers/test_bedrock.py | 3 +-
 .../utils/inference/test_openai_mixin.py | 2 +-
 .../utils/inference/test_prompt_adapter.py | 3 +-
 .../utils/memory/test_vector_store.py | 2 +-
 .../providers/utils/test_model_registry.py | 2 +-
 tests/unit/providers/vector_io/conftest.py | 2 +-
 tests/unit/providers/vector_io/test_faiss.py | 2 +-
 .../providers/vector_io/test_sqlite_vec.py | 2 +-
 .../test_vector_io_openai_vector_stores.py | 7 +-
 .../providers/vector_io/test_vector_utils.py | 3 +-
 tests/unit/rag/test_rag_query.py | 2 +-
 tests/unit/rag/test_vector_store.py | 2 +-
 tests/unit/registry/test_registry.py | 5 +-
 tests/unit/registry/test_registry_acl.py | 3 +-
 tests/unit/server/test_access_control.py | 2 +-
 tests/unit/server/test_resolver.py | 2 +-
 tests/unit/server/test_sse.py | 2 +-
 tests/unit/tools/test_tools_json_schema.py | 2 +-
 .../utils/inference/test_inference_store.py | 8 +-
 .../utils/responses/test_responses_store.py | 2 +-
 uv.lock | 8 +-
 275 files changed, 1187 insertions(+), 745 deletions(-)
 rename src/{llama-stack-api => llama_stack_api}/README.md (98%)
 rename src/{llama-stack-api => }/llama_stack_api/__init__.py (99%)
 rename src/{llama-stack-api => }/llama_stack_api/agents.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/batches.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/benchmarks.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/common/__init__.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/common/content_types.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/common/errors.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/common/job_types.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/common/responses.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/common/tracing.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/common/training_types.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/common/type_system.py (100%)
 rename src/{llama-stack-api => }/llama_stack_api/conversations.py (100%)
rename src/{llama-stack-api => }/llama_stack_api/datasetio.py (100%) rename src/{llama-stack-api => }/llama_stack_api/datasets.py (100%) rename src/{llama-stack-api => }/llama_stack_api/datatypes.py (100%) rename src/{llama-stack-api => }/llama_stack_api/eval.py (100%) rename src/{llama-stack-api => }/llama_stack_api/files.py (100%) rename src/{llama-stack-api => }/llama_stack_api/inference.py (100%) rename src/{llama-stack-api => }/llama_stack_api/inspect.py (100%) rename src/{llama-stack-api => }/llama_stack_api/models.py (100%) rename src/{llama-stack-api => }/llama_stack_api/openai_responses.py (100%) rename src/{llama-stack-api => }/llama_stack_api/post_training.py (100%) rename src/{llama-stack-api => }/llama_stack_api/prompts.py (100%) rename src/{llama-stack-api => }/llama_stack_api/providers.py (100%) rename src/{llama-stack-api => }/llama_stack_api/py.typed (100%) rename src/{llama-stack-api => llama_stack_api}/pyproject.toml (99%) rename src/{llama-stack-api => }/llama_stack_api/rag_tool.py (100%) rename src/{llama-stack-api => }/llama_stack_api/resource.py (100%) rename src/{llama-stack-api => }/llama_stack_api/safety.py (100%) rename src/{llama-stack-api => }/llama_stack_api/schema_utils.py (100%) rename src/{llama-stack-api => }/llama_stack_api/scoring.py (100%) rename src/{llama-stack-api => }/llama_stack_api/scoring_functions.py (100%) rename src/{llama-stack-api => }/llama_stack_api/shields.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/__init__.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/auxiliary.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/classdef.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/core.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/deserializer.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/docstring.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/exception.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/inspection.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/mapping.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/name.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/py.typed (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/schema.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/serialization.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/serializer.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/slots.py (100%) rename src/{llama-stack-api => }/llama_stack_api/strong_typing/topological.py (100%) rename src/{llama-stack-api => }/llama_stack_api/tools.py (100%) create mode 100644 src/llama_stack_api/uv.lock rename src/{llama-stack-api => }/llama_stack_api/vector_io.py (100%) rename src/{llama-stack-api => }/llama_stack_api/vector_stores.py (100%) rename src/{llama-stack-api => }/llama_stack_api/version.py (100%) diff --git a/.github/workflows/python-build-test.yml b/.github/workflows/python-build-test.yml index b0f2c6e69..b58f4eb69 100644 --- a/.github/workflows/python-build-test.yml +++ b/.github/workflows/python-build-test.yml @@ -31,7 +31,7 @@ jobs: version: 0.7.6 - name: Build Llama Stack API package - working-directory: src/llama-stack-api + working-directory: src/llama_stack_api run: uv build - name: Build Llama Stack package @@ -39,7 +39,7 @@ jobs: - name: Install Llama 
Stack package (with api stubs from local build) run: | - uv pip install --find-links src/llama-stack-api/dist dist/*.whl + uv pip install --find-links src/llama_stack_api/dist dist/*.whl - name: Verify Llama Stack package run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6f4dd6a0e..c60440173 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,7 +42,7 @@ repos: hooks: - id: ruff args: [ --fix ] - exclude: ^(src/llama_stack/strong_typing/.*|src/llama-stack-api/llama_stack_api/strong_typing/.*)$ + exclude: ^(src/llama_stack_api/strong_typing/.*)$ - id: ruff-format - repo: https://github.com/adamchainz/blacken-docs diff --git a/pyproject.toml b/pyproject.toml index d287b4be7..34728d6ea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -181,7 +181,7 @@ install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_p [tool.setuptools.packages.find] where = ["src"] -include = ["llama_stack", "llama_stack.*", "llama-stack-api", "llama-stack-api.*"] +include = ["llama_stack", "llama_stack.*", "llama_stack_api", "llama_stack_api.*"] [[tool.uv.index]] name = "pytorch-cpu" @@ -191,7 +191,7 @@ explicit = true [tool.uv.sources] torch = [{ index = "pytorch-cpu" }] torchvision = [{ index = "pytorch-cpu" }] -llama-stack-api = [{ path = "src/llama-stack-api", editable = true }] +llama-stack-api = [{ path = "src/llama_stack_api", editable = true }] [tool.ruff] line-length = 120 @@ -258,7 +258,7 @@ unfixable = [ ] # Using import * is acceptable (or at least tolerated) in an __init__.py of a package API [tool.mypy] -mypy_path = ["src", "src/llama-stack-api"] +mypy_path = ["src"] packages = ["llama_stack", "llama_stack_api"] plugins = ['pydantic.mypy'] disable_error_code = [] @@ -281,14 +281,12 @@ exclude = [ "^src/llama_stack/core/store/registry\\.py$", "^src/llama_stack/core/utils/exec\\.py$", "^src/llama_stack/core/utils/prompt_for_config\\.py$", - # Moved to llama-stack-api but still excluded "^src/llama_stack/models/llama/llama3/interface\\.py$", "^src/llama_stack/models/llama/llama3/tokenizer\\.py$", "^src/llama_stack/models/llama/llama3/tool_utils\\.py$", "^src/llama_stack/models/llama/llama3/generation\\.py$", "^src/llama_stack/models/llama/llama3/multimodal/model\\.py$", "^src/llama_stack/models/llama/llama4/", - "^src/llama-stack-api/llama_stack_api/core/telemetry/telemetry\\.py$", "^src/llama_stack/providers/inline/agents/meta_reference/", "^src/llama_stack/providers/inline/datasetio/localfs/", "^src/llama_stack/providers/inline/eval/meta_reference/eval\\.py$", @@ -342,9 +340,7 @@ exclude = [ "^src/llama_stack/providers/utils/telemetry/dataset_mixin\\.py$", "^src/llama_stack/providers/utils/telemetry/trace_protocol\\.py$", "^src/llama_stack/providers/utils/telemetry/tracing\\.py$", - "^src/llama-stack-api/llama_stack_api/core/telemetry/trace_protocol\\.py$", - "^src/llama-stack-api/llama_stack_api/core/telemetry/tracing\\.py$", - "^src/llama-stack-api/llama_stack_api/strong_typing/auxiliary\\.py$", + "^src/llama_stack_api/strong_typing/auxiliary\\.py$", "^src/llama_stack/distributions/template\\.py$", ] diff --git a/scripts/generate_prompt_format.py b/scripts/generate_prompt_format.py index 8099a3f0d..381bbc6a7 100755 --- a/scripts/generate_prompt_format.py +++ b/scripts/generate_prompt_format.py @@ -14,11 +14,11 @@ import os from pathlib import Path import fire -from llama_stack_api import ModelNotFoundError from llama_stack.models.llama.llama3.generation import Llama3 from llama_stack.models.llama.llama4.generation import Llama4 from 
llama_stack.models.llama.sku_list import resolve_model +from llama_stack_api import ModelNotFoundError THIS_DIR = Path(__file__).parent.resolve() diff --git a/src/llama_stack/cli/stack/_list_deps.py b/src/llama_stack/cli/stack/_list_deps.py index 50fe394fc..82bef1a4f 100644 --- a/src/llama_stack/cli/stack/_list_deps.py +++ b/src/llama_stack/cli/stack/_list_deps.py @@ -9,7 +9,6 @@ import sys from pathlib import Path import yaml -from llama_stack_api import Api from termcolor import cprint from llama_stack.cli.stack.utils import ImageType @@ -22,6 +21,7 @@ from llama_stack.core.datatypes import ( from llama_stack.core.distribution import get_provider_registry from llama_stack.core.stack import replace_env_vars from llama_stack.log import get_logger +from llama_stack_api import Api TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates" diff --git a/src/llama_stack/cli/stack/utils.py b/src/llama_stack/cli/stack/utils.py index 0a4e22b09..d49b142e0 100644 --- a/src/llama_stack/cli/stack/utils.py +++ b/src/llama_stack/cli/stack/utils.py @@ -11,7 +11,6 @@ from functools import lru_cache from pathlib import Path import yaml -from llama_stack_api import Api from termcolor import cprint from llama_stack.core.datatypes import ( @@ -33,6 +32,7 @@ from llama_stack.core.storage.datatypes import ( from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR, EXTERNAL_PROVIDERS_DIR from llama_stack.core.utils.dynamic import instantiate_class_type from llama_stack.core.utils.image_types import LlamaStackImageType +from llama_stack_api import Api TEMPLATES_PATH = Path(__file__).parent.parent.parent / "distributions" diff --git a/src/llama_stack/core/build.py b/src/llama_stack/core/build.py index 27ded7ede..630b2a47f 100644 --- a/src/llama_stack/core/build.py +++ b/src/llama_stack/core/build.py @@ -6,7 +6,6 @@ import sys -from llama_stack_api import Api from pydantic import BaseModel from termcolor import cprint @@ -14,6 +13,7 @@ from llama_stack.core.datatypes import BuildConfig from llama_stack.core.distribution import get_provider_registry from llama_stack.distributions.template import DistributionTemplate from llama_stack.log import get_logger +from llama_stack_api import Api log = get_logger(name=__name__, category="core") diff --git a/src/llama_stack/core/client.py b/src/llama_stack/core/client.py index 41acacdb5..ba935a35e 100644 --- a/src/llama_stack/core/client.py +++ b/src/llama_stack/core/client.py @@ -12,10 +12,11 @@ from enum import Enum from typing import Any, Union, get_args, get_origin import httpx -from llama_stack_api import RemoteProviderConfig from pydantic import BaseModel, parse_obj_as from termcolor import cprint +from llama_stack_api import RemoteProviderConfig + _CLIENT_CLASSES = {} diff --git a/src/llama_stack/core/configure.py b/src/llama_stack/core/configure.py index bdb3b9734..d738b8a61 100644 --- a/src/llama_stack/core/configure.py +++ b/src/llama_stack/core/configure.py @@ -6,8 +6,6 @@ import textwrap from typing import Any -from llama_stack_api import Api, ProviderSpec - from llama_stack.core.datatypes import ( LLAMA_STACK_RUN_CONFIG_VERSION, DistributionSpec, @@ -22,6 +20,7 @@ from llama_stack.core.stack import cast_image_name_to_string, replace_env_vars from llama_stack.core.utils.dynamic import instantiate_class_type from llama_stack.core.utils.prompt_for_config import prompt_for_config from llama_stack.log import get_logger +from llama_stack_api import Api, ProviderSpec logger = get_logger(name=__name__, category="core") diff --git 
a/src/llama_stack/core/conversations/conversations.py b/src/llama_stack/core/conversations/conversations.py index b94cd4fdd..4cf5a82ee 100644 --- a/src/llama_stack/core/conversations/conversations.py +++ b/src/llama_stack/core/conversations/conversations.py @@ -8,6 +8,13 @@ import secrets import time from typing import Any, Literal +from pydantic import BaseModel, TypeAdapter + +from llama_stack.core.datatypes import AccessRule, StackRunConfig +from llama_stack.log import get_logger +from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType +from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore +from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl from llama_stack_api import ( Conversation, ConversationDeletedResource, @@ -18,13 +25,6 @@ from llama_stack_api import ( Conversations, Metadata, ) -from pydantic import BaseModel, TypeAdapter - -from llama_stack.core.datatypes import AccessRule, StackRunConfig -from llama_stack.log import get_logger -from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType -from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore -from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl logger = get_logger(name=__name__, category="openai_conversations") diff --git a/src/llama_stack/core/datatypes.py b/src/llama_stack/core/datatypes.py index 4231363b6..1e29690ff 100644 --- a/src/llama_stack/core/datatypes.py +++ b/src/llama_stack/core/datatypes.py @@ -9,6 +9,15 @@ from pathlib import Path from typing import Annotated, Any, Literal, Self from urllib.parse import urlparse +from pydantic import BaseModel, Field, field_validator, model_validator + +from llama_stack.core.access_control.datatypes import AccessRule +from llama_stack.core.storage.datatypes import ( + KVStoreReference, + StorageBackendType, + StorageConfig, +) +from llama_stack.log import LoggingConfig from llama_stack_api import ( Api, Benchmark, @@ -35,15 +44,6 @@ from llama_stack_api import ( VectorStore, VectorStoreInput, ) -from pydantic import BaseModel, Field, field_validator, model_validator - -from llama_stack.core.access_control.datatypes import AccessRule -from llama_stack.core.storage.datatypes import ( - KVStoreReference, - StorageBackendType, - StorageConfig, -) -from llama_stack.log import LoggingConfig LLAMA_STACK_BUILD_CONFIG_VERSION = 2 LLAMA_STACK_RUN_CONFIG_VERSION = 2 diff --git a/src/llama_stack/core/distribution.py b/src/llama_stack/core/distribution.py index 162f9f2b0..658c75ef2 100644 --- a/src/llama_stack/core/distribution.py +++ b/src/llama_stack/core/distribution.py @@ -10,17 +10,17 @@ import os from typing import Any import yaml +from pydantic import BaseModel + +from llama_stack.core.datatypes import BuildConfig, DistributionSpec +from llama_stack.core.external import load_external_apis +from llama_stack.log import get_logger from llama_stack_api import ( Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec, ) -from pydantic import BaseModel - -from llama_stack.core.datatypes import BuildConfig, DistributionSpec -from llama_stack.core.external import load_external_apis -from llama_stack.log import get_logger logger = get_logger(name=__name__, category="core") diff --git a/src/llama_stack/core/external.py b/src/llama_stack/core/external.py index ce0c7eb72..d1a2d6e42 100644 --- a/src/llama_stack/core/external.py +++ b/src/llama_stack/core/external.py @@ -6,10 +6,10 @@ import yaml -from llama_stack_api import Api, 
ExternalApiSpec from llama_stack.core.datatypes import BuildConfig, StackRunConfig from llama_stack.log import get_logger +from llama_stack_api import Api, ExternalApiSpec logger = get_logger(name=__name__, category="core") diff --git a/src/llama_stack/core/inspect.py b/src/llama_stack/core/inspect.py index 53ddd3475..272c9d1bc 100644 --- a/src/llama_stack/core/inspect.py +++ b/src/llama_stack/core/inspect.py @@ -6,6 +6,11 @@ from importlib.metadata import version +from pydantic import BaseModel + +from llama_stack.core.datatypes import StackRunConfig +from llama_stack.core.external import load_external_apis +from llama_stack.core.server.routes import get_all_api_routes from llama_stack_api import ( HealthInfo, HealthStatus, @@ -14,11 +19,6 @@ from llama_stack_api import ( RouteInfo, VersionInfo, ) -from pydantic import BaseModel - -from llama_stack.core.datatypes import StackRunConfig -from llama_stack.core.external import load_external_apis -from llama_stack.core.server.routes import get_all_api_routes class DistributionInspectConfig(BaseModel): diff --git a/src/llama_stack/core/library_client.py b/src/llama_stack/core/library_client.py index 959284720..2a224d915 100644 --- a/src/llama_stack/core/library_client.py +++ b/src/llama_stack/core/library_client.py @@ -18,6 +18,7 @@ from typing import Any, TypeVar, Union, get_args, get_origin import httpx import yaml from fastapi import Response as FastAPIResponse + from llama_stack_api import is_unwrapped_body_param try: diff --git a/src/llama_stack/core/prompts/prompts.py b/src/llama_stack/core/prompts/prompts.py index d9532b978..9f532c1cd 100644 --- a/src/llama_stack/core/prompts/prompts.py +++ b/src/llama_stack/core/prompts/prompts.py @@ -7,11 +7,11 @@ import json from typing import Any -from llama_stack_api import ListPromptsResponse, Prompt, Prompts from pydantic import BaseModel from llama_stack.core.datatypes import StackRunConfig from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl +from llama_stack_api import ListPromptsResponse, Prompt, Prompts class PromptServiceConfig(BaseModel): diff --git a/src/llama_stack/core/providers.py b/src/llama_stack/core/providers.py index 7337d9e35..e3fe3c7b3 100644 --- a/src/llama_stack/core/providers.py +++ b/src/llama_stack/core/providers.py @@ -7,10 +7,10 @@ import asyncio from typing import Any -from llama_stack_api import HealthResponse, HealthStatus, ListProvidersResponse, ProviderInfo, Providers from pydantic import BaseModel from llama_stack.log import get_logger +from llama_stack_api import HealthResponse, HealthStatus, ListProvidersResponse, ProviderInfo, Providers from .datatypes import StackRunConfig from .utils.config import redact_sensitive_fields diff --git a/src/llama_stack/core/resolver.py b/src/llama_stack/core/resolver.py index ca154fbc6..6bc32c2d0 100644 --- a/src/llama_stack/core/resolver.py +++ b/src/llama_stack/core/resolver.py @@ -8,6 +8,19 @@ import importlib.metadata import inspect from typing import Any +from llama_stack.core.client import get_client_impl +from llama_stack.core.datatypes import ( + AccessRule, + AutoRoutedProviderSpec, + Provider, + RoutingTableProviderSpec, + StackRunConfig, +) +from llama_stack.core.distribution import builtin_automatically_routed_apis +from llama_stack.core.external import load_external_apis +from llama_stack.core.store import DistributionRegistry +from llama_stack.core.utils.dynamic import instantiate_class_type +from llama_stack.log import get_logger from llama_stack_api import ( LLAMA_STACK_API_V1ALPHA, Agents, @@ 
-48,20 +61,6 @@ from llama_stack_api import ( Providers as ProvidersAPI, ) -from llama_stack.core.client import get_client_impl -from llama_stack.core.datatypes import ( - AccessRule, - AutoRoutedProviderSpec, - Provider, - RoutingTableProviderSpec, - StackRunConfig, -) -from llama_stack.core.distribution import builtin_automatically_routed_apis -from llama_stack.core.external import load_external_apis -from llama_stack.core.store import DistributionRegistry -from llama_stack.core.utils.dynamic import instantiate_class_type -from llama_stack.log import get_logger - logger = get_logger(name=__name__, category="core") diff --git a/src/llama_stack/core/routers/__init__.py b/src/llama_stack/core/routers/__init__.py index c2d051422..289755bcb 100644 --- a/src/llama_stack/core/routers/__init__.py +++ b/src/llama_stack/core/routers/__init__.py @@ -6,8 +6,6 @@ from typing import Any -from llama_stack_api import Api, RoutingTable - from llama_stack.core.datatypes import ( AccessRule, RoutedProtocol, @@ -15,6 +13,7 @@ from llama_stack.core.datatypes import ( from llama_stack.core.stack import StackRunConfig from llama_stack.core.store import DistributionRegistry from llama_stack.providers.utils.inference.inference_store import InferenceStore +from llama_stack_api import Api, RoutingTable async def get_routing_table_impl( diff --git a/src/llama_stack/core/routers/datasets.py b/src/llama_stack/core/routers/datasets.py index dcf247874..b6a5f3b96 100644 --- a/src/llama_stack/core/routers/datasets.py +++ b/src/llama_stack/core/routers/datasets.py @@ -6,9 +6,8 @@ from typing import Any -from llama_stack_api import DatasetIO, DatasetPurpose, DataSource, PaginatedResponse, RoutingTable - from llama_stack.log import get_logger +from llama_stack_api import DatasetIO, DatasetPurpose, DataSource, PaginatedResponse, RoutingTable logger = get_logger(name=__name__, category="core::routers") diff --git a/src/llama_stack/core/routers/eval_scoring.py b/src/llama_stack/core/routers/eval_scoring.py index cbbbf5cc5..4d7269180 100644 --- a/src/llama_stack/core/routers/eval_scoring.py +++ b/src/llama_stack/core/routers/eval_scoring.py @@ -6,6 +6,7 @@ from typing import Any +from llama_stack.log import get_logger from llama_stack_api import ( BenchmarkConfig, Eval, @@ -18,8 +19,6 @@ from llama_stack_api import ( ScoringFnParams, ) -from llama_stack.log import get_logger - logger = get_logger(name=__name__, category="core::routers") diff --git a/src/llama_stack/core/routers/inference.py b/src/llama_stack/core/routers/inference.py index 292a7c4bb..719624e86 100644 --- a/src/llama_stack/core/routers/inference.py +++ b/src/llama_stack/core/routers/inference.py @@ -11,6 +11,16 @@ from datetime import UTC, datetime from typing import Annotated, Any from fastapi import Body +from openai.types.chat import ChatCompletionToolChoiceOptionParam as OpenAIChatCompletionToolChoiceOptionParam +from openai.types.chat import ChatCompletionToolParam as OpenAIChatCompletionToolParam +from pydantic import TypeAdapter + +from llama_stack.core.telemetry.telemetry import MetricEvent +from llama_stack.core.telemetry.tracing import enqueue_event, get_current_span +from llama_stack.log import get_logger +from llama_stack.models.llama.llama3.chat_format import ChatFormat +from llama_stack.models.llama.llama3.tokenizer import Tokenizer +from llama_stack.providers.utils.inference.inference_store import InferenceStore from llama_stack_api import ( HealthResponse, HealthStatus, @@ -39,16 +49,6 @@ from llama_stack_api import ( RerankResponse, 
RoutingTable, ) -from openai.types.chat import ChatCompletionToolChoiceOptionParam as OpenAIChatCompletionToolChoiceOptionParam -from openai.types.chat import ChatCompletionToolParam as OpenAIChatCompletionToolParam -from pydantic import TypeAdapter - -from llama_stack.core.telemetry.telemetry import MetricEvent -from llama_stack.core.telemetry.tracing import enqueue_event, get_current_span -from llama_stack.log import get_logger -from llama_stack.models.llama.llama3.chat_format import ChatFormat -from llama_stack.models.llama.llama3.tokenizer import Tokenizer -from llama_stack.providers.utils.inference.inference_store import InferenceStore logger = get_logger(name=__name__, category="core::routers") diff --git a/src/llama_stack/core/routers/safety.py b/src/llama_stack/core/routers/safety.py index f85bbb767..2bc99f14f 100644 --- a/src/llama_stack/core/routers/safety.py +++ b/src/llama_stack/core/routers/safety.py @@ -6,10 +6,9 @@ from typing import Any -from llama_stack_api import ModerationObject, OpenAIMessageParam, RoutingTable, RunShieldResponse, Safety, Shield - from llama_stack.core.datatypes import SafetyConfig from llama_stack.log import get_logger +from llama_stack_api import ModerationObject, OpenAIMessageParam, RoutingTable, RunShieldResponse, Safety, Shield logger = get_logger(name=__name__, category="core::routers") diff --git a/src/llama_stack/core/routers/tool_runtime.py b/src/llama_stack/core/routers/tool_runtime.py index 984a8e2a7..eccc05732 100644 --- a/src/llama_stack/core/routers/tool_runtime.py +++ b/src/llama_stack/core/routers/tool_runtime.py @@ -6,14 +6,13 @@ from typing import Any +from llama_stack.log import get_logger from llama_stack_api import ( URL, ListToolDefsResponse, ToolRuntime, ) -from llama_stack.log import get_logger - from ..routing_tables.toolgroups import ToolGroupsRoutingTable logger = get_logger(name=__name__, category="core::routers") diff --git a/src/llama_stack/core/routers/vector_io.py b/src/llama_stack/core/routers/vector_io.py index 47412c07f..5256dda44 100644 --- a/src/llama_stack/core/routers/vector_io.py +++ b/src/llama_stack/core/routers/vector_io.py @@ -9,6 +9,9 @@ import uuid from typing import Annotated, Any from fastapi import Body + +from llama_stack.core.datatypes import VectorStoresConfig +from llama_stack.log import get_logger from llama_stack_api import ( Chunk, HealthResponse, @@ -38,9 +41,6 @@ from llama_stack_api import ( VectorStoreSearchResponsePage, ) -from llama_stack.core.datatypes import VectorStoresConfig -from llama_stack.log import get_logger - logger = get_logger(name=__name__, category="core::routers") diff --git a/src/llama_stack/core/routing_tables/benchmarks.py b/src/llama_stack/core/routing_tables/benchmarks.py index 66830bc41..9037ffe8b 100644 --- a/src/llama_stack/core/routing_tables/benchmarks.py +++ b/src/llama_stack/core/routing_tables/benchmarks.py @@ -6,12 +6,11 @@ from typing import Any -from llama_stack_api import Benchmark, Benchmarks, ListBenchmarksResponse - from llama_stack.core.datatypes import ( BenchmarkWithOwner, ) from llama_stack.log import get_logger +from llama_stack_api import Benchmark, Benchmarks, ListBenchmarksResponse from .common import CommonRoutingTableImpl diff --git a/src/llama_stack/core/routing_tables/common.py b/src/llama_stack/core/routing_tables/common.py index cfbafc9a8..a9e3ff95f 100644 --- a/src/llama_stack/core/routing_tables/common.py +++ b/src/llama_stack/core/routing_tables/common.py @@ -6,8 +6,6 @@ from typing import Any -from llama_stack_api import Api, Model, 
ModelNotFoundError, ResourceType, RoutingTable - from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed from llama_stack.core.access_control.datatypes import Action from llama_stack.core.datatypes import ( @@ -20,6 +18,7 @@ from llama_stack.core.datatypes import ( from llama_stack.core.request_headers import get_authenticated_user from llama_stack.core.store import DistributionRegistry from llama_stack.log import get_logger +from llama_stack_api import Api, Model, ModelNotFoundError, ResourceType, RoutingTable logger = get_logger(name=__name__, category="core::routing_tables") diff --git a/src/llama_stack/core/routing_tables/datasets.py b/src/llama_stack/core/routing_tables/datasets.py index c49c9769b..62fd07b13 100644 --- a/src/llama_stack/core/routing_tables/datasets.py +++ b/src/llama_stack/core/routing_tables/datasets.py @@ -7,6 +7,10 @@ import uuid from typing import Any +from llama_stack.core.datatypes import ( + DatasetWithOwner, +) +from llama_stack.log import get_logger from llama_stack_api import ( Dataset, DatasetNotFoundError, @@ -20,11 +24,6 @@ from llama_stack_api import ( URIDataSource, ) -from llama_stack.core.datatypes import ( - DatasetWithOwner, -) -from llama_stack.log import get_logger - from .common import CommonRoutingTableImpl logger = get_logger(name=__name__, category="core::routing_tables") diff --git a/src/llama_stack/core/routing_tables/models.py b/src/llama_stack/core/routing_tables/models.py index e1210a139..1facbb27b 100644 --- a/src/llama_stack/core/routing_tables/models.py +++ b/src/llama_stack/core/routing_tables/models.py @@ -7,6 +7,13 @@ import time from typing import Any +from llama_stack.core.datatypes import ( + ModelWithOwner, + RegistryEntrySource, +) +from llama_stack.core.request_headers import PROVIDER_DATA_VAR, NeedsRequestProviderData +from llama_stack.core.utils.dynamic import instantiate_class_type +from llama_stack.log import get_logger from llama_stack_api import ( ListModelsResponse, Model, @@ -17,14 +24,6 @@ from llama_stack_api import ( OpenAIModel, ) -from llama_stack.core.datatypes import ( - ModelWithOwner, - RegistryEntrySource, -) -from llama_stack.core.request_headers import PROVIDER_DATA_VAR, NeedsRequestProviderData -from llama_stack.core.utils.dynamic import instantiate_class_type -from llama_stack.log import get_logger - from .common import CommonRoutingTableImpl, lookup_model logger = get_logger(name=__name__, category="core::routing_tables") diff --git a/src/llama_stack/core/routing_tables/scoring_functions.py b/src/llama_stack/core/routing_tables/scoring_functions.py index 66165ac2f..65ed26b85 100644 --- a/src/llama_stack/core/routing_tables/scoring_functions.py +++ b/src/llama_stack/core/routing_tables/scoring_functions.py @@ -4,6 +4,10 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
+from llama_stack.core.datatypes import ( + ScoringFnWithOwner, +) +from llama_stack.log import get_logger from llama_stack_api import ( ListScoringFunctionsResponse, ParamType, @@ -13,11 +17,6 @@ from llama_stack_api import ( ScoringFunctions, ) -from llama_stack.core.datatypes import ( - ScoringFnWithOwner, -) -from llama_stack.log import get_logger - from .common import CommonRoutingTableImpl logger = get_logger(name=__name__, category="core::routing_tables") diff --git a/src/llama_stack/core/routing_tables/shields.py b/src/llama_stack/core/routing_tables/shields.py index 0f981c49d..97b2efb96 100644 --- a/src/llama_stack/core/routing_tables/shields.py +++ b/src/llama_stack/core/routing_tables/shields.py @@ -6,12 +6,11 @@ from typing import Any -from llama_stack_api import ListShieldsResponse, ResourceType, Shield, Shields - from llama_stack.core.datatypes import ( ShieldWithOwner, ) from llama_stack.log import get_logger +from llama_stack_api import ListShieldsResponse, ResourceType, Shield, Shields from .common import CommonRoutingTableImpl diff --git a/src/llama_stack/core/routing_tables/toolgroups.py b/src/llama_stack/core/routing_tables/toolgroups.py index a552cb96e..7e2068608 100644 --- a/src/llama_stack/core/routing_tables/toolgroups.py +++ b/src/llama_stack/core/routing_tables/toolgroups.py @@ -6,6 +6,8 @@ from typing import Any +from llama_stack.core.datatypes import AuthenticationRequiredError, ToolGroupWithOwner +from llama_stack.log import get_logger from llama_stack_api import ( URL, ListToolDefsResponse, @@ -16,9 +18,6 @@ from llama_stack_api import ( ToolGroups, ) -from llama_stack.core.datatypes import AuthenticationRequiredError, ToolGroupWithOwner -from llama_stack.log import get_logger - from .common import CommonRoutingTableImpl logger = get_logger(name=__name__, category="core::routing_tables") diff --git a/src/llama_stack/core/routing_tables/vector_stores.py b/src/llama_stack/core/routing_tables/vector_stores.py index f95463b3c..93c119542 100644 --- a/src/llama_stack/core/routing_tables/vector_stores.py +++ b/src/llama_stack/core/routing_tables/vector_stores.py @@ -6,6 +6,11 @@ from typing import Any +from llama_stack.core.datatypes import ( + VectorStoreWithOwner, +) +from llama_stack.log import get_logger + # Removed VectorStores import to avoid exposing public API from llama_stack_api import ( ModelNotFoundError, @@ -23,11 +28,6 @@ from llama_stack_api import ( VectorStoreSearchResponsePage, ) -from llama_stack.core.datatypes import ( - VectorStoreWithOwner, -) -from llama_stack.log import get_logger - from .common import CommonRoutingTableImpl, lookup_model logger = get_logger(name=__name__, category="core::routing_tables") diff --git a/src/llama_stack/core/server/auth_providers.py b/src/llama_stack/core/server/auth_providers.py index a7f5d7916..66942dd39 100644 --- a/src/llama_stack/core/server/auth_providers.py +++ b/src/llama_stack/core/server/auth_providers.py @@ -11,7 +11,6 @@ from urllib.parse import parse_qs, urljoin, urlparse import httpx import jwt -from llama_stack_api import TokenValidationError from pydantic import BaseModel, Field from llama_stack.core.datatypes import ( @@ -23,6 +22,7 @@ from llama_stack.core.datatypes import ( User, ) from llama_stack.log import get_logger +from llama_stack_api import TokenValidationError logger = get_logger(name=__name__, category="core::auth") diff --git a/src/llama_stack/core/server/routes.py b/src/llama_stack/core/server/routes.py index e7a84937d..af5002565 100644 --- a/src/llama_stack/core/server/routes.py 
+++ b/src/llama_stack/core/server/routes.py @@ -10,10 +10,10 @@ from collections.abc import Callable from typing import Any from aiohttp import hdrs -from llama_stack_api import Api, ExternalApiSpec, WebMethod from starlette.routing import Route from llama_stack.core.resolver import api_protocol_map +from llama_stack_api import Api, ExternalApiSpec, WebMethod EndpointFunc = Callable[..., Any] PathParams = dict[str, str] diff --git a/src/llama_stack/core/server/server.py b/src/llama_stack/core/server/server.py index 8116348ec..0d3513980 100644 --- a/src/llama_stack/core/server/server.py +++ b/src/llama_stack/core/server/server.py @@ -28,7 +28,6 @@ from fastapi import Path as FastapiPath from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse, StreamingResponse -from llama_stack_api import Api, ConflictError, PaginatedResponse, ResourceNotFoundError from openai import BadRequestError from pydantic import BaseModel, ValidationError @@ -57,6 +56,7 @@ from llama_stack.core.utils.config import redact_sensitive_fields from llama_stack.core.utils.config_resolution import Mode, resolve_config_or_distro from llama_stack.core.utils.context import preserve_contexts_async_generator from llama_stack.log import LoggingConfig, get_logger, setup_logging +from llama_stack_api import Api, ConflictError, PaginatedResponse, ResourceNotFoundError from .auth import AuthenticationMiddleware from .quota import QuotaMiddleware diff --git a/src/llama_stack/core/stack.py b/src/llama_stack/core/stack.py index 674c35f31..00d990cb1 100644 --- a/src/llama_stack/core/stack.py +++ b/src/llama_stack/core/stack.py @@ -12,6 +12,28 @@ import tempfile from typing import Any import yaml + +from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl +from llama_stack.core.datatypes import Provider, SafetyConfig, StackRunConfig, VectorStoresConfig +from llama_stack.core.distribution import get_provider_registry +from llama_stack.core.inspect import DistributionInspectConfig, DistributionInspectImpl +from llama_stack.core.prompts.prompts import PromptServiceConfig, PromptServiceImpl +from llama_stack.core.providers import ProviderImpl, ProviderImplConfig +from llama_stack.core.resolver import ProviderRegistry, resolve_impls +from llama_stack.core.routing_tables.common import CommonRoutingTableImpl +from llama_stack.core.storage.datatypes import ( + InferenceStoreReference, + KVStoreReference, + ServerStoresConfig, + SqliteKVStoreConfig, + SqliteSqlStoreConfig, + SqlStoreReference, + StorageBackendConfig, + StorageConfig, +) +from llama_stack.core.store.registry import create_dist_registry +from llama_stack.core.utils.dynamic import instantiate_class_type +from llama_stack.log import get_logger from llama_stack_api import ( Agents, Api, @@ -37,28 +59,6 @@ from llama_stack_api import ( VectorIO, ) -from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl -from llama_stack.core.datatypes import Provider, SafetyConfig, StackRunConfig, VectorStoresConfig -from llama_stack.core.distribution import get_provider_registry -from llama_stack.core.inspect import DistributionInspectConfig, DistributionInspectImpl -from llama_stack.core.prompts.prompts import PromptServiceConfig, PromptServiceImpl -from llama_stack.core.providers import ProviderImpl, ProviderImplConfig -from llama_stack.core.resolver import ProviderRegistry, resolve_impls -from 
llama_stack.core.routing_tables.common import CommonRoutingTableImpl -from llama_stack.core.storage.datatypes import ( - InferenceStoreReference, - KVStoreReference, - ServerStoresConfig, - SqliteKVStoreConfig, - SqliteSqlStoreConfig, - SqlStoreReference, - StorageBackendConfig, - StorageConfig, -) -from llama_stack.core.store.registry import create_dist_registry -from llama_stack.core.utils.dynamic import instantiate_class_type -from llama_stack.log import get_logger - logger = get_logger(name=__name__, category="core") diff --git a/src/llama_stack/core/telemetry/telemetry.py b/src/llama_stack/core/telemetry/telemetry.py index 1a56277ea..5268fa641 100644 --- a/src/llama_stack/core/telemetry/telemetry.py +++ b/src/llama_stack/core/telemetry/telemetry.py @@ -16,7 +16,6 @@ from typing import ( cast, ) -from llama_stack_api import json_schema_type, register_schema from opentelemetry import metrics, trace from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter @@ -29,6 +28,7 @@ from pydantic import BaseModel, Field from llama_stack.log import get_logger from llama_stack.models.llama.datatypes import Primitive +from llama_stack_api import json_schema_type, register_schema ROOT_SPAN_MARKERS = ["__root__", "__root_span__"] diff --git a/src/llama_stack/distributions/dell/dell.py b/src/llama_stack/distributions/dell/dell.py index fd76e3ccb..52a07b7f1 100644 --- a/src/llama_stack/distributions/dell/dell.py +++ b/src/llama_stack/distributions/dell/dell.py @@ -4,8 +4,6 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from llama_stack_api import ModelType - from llama_stack.core.datatypes import ( BuildProvider, ModelInput, @@ -18,6 +16,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) from llama_stack.providers.remote.vector_io.chroma import ChromaVectorIOConfig +from llama_stack_api import ModelType def get_distribution_template() -> DistributionTemplate: diff --git a/src/llama_stack/distributions/meta-reference-gpu/meta_reference.py b/src/llama_stack/distributions/meta-reference-gpu/meta_reference.py index 67af0e92a..a515794d5 100644 --- a/src/llama_stack/distributions/meta-reference-gpu/meta_reference.py +++ b/src/llama_stack/distributions/meta-reference-gpu/meta_reference.py @@ -6,8 +6,6 @@ from pathlib import Path -from llama_stack_api import ModelType - from llama_stack.core.datatypes import ( BuildProvider, ModelInput, @@ -23,6 +21,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, ) from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig +from llama_stack_api import ModelType def get_distribution_template() -> DistributionTemplate: diff --git a/src/llama_stack/distributions/open-benchmark/open_benchmark.py b/src/llama_stack/distributions/open-benchmark/open_benchmark.py index 59deca6d0..1f4dbf2c2 100644 --- a/src/llama_stack/distributions/open-benchmark/open_benchmark.py +++ b/src/llama_stack/distributions/open-benchmark/open_benchmark.py @@ -5,8 +5,6 @@ # the root directory of this source tree. 
-from llama_stack_api import DatasetPurpose, ModelType, URIDataSource - from llama_stack.core.datatypes import ( BenchmarkInput, BuildProvider, @@ -34,6 +32,7 @@ from llama_stack.providers.remote.vector_io.pgvector.config import ( PGVectorVectorIOConfig, ) from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry +from llama_stack_api import DatasetPurpose, ModelType, URIDataSource def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]: diff --git a/src/llama_stack/distributions/starter/starter.py b/src/llama_stack/distributions/starter/starter.py index 1a8126290..4c21a8c99 100644 --- a/src/llama_stack/distributions/starter/starter.py +++ b/src/llama_stack/distributions/starter/starter.py @@ -7,8 +7,6 @@ from typing import Any -from llama_stack_api import RemoteProviderSpec - from llama_stack.core.datatypes import ( BuildProvider, Provider, @@ -39,6 +37,7 @@ from llama_stack.providers.remote.vector_io.qdrant.config import QdrantVectorIOC from llama_stack.providers.remote.vector_io.weaviate.config import WeaviateVectorIOConfig from llama_stack.providers.utils.kvstore.config import PostgresKVStoreConfig from llama_stack.providers.utils.sqlstore.sqlstore import PostgresSqlStoreConfig +from llama_stack_api import RemoteProviderSpec def _get_config_for_provider(provider_spec: ProviderSpec) -> dict[str, Any]: diff --git a/src/llama_stack/distributions/template.py b/src/llama_stack/distributions/template.py index faf5fb085..5755a26de 100644 --- a/src/llama_stack/distributions/template.py +++ b/src/llama_stack/distributions/template.py @@ -10,7 +10,6 @@ from typing import Any, Literal import jinja2 import rich import yaml -from llama_stack_api import DatasetPurpose, ModelType from pydantic import BaseModel, Field from llama_stack.core.datatypes import ( @@ -43,6 +42,7 @@ from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig from llama_stack.providers.utils.kvstore.config import get_pip_packages as get_kv_pip_packages from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig from llama_stack.providers.utils.sqlstore.sqlstore import get_pip_packages as get_sql_pip_packages +from llama_stack_api import DatasetPurpose, ModelType def filter_empty_values(obj: Any) -> Any: diff --git a/src/llama_stack/providers/inline/agents/meta_reference/agents.py b/src/llama_stack/providers/inline/agents/meta_reference/agents.py index 025fcc676..347f6fdb1 100644 --- a/src/llama_stack/providers/inline/agents/meta_reference/agents.py +++ b/src/llama_stack/providers/inline/agents/meta_reference/agents.py @@ -5,6 +5,10 @@ # the root directory of this source tree. 
+from llama_stack.core.datatypes import AccessRule
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.kvstore import InmemoryKVStoreImpl, kvstore_impl
+from llama_stack.providers.utils.responses.responses_store import ResponsesStore
 from llama_stack_api import (
     Agents,
     Conversations,
@@ -25,11 +29,6 @@ from llama_stack_api import (
     VectorIO,
 )
 
-from llama_stack.core.datatypes import AccessRule
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import InmemoryKVStoreImpl, kvstore_impl
-from llama_stack.providers.utils.responses.responses_store import ResponsesStore
-
 from .config import MetaReferenceAgentsImplConfig
 from .responses.openai_responses import OpenAIResponsesImpl
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
index 347eeef78..3f88b1562 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
@@ -8,6 +8,13 @@ import time
 import uuid
 from collections.abc import AsyncIterator
 
+from pydantic import BaseModel, TypeAdapter
+
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.responses.responses_store import (
+    ResponsesStore,
+    _OpenAIResponseObjectWithInputAndMessages,
+)
 from llama_stack_api import (
     ConversationItem,
     Conversations,
@@ -34,13 +41,6 @@ from llama_stack_api import (
     ToolRuntime,
     VectorIO,
 )
-from pydantic import BaseModel, TypeAdapter
-
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.responses.responses_store import (
-    ResponsesStore,
-    _OpenAIResponseObjectWithInputAndMessages,
-)
 
 from .streaming import StreamingResponseOrchestrator
 from .tool_executor import ToolExecutor
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
index 6a791e92d..ea4486b62 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
@@ -8,6 +8,9 @@ import uuid
 from collections.abc import AsyncIterator
 from typing import Any
 
+from llama_stack.core.telemetry import tracing
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
 from llama_stack_api import (
     AllowedToolsFilter,
     ApprovalFilter,
@@ -65,10 +68,6 @@ from llama_stack_api import (
     WebSearchToolTypes,
 )
 
-from llama_stack.core.telemetry import tracing
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
-
 from .types import ChatCompletionContext, ChatCompletionResult
 from .utils import (
     convert_chat_choice_to_response_message,
@@ -1022,11 +1021,11 @@ class StreamingResponseOrchestrator:
         self, tools: list[OpenAIResponseInputTool], output_messages: list[OpenAIResponseOutput]
     ) -> AsyncIterator[OpenAIResponseObjectStream]:
         """Process all tools and emit appropriate streaming events."""
-        from llama_stack_api import ToolDef
         from openai.types.chat import ChatCompletionToolParam
 
         from llama_stack.models.llama.datatypes import ToolDefinition
         from llama_stack.providers.utils.inference.openai_compat import convert_tooldef_to_openai_tool
+        from llama_stack_api import ToolDef
 
         def make_openai_tool(tool_name: str, tool: ToolDef) -> ChatCompletionToolParam:
             tool_def = ToolDefinition(
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py
index 38fb2a94f..616ec2477 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py
@@ -9,6 +9,8 @@ import json
 from collections.abc import AsyncIterator
 from typing import Any
 
+from llama_stack.core.telemetry import tracing
+from llama_stack.log import get_logger
 from llama_stack_api import (
     ImageContentItem,
     OpenAIChatCompletionContentPartImageParam,
@@ -37,9 +39,6 @@ from llama_stack_api import (
     VectorIO,
 )
 
-from llama_stack.core.telemetry import tracing
-from llama_stack.log import get_logger
-
 from .types import ChatCompletionContext, ToolExecutionResult
 
 logger = get_logger(name=__name__, category="agents::meta_reference")
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/types.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/types.py
index 35ad03378..f6efcee22 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/types.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/types.py
@@ -7,6 +7,9 @@
 from dataclasses import dataclass
 from typing import cast
 
+from openai.types.chat import ChatCompletionToolParam
+from pydantic import BaseModel
+
 from llama_stack_api import (
     OpenAIChatCompletionToolCall,
     OpenAIMessageParam,
@@ -26,8 +29,6 @@ from llama_stack_api import (
     OpenAIResponseTool,
     OpenAIResponseToolMCP,
 )
-from openai.types.chat import ChatCompletionToolParam
-from pydantic import BaseModel
 
 
 class ToolExecutionResult(BaseModel):
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/safety.py b/src/llama_stack/providers/inline/agents/meta_reference/safety.py
index dd90ac298..bfb557a99 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/safety.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/safety.py
@@ -6,10 +6,9 @@
 
 import asyncio
 
-from llama_stack_api import OpenAIMessageParam, Safety, SafetyViolation, ViolationLevel
-
 from llama_stack.core.telemetry import tracing
 from llama_stack.log import get_logger
+from llama_stack_api import OpenAIMessageParam, Safety, SafetyViolation, ViolationLevel
 
 log = get_logger(name=__name__, category="agents::meta_reference")
diff --git a/src/llama_stack/providers/inline/batches/reference/__init__.py b/src/llama_stack/providers/inline/batches/reference/__init__.py
index 27d0f4213..11c4b06a9 100644
--- a/src/llama_stack/providers/inline/batches/reference/__init__.py
+++ b/src/llama_stack/providers/inline/batches/reference/__init__.py
@@ -6,10 +6,9 @@
 
 from typing import Any
 
-from llama_stack_api import Files, Inference, Models
-
 from llama_stack.core.datatypes import AccessRule, Api
 from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack_api import Files, Inference, Models
 
 from .batches import ReferenceBatchesImpl
 from .config import ReferenceBatchesImplConfig
diff --git a/src/llama_stack/providers/inline/batches/reference/batches.py b/src/llama_stack/providers/inline/batches/reference/batches.py
index f0f8da96c..73727799d 100644
--- a/src/llama_stack/providers/inline/batches/reference/batches.py
+++ b/src/llama_stack/providers/inline/batches/reference/batches.py
@@ -13,6 +13,11 @@ import uuid
 from io import BytesIO
 from typing import Any, Literal
 
+from openai.types.batch import BatchError, Errors
+from pydantic import BaseModel
+
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.kvstore import KVStore
 from llama_stack_api import (
     Batches,
     BatchObject,
@@ -33,11 +38,6 @@ from llama_stack_api import (
     OpenAIUserMessageParam,
     ResourceNotFoundError,
 )
-from openai.types.batch import BatchError, Errors
-from pydantic import BaseModel
-
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import KVStore
 
 from .config import ReferenceBatchesImplConfig
diff --git a/src/llama_stack/providers/inline/datasetio/localfs/datasetio.py b/src/llama_stack/providers/inline/datasetio/localfs/datasetio.py
index 1fcfbbef4..6ab1a540f 100644
--- a/src/llama_stack/providers/inline/datasetio/localfs/datasetio.py
+++ b/src/llama_stack/providers/inline/datasetio/localfs/datasetio.py
@@ -5,11 +5,10 @@
 # the root directory of this source tree.
 from typing import Any
 
-from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
-
 from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri
 from llama_stack.providers.utils.kvstore import kvstore_impl
 from llama_stack.providers.utils.pagination import paginate_records
+from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
 
 from .config import LocalFSDatasetIOConfig
diff --git a/src/llama_stack/providers/inline/eval/meta_reference/eval.py b/src/llama_stack/providers/inline/eval/meta_reference/eval.py
index e6020e8a3..d43e569e2 100644
--- a/src/llama_stack/providers/inline/eval/meta_reference/eval.py
+++ b/src/llama_stack/providers/inline/eval/meta_reference/eval.py
@@ -6,6 +6,10 @@
 import json
 from typing import Any
 
+from tqdm import tqdm
+
+from llama_stack.providers.utils.common.data_schema_validator import ColumnName
+from llama_stack.providers.utils.kvstore import kvstore_impl
 from llama_stack_api import (
     Agents,
     Benchmark,
@@ -24,10 +28,6 @@ from llama_stack_api import (
     OpenAIUserMessageParam,
     Scoring,
 )
-from tqdm import tqdm
-
-from llama_stack.providers.utils.common.data_schema_validator import ColumnName
-from llama_stack.providers.utils.kvstore import kvstore_impl
 
 from .config import MetaReferenceEvalConfig
diff --git a/src/llama_stack/providers/inline/files/localfs/files.py b/src/llama_stack/providers/inline/files/localfs/files.py
index 5e8c887f1..5fb35a378 100644
--- a/src/llama_stack/providers/inline/files/localfs/files.py
+++ b/src/llama_stack/providers/inline/files/localfs/files.py
@@ -10,6 +10,14 @@ from pathlib import Path
 from typing import Annotated
 
 from fastapi import Depends, File, Form, Response, UploadFile
+
+from llama_stack.core.datatypes import AccessRule
+from llama_stack.core.id_generation import generate_object_id
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.files.form_data import parse_expires_after
+from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
+from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
 from llama_stack_api import (
     ExpiresAfter,
     Files,
@@ -21,14 +29,6 @@ from llama_stack_api import (
     ResourceNotFoundError,
 )
 
-from llama_stack.core.datatypes import AccessRule
-from llama_stack.core.id_generation import generate_object_id
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.files.form_data import parse_expires_after
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
-
 from .config import LocalfsFilesImplConfig
 
 logger = get_logger(name=__name__, category="files")
diff --git a/src/llama_stack/providers/inline/inference/meta_reference/config.py b/src/llama_stack/providers/inline/inference/meta_reference/config.py
index 802e79f15..ec6e8bfe8 100644
--- a/src/llama_stack/providers/inline/inference/meta_reference/config.py
+++ b/src/llama_stack/providers/inline/inference/meta_reference/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import QuantizationConfig
 from pydantic import BaseModel, field_validator
 
 from llama_stack.providers.utils.inference import supported_inference_models
+from llama_stack_api import QuantizationConfig
 
 
 class MetaReferenceInferenceConfig(BaseModel):
diff --git a/src/llama_stack/providers/inline/inference/meta_reference/generators.py b/src/llama_stack/providers/inline/inference/meta_reference/generators.py
index 2155a1ae8..6781d0af9 100644
--- a/src/llama_stack/providers/inline/inference/meta_reference/generators.py
+++ b/src/llama_stack/providers/inline/inference/meta_reference/generators.py
@@ -8,6 +8,14 @@ import math
 from typing import Optional
 
 import torch
+from lmformatenforcer import JsonSchemaParser, TokenEnforcer, TokenEnforcerTokenizerData
+
+from llama_stack.models.llama.datatypes import QuantizationMode, ToolPromptFormat
+from llama_stack.models.llama.llama3.generation import Llama3
+from llama_stack.models.llama.llama3.tokenizer import Tokenizer as Llama3Tokenizer
+from llama_stack.models.llama.llama4.generation import Llama4
+from llama_stack.models.llama.llama4.tokenizer import Tokenizer as Llama4Tokenizer
+from llama_stack.models.llama.sku_types import Model, ModelFamily
 from llama_stack_api import (
     GreedySamplingStrategy,
     JsonSchemaResponseFormat,
@@ -18,14 +26,6 @@ from llama_stack_api import (
     SamplingParams,
     TopPSamplingStrategy,
 )
-from lmformatenforcer import JsonSchemaParser, TokenEnforcer, TokenEnforcerTokenizerData
-
-from llama_stack.models.llama.datatypes import QuantizationMode, ToolPromptFormat
-from llama_stack.models.llama.llama3.generation import Llama3
-from llama_stack.models.llama.llama3.tokenizer import Tokenizer as Llama3Tokenizer
-from llama_stack.models.llama.llama4.generation import Llama4
-from llama_stack.models.llama.llama4.tokenizer import Tokenizer as Llama4Tokenizer
-from llama_stack.models.llama.sku_types import Model, ModelFamily
 
 from .common import model_checkpoint_dir
 from .config import MetaReferenceInferenceConfig
diff --git a/src/llama_stack/providers/inline/inference/meta_reference/inference.py b/src/llama_stack/providers/inline/inference/meta_reference/inference.py
index 753185fe7..42d1299ab 100644
--- a/src/llama_stack/providers/inline/inference/meta_reference/inference.py
+++ b/src/llama_stack/providers/inline/inference/meta_reference/inference.py
@@ -9,23 +9,6 @@ import time
 import uuid
 from collections.abc import AsyncIterator
 
-from llama_stack_api import (
-    InferenceProvider,
-    Model,
-    ModelsProtocolPrivate,
-    ModelType,
-    OpenAIAssistantMessageParam,
-    OpenAIChatCompletion,
-    OpenAIChatCompletionChunk,
-    OpenAIChatCompletionRequestWithExtraBody,
-    OpenAIChatCompletionUsage,
-    OpenAIChoice,
-    OpenAICompletion,
-    OpenAICompletionRequestWithExtraBody,
-    OpenAIUserMessageParam,
-    ToolChoice,
-)
-
 from llama_stack.log import get_logger
 from llama_stack.models.llama.datatypes import RawMessage, RawTextItem, ToolDefinition
 from llama_stack.models.llama.llama3.chat_format import ChatFormat as Llama3ChatFormat
@@ -48,6 +31,22 @@ from llama_stack.providers.utils.inference.model_registry import (
     ModelRegistryHelper,
     build_hf_repo_model_entry,
 )
+from llama_stack_api import (
+    InferenceProvider,
+    Model,
+    ModelsProtocolPrivate,
+    ModelType,
+    OpenAIAssistantMessageParam,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAIChatCompletionUsage,
+    OpenAIChoice,
+    OpenAICompletion,
+    OpenAICompletionRequestWithExtraBody,
+    OpenAIUserMessageParam,
+    ToolChoice,
+)
 
 from .config import MetaReferenceInferenceConfig
 from .generators import LlamaGenerator
@@ -441,6 +440,8 @@ class MetaReferenceInferenceImpl(
         params: OpenAIChatCompletionRequestWithExtraBody,
     ) -> AsyncIterator[OpenAIChatCompletionChunk]:
         """Stream chat completion chunks as they're generated."""
+        from llama_stack.models.llama.datatypes import StopReason
+        from llama_stack.providers.utils.inference.prompt_adapter import decode_assistant_message
         from llama_stack_api import (
             OpenAIChatCompletionChunk,
             OpenAIChatCompletionToolCall,
@@ -449,9 +450,6 @@ class MetaReferenceInferenceImpl(
             OpenAIChunkChoice,
         )
 
-        from llama_stack.models.llama.datatypes import StopReason
-        from llama_stack.providers.utils.inference.prompt_adapter import decode_assistant_message
-
         response_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
         created = int(time.time())
         generated_text = ""
diff --git a/src/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py b/src/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py
index 14c9a41a4..b5cadeec2 100644
--- a/src/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py
+++ b/src/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py
@@ -6,6 +6,10 @@
 
 from collections.abc import AsyncIterator
 
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.embedding_mixin import (
+    SentenceTransformerEmbeddingMixin,
+)
 from llama_stack_api import (
     InferenceProvider,
     Model,
@@ -18,11 +22,6 @@ from llama_stack_api import (
     OpenAICompletionRequestWithExtraBody,
 )
 
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.embedding_mixin import (
-    SentenceTransformerEmbeddingMixin,
-)
-
 from .config import SentenceTransformersInferenceConfig
 
 log = get_logger(name=__name__, category="inference")
diff --git a/src/llama_stack/providers/inline/post_training/common/validator.py b/src/llama_stack/providers/inline/post_training/common/validator.py
index 7a85d0e03..cc018c865 100644
--- a/src/llama_stack/providers/inline/post_training/common/validator.py
+++ b/src/llama_stack/providers/inline/post_training/common/validator.py
@@ -12,11 +12,10 @@
 
 from typing import Any
 
-from llama_stack_api import ChatCompletionInputType, DialogType, StringType
-
 from llama_stack.providers.utils.common.data_schema_validator import (
     ColumnName,
 )
+from llama_stack_api import ChatCompletionInputType, DialogType, StringType
 
 EXPECTED_DATASET_SCHEMA: dict[str, list[dict[str, Any]]] = {
     "instruct": [
diff --git a/src/llama_stack/providers/inline/post_training/huggingface/post_training.py b/src/llama_stack/providers/inline/post_training/huggingface/post_training.py
index f3f3d8d56..fa939d439 100644
--- a/src/llama_stack/providers/inline/post_training/huggingface/post_training.py
+++ b/src/llama_stack/providers/inline/post_training/huggingface/post_training.py
@@ -6,6 +6,11 @@
 from enum import Enum
 from typing import Any
 
+from llama_stack.providers.inline.post_training.huggingface.config import (
+    HuggingFacePostTrainingConfig,
+)
+from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
+from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
 from llama_stack_api import (
     AlgorithmConfig,
     Checkpoint,
@@ -20,12 +25,6 @@ from llama_stack_api import (
     TrainingConfig,
 )
 
-from llama_stack.providers.inline.post_training.huggingface.config import (
-    HuggingFacePostTrainingConfig,
-)
-from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
-from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
-
 
 class TrainingArtifactType(Enum):
     CHECKPOINT = "checkpoint"
diff --git a/src/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py b/src/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
index 58a30618c..c7c737fbd 100644
--- a/src/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
+++ b/src/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
@@ -12,14 +12,6 @@ from typing import Any
 
 import torch
 from datasets import Dataset
-from llama_stack_api import (
-    Checkpoint,
-    DataConfig,
-    DatasetIO,
-    Datasets,
-    LoraFinetuningConfig,
-    TrainingConfig,
-)
 from peft import LoraConfig
 from transformers import (
     AutoTokenizer,
@@ -28,6 +20,14 @@ from trl import SFTConfig, SFTTrainer
 
 from llama_stack.log import get_logger
 from llama_stack.providers.inline.post_training.common.utils import evacuate_model_from_device
+from llama_stack_api import (
+    Checkpoint,
+    DataConfig,
+    DatasetIO,
+    Datasets,
+    LoraFinetuningConfig,
+    TrainingConfig,
+)
 
 from ..config import HuggingFacePostTrainingConfig
 from ..utils import (
diff --git a/src/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py b/src/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py
index f7dc3ebf2..da2626555 100644
--- a/src/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py
+++ b/src/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py
@@ -11,13 +11,6 @@ from typing import Any
 
 import torch
 from datasets import Dataset
-from llama_stack_api import (
-    Checkpoint,
-    DatasetIO,
-    Datasets,
-    DPOAlignmentConfig,
-    TrainingConfig,
-)
 from transformers import (
     AutoTokenizer,
 )
@@ -25,6 +18,13 @@ from trl import DPOConfig, DPOTrainer
 
 from llama_stack.log import get_logger
 from llama_stack.providers.inline.post_training.common.utils import evacuate_model_from_device
+from llama_stack_api import (
+    Checkpoint,
+    DatasetIO,
+    Datasets,
+    DPOAlignmentConfig,
+    TrainingConfig,
+)
 
 from ..config import HuggingFacePostTrainingConfig
 from ..utils import (
diff --git a/src/llama_stack/providers/inline/post_training/huggingface/utils.py b/src/llama_stack/providers/inline/post_training/huggingface/utils.py
index 86c3c3f52..2037f70e7 100644
--- a/src/llama_stack/providers/inline/post_training/huggingface/utils.py
+++ b/src/llama_stack/providers/inline/post_training/huggingface/utils.py
@@ -14,9 +14,10 @@ from typing import TYPE_CHECKING, Any, Protocol
 import psutil
 import torch
 from datasets import Dataset
-from llama_stack_api import Checkpoint, DatasetIO, TrainingConfig
 from transformers import AutoConfig, AutoModelForCausalLM
 
+from llama_stack_api import Checkpoint, DatasetIO, TrainingConfig
+
 if TYPE_CHECKING:
     from transformers import PretrainedConfig
diff --git a/src/llama_stack/providers/inline/post_training/torchtune/common/utils.py b/src/llama_stack/providers/inline/post_training/torchtune/common/utils.py
index 1483b8385..f929ea4dd 100644
--- a/src/llama_stack/providers/inline/post_training/torchtune/common/utils.py
+++ b/src/llama_stack/providers/inline/post_training/torchtune/common/utils.py
@@ -13,7 +13,6 @@ from collections.abc import Callable
 
 import torch
-from llama_stack_api import DatasetFormat
 from pydantic import BaseModel
 from torchtune.data._messages import InputOutputToMessages, ShareGPTToMessages
 from torchtune.models.llama3 import llama3_tokenizer
@@ -24,6 +23,7 @@ from torchtune.modules.transforms import Transform
 
 from llama_stack.models.llama.sku_list import resolve_model
 from llama_stack.models.llama.sku_types import Model
+from llama_stack_api import DatasetFormat
 
 BuildLoraModelCallable = Callable[..., torch.nn.Module]
 BuildTokenizerCallable = Callable[..., Llama3Tokenizer]
diff --git a/src/llama_stack/providers/inline/post_training/torchtune/post_training.py b/src/llama_stack/providers/inline/post_training/torchtune/post_training.py
index 3370d42fa..515ff7b66 100644
--- a/src/llama_stack/providers/inline/post_training/torchtune/post_training.py
+++ b/src/llama_stack/providers/inline/post_training/torchtune/post_training.py
@@ -6,6 +6,11 @@
 from enum import Enum
 from typing import Any
 
+from llama_stack.providers.inline.post_training.torchtune.config import (
+    TorchtunePostTrainingConfig,
+)
+from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
+from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
 from llama_stack_api import (
     AlgorithmConfig,
     Checkpoint,
@@ -21,12 +26,6 @@ from llama_stack_api import (
     TrainingConfig,
 )
 
-from llama_stack.providers.inline.post_training.torchtune.config import (
-    TorchtunePostTrainingConfig,
-)
-from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
-from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
-
 
 class TrainingArtifactType(Enum):
     CHECKPOINT = "checkpoint"
diff --git a/src/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py b/src/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
index 2bf1d0fe7..f5e5db415 100644
--- a/src/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
+++ b/src/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
@@ -12,17 +12,6 @@ from pathlib import Path
 from typing import Any
 
 import torch
-from llama_stack_api import (
-    Checkpoint,
-    DataConfig,
-    DatasetIO,
-    Datasets,
-    LoraFinetuningConfig,
-    OptimizerConfig,
-    PostTrainingMetric,
-    QATFinetuningConfig,
-    TrainingConfig,
-)
 from torch import nn
 from torch.optim import Optimizer
 from torch.utils.data import DataLoader, DistributedSampler
@@ -56,6 +45,17 @@ from llama_stack.providers.inline.post_training.torchtune.config import (
     TorchtunePostTrainingConfig,
 )
 from llama_stack.providers.inline.post_training.torchtune.datasets.sft import SFTDataset
+from llama_stack_api import (
+    Checkpoint,
+    DataConfig,
+    DatasetIO,
+    Datasets,
+    LoraFinetuningConfig,
+    OptimizerConfig,
+    PostTrainingMetric,
+    QATFinetuningConfig,
+    TrainingConfig,
+)
 
 log = get_logger(name=__name__, category="post_training")
diff --git a/src/llama_stack/providers/inline/safety/code_scanner/code_scanner.py b/src/llama_stack/providers/inline/safety/code_scanner/code_scanner.py
index 80e907c10..071fbe2dc 100644
--- a/src/llama_stack/providers/inline/safety/code_scanner/code_scanner.py
+++ b/src/llama_stack/providers/inline/safety/code_scanner/code_scanner.py
@@ -10,6 +10,10 @@ from typing import TYPE_CHECKING, Any
 if TYPE_CHECKING:
     from codeshield.cs import CodeShieldScanResult
 
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.prompt_adapter import (
+    interleaved_content_as_str,
+)
 from llama_stack_api import (
     ModerationObject,
     ModerationObjectResults,
@@ -21,11 +25,6 @@ from llama_stack_api import (
     ViolationLevel,
 )
 
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.prompt_adapter import (
-    interleaved_content_as_str,
-)
-
 from .config import CodeScannerConfig
 
 log = get_logger(name=__name__, category="safety")
diff --git a/src/llama_stack/providers/inline/safety/llama_guard/llama_guard.py b/src/llama_stack/providers/inline/safety/llama_guard/llama_guard.py
index 36e4280b9..ff1536bea 100644
--- a/src/llama_stack/providers/inline/safety/llama_guard/llama_guard.py
+++ b/src/llama_stack/providers/inline/safety/llama_guard/llama_guard.py
@@ -9,6 +9,13 @@ import uuid
 from string import Template
 from typing import Any
 
+from llama_stack.core.datatypes import Api
+from llama_stack.log import get_logger
+from llama_stack.models.llama.datatypes import Role
+from llama_stack.models.llama.sku_types import CoreModelId
+from llama_stack.providers.utils.inference.prompt_adapter import (
+    interleaved_content_as_str,
+)
 from llama_stack_api import (
     ImageContentItem,
     Inference,
@@ -26,14 +33,6 @@ from llama_stack_api import (
     ViolationLevel,
 )
 
-from llama_stack.core.datatypes import Api
-from llama_stack.log import get_logger
-from llama_stack.models.llama.datatypes import Role
-from llama_stack.models.llama.sku_types import CoreModelId
-from llama_stack.providers.utils.inference.prompt_adapter import (
-    interleaved_content_as_str,
-)
-
 from .config import LlamaGuardConfig
 
 CANNED_RESPONSE_TEXT = "I can't answer that. Can I help with something else?"
diff --git a/src/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py b/src/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
index b4f495f19..51383da1b 100644
--- a/src/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
+++ b/src/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
@@ -7,6 +7,11 @@ from typing import Any
 
 import torch
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+from llama_stack.core.utils.model_utils import model_local_dir
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
 from llama_stack_api import (
     ModerationObject,
     OpenAIMessageParam,
@@ -18,11 +23,6 @@ from llama_stack_api import (
     ShieldStore,
     ViolationLevel,
 )
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
-
-from llama_stack.core.utils.model_utils import model_local_dir
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
 
 from .config import PromptGuardConfig, PromptGuardType
diff --git a/src/llama_stack/providers/inline/scoring/basic/scoring.py b/src/llama_stack/providers/inline/scoring/basic/scoring.py
index 326fd9211..cf5cb79ba 100644
--- a/src/llama_stack/providers/inline/scoring/basic/scoring.py
+++ b/src/llama_stack/providers/inline/scoring/basic/scoring.py
@@ -5,6 +5,11 @@
 # the root directory of this source tree.
 from typing import Any
 
+from llama_stack.core.datatypes import Api
+from llama_stack.providers.utils.common.data_schema_validator import (
+    get_valid_schemas,
+    validate_dataset_schema,
+)
 from llama_stack_api import (
     DatasetIO,
     Datasets,
@@ -17,12 +22,6 @@ from llama_stack_api import (
     ScoringResult,
 )
 
-from llama_stack.core.datatypes import Api
-from llama_stack.providers.utils.common.data_schema_validator import (
-    get_valid_schemas,
-    validate_dataset_schema,
-)
-
 from .config import BasicScoringConfig
 from .scoring_fn.docvqa_scoring_fn import DocVQAScoringFn
 from .scoring_fn.equality_scoring_fn import EqualityScoringFn
diff --git a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py
index 93c2627dd..e48bab8fa 100644
--- a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py
+++ b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py
@@ -8,9 +8,8 @@ import json
 import re
 from typing import Any
 
-from llama_stack_api import ScoringFnParams, ScoringResultRow
-
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
+from llama_stack_api import ScoringFnParams, ScoringResultRow
 
 from .fn_defs.docvqa import docvqa
diff --git a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py
index 382c64d88..2e79240be 100644
--- a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py
+++ b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py
@@ -6,9 +6,8 @@
 
 from typing import Any
 
-from llama_stack_api import ScoringFnParams, ScoringResultRow
-
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
+from llama_stack_api import ScoringFnParams, ScoringResultRow
 
 from .fn_defs.equality import equality
diff --git a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py
index 4ec85bb09..33b1c5a31 100644
--- a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py
+++ b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py
@@ -6,9 +6,8 @@
 
 from typing import Any
 
-from llama_stack_api import ScoringFnParams, ScoringResultRow
-
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
+from llama_stack_api import ScoringFnParams, ScoringResultRow
 
 from .fn_defs.ifeval import (
     ifeval,
diff --git a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py
index 4e9d49e96..1f4f2f979 100644
--- a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py
+++ b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py
@@ -5,9 +5,8 @@
 # the root directory of this source tree.
 from typing import Any
 
-from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
-
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
+from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
 
 from ..utils.math_utils import first_answer, normalize_final_answer, try_evaluate_frac, try_evaluate_latex
 from .fn_defs.regex_parser_math_response import (
diff --git a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py
index 7f213b38c..1cc74f874 100644
--- a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py
+++ b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py
@@ -6,9 +6,8 @@ import re
 from typing import Any
 
-from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
-
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
+from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
 
 from .fn_defs.regex_parser_multiple_choice_answer import (
     regex_parser_multiple_choice_answer,
diff --git a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py
index b291924d5..fe15a4972 100644
--- a/src/llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py
+++ b/src/llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py
@@ -6,9 +6,8 @@
 
 from typing import Any
 
-from llama_stack_api import ScoringFnParams, ScoringResultRow
-
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
+from llama_stack_api import ScoringFnParams, ScoringResultRow
 
 from .fn_defs.subset_of import subset_of
diff --git a/src/llama_stack/providers/inline/scoring/braintrust/braintrust.py b/src/llama_stack/providers/inline/scoring/braintrust/braintrust.py
index cbab93c74..cfa35547b 100644
--- a/src/llama_stack/providers/inline/scoring/braintrust/braintrust.py
+++ b/src/llama_stack/providers/inline/scoring/braintrust/braintrust.py
@@ -17,6 +17,16 @@ from autoevals.ragas import (
     ContextRelevancy,
     Faithfulness,
 )
+from pydantic import BaseModel
+
+from llama_stack.core.datatypes import Api
+from llama_stack.core.request_headers import NeedsRequestProviderData
+from llama_stack.providers.utils.common.data_schema_validator import (
+    get_valid_schemas,
+    validate_dataset_schema,
+    validate_row_schema,
+)
+from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_metrics
 from llama_stack_api import (
     DatasetIO,
     Datasets,
@@ -29,16 +39,6 @@ from llama_stack_api import (
     ScoringResult,
     ScoringResultRow,
 )
-from pydantic import BaseModel
-
-from llama_stack.core.datatypes import Api
-from llama_stack.core.request_headers import NeedsRequestProviderData
-from llama_stack.providers.utils.common.data_schema_validator import (
-    get_valid_schemas,
-    validate_dataset_schema,
-    validate_row_schema,
-)
-from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_metrics
 
 from .config import BraintrustScoringConfig
 from .scoring_fn.fn_defs.answer_correctness import answer_correctness_fn_def
diff --git a/src/llama_stack/providers/inline/scoring/llm_as_judge/scoring.py b/src/llama_stack/providers/inline/scoring/llm_as_judge/scoring.py
index aa636d2b3..23e6ad705 100644
--- a/src/llama_stack/providers/inline/scoring/llm_as_judge/scoring.py
+++ b/src/llama_stack/providers/inline/scoring/llm_as_judge/scoring.py
@@ -5,6 +5,11 @@
 # the root directory of this source tree.
 from typing import Any
 
+from llama_stack.core.datatypes import Api
+from llama_stack.providers.utils.common.data_schema_validator import (
+    get_valid_schemas,
+    validate_dataset_schema,
+)
 from llama_stack_api import (
     DatasetIO,
     Datasets,
@@ -18,12 +23,6 @@ from llama_stack_api import (
     ScoringResult,
 )
 
-from llama_stack.core.datatypes import Api
-from llama_stack.providers.utils.common.data_schema_validator import (
-    get_valid_schemas,
-    validate_dataset_schema,
-)
-
 from .config import LlmAsJudgeScoringConfig
 from .scoring_fn.llm_as_judge_scoring_fn import LlmAsJudgeScoringFn
diff --git a/src/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py b/src/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py
index 169a4d8b7..73ce82cda 100644
--- a/src/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py
+++ b/src/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py
@@ -6,9 +6,8 @@ import re
 from typing import Any
 
-from llama_stack_api import Inference, OpenAIChatCompletionRequestWithExtraBody, ScoringFnParams, ScoringResultRow
-
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
+from llama_stack_api import Inference, OpenAIChatCompletionRequestWithExtraBody, ScoringFnParams, ScoringResultRow
 
 from .fn_defs.llm_as_judge_405b_simpleqa import llm_as_judge_405b_simpleqa
 from .fn_defs.llm_as_judge_base import llm_as_judge_base
diff --git a/src/llama_stack/providers/inline/tool_runtime/rag/context_retriever.py b/src/llama_stack/providers/inline/tool_runtime/rag/context_retriever.py
index f499989cb..240df199b 100644
--- a/src/llama_stack/providers/inline/tool_runtime/rag/context_retriever.py
+++ b/src/llama_stack/providers/inline/tool_runtime/rag/context_retriever.py
@@ -6,6 +6,10 @@
 
 from jinja2 import Template
+
+from llama_stack.providers.utils.inference.prompt_adapter import (
+    interleaved_content_as_str,
+)
 from llama_stack_api import (
     DefaultRAGQueryGeneratorConfig,
     InterleavedContent,
@@ -16,10 +20,6 @@ from llama_stack_api import (
     RAGQueryGeneratorConfig,
 )
 
-from llama_stack.providers.utils.inference.prompt_adapter import (
-    interleaved_content_as_str,
-)
-
 
 async def generate_rag_query(
     config: RAGQueryGeneratorConfig,
diff --git a/src/llama_stack/providers/inline/tool_runtime/rag/memory.py b/src/llama_stack/providers/inline/tool_runtime/rag/memory.py
index aacb7bb38..895d219bb 100644
--- a/src/llama_stack/providers/inline/tool_runtime/rag/memory.py
+++ b/src/llama_stack/providers/inline/tool_runtime/rag/memory.py
@@ -12,6 +12,11 @@ from typing import Any
 
 import httpx
 from fastapi import UploadFile
+from pydantic import TypeAdapter
+
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
+from llama_stack.providers.utils.memory.vector_store import parse_data_url
 from llama_stack_api import (
     URL,
     Files,
@@ -34,11 +39,6 @@ from llama_stack_api import (
     VectorStoreChunkingStrategyStatic,
     VectorStoreChunkingStrategyStaticConfig,
 )
-from pydantic import TypeAdapter
-
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
-from llama_stack.providers.utils.memory.vector_store import parse_data_url
 
 from .config import RagToolRuntimeConfig
 from .context_retriever import generate_rag_query
diff --git a/src/llama_stack/providers/inline/vector_io/chroma/config.py b/src/llama_stack/providers/inline/vector_io/chroma/config.py
index d955b1d06..3897991f5 100644
--- a/src/llama_stack/providers/inline/vector_io/chroma/config.py
+++ b/src/llama_stack/providers/inline/vector_io/chroma/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type
diff --git a/src/llama_stack/providers/inline/vector_io/faiss/config.py b/src/llama_stack/providers/inline/vector_io/faiss/config.py
index dd433f818..d516d9fe9 100644
--- a/src/llama_stack/providers/inline/vector_io/faiss/config.py
+++ b/src/llama_stack/providers/inline/vector_io/faiss/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel
 
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type
diff --git a/src/llama_stack/providers/inline/vector_io/faiss/faiss.py b/src/llama_stack/providers/inline/vector_io/faiss/faiss.py
index abef42499..d52a54e6a 100644
--- a/src/llama_stack/providers/inline/vector_io/faiss/faiss.py
+++ b/src/llama_stack/providers/inline/vector_io/faiss/faiss.py
@@ -12,6 +12,13 @@ from typing import Any
 
 import faiss  # type: ignore[import-untyped]
 import numpy as np
+from numpy.typing import NDArray
+
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack.providers.utils.kvstore.api import KVStore
+from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
+from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 from llama_stack_api import (
     Chunk,
     Files,
@@ -25,13 +32,6 @@ from llama_stack_api import (
     VectorStoreNotFoundError,
     VectorStoresProtocolPrivate,
 )
-from numpy.typing import NDArray
-
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
-from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
-from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 
 from .config import FaissVectorIOConfig
diff --git a/src/llama_stack/providers/inline/vector_io/milvus/config.py b/src/llama_stack/providers/inline/vector_io/milvus/config.py
index 08d05c991..14ddd2362 100644
--- a/src/llama_stack/providers/inline/vector_io/milvus/config.py
+++ b/src/llama_stack/providers/inline/vector_io/milvus/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type
diff --git a/src/llama_stack/providers/inline/vector_io/qdrant/config.py b/src/llama_stack/providers/inline/vector_io/qdrant/config.py
index 437d643f0..4251f2f39 100644
--- a/src/llama_stack/providers/inline/vector_io/qdrant/config.py
+++ b/src/llama_stack/providers/inline/vector_io/qdrant/config.py
@@ -7,10 +7,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel
 
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type
diff --git a/src/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py b/src/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
index e979ff323..74bc349a5 100644
--- a/src/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
+++ b/src/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
@@ -12,16 +12,6 @@ from typing import Any
 
 import numpy as np
 import sqlite_vec  # type: ignore[import-untyped]
-from llama_stack_api import (
-    Chunk,
-    Files,
-    Inference,
-    QueryChunksResponse,
-    VectorIO,
-    VectorStore,
-    VectorStoreNotFoundError,
-    VectorStoresProtocolPrivate,
-)
 from numpy.typing import NDArray
 
 from llama_stack.log import get_logger
@@ -35,6 +25,16 @@ from llama_stack.providers.utils.memory.vector_store import (
     VectorStoreWithIndex,
 )
 from llama_stack.providers.utils.vector_io.vector_utils import WeightedInMemoryAggregator
+from llama_stack_api import (
+    Chunk,
+    Files,
+    Inference,
+    QueryChunksResponse,
+    VectorIO,
+    VectorStore,
+    VectorStoreNotFoundError,
+    VectorStoresProtocolPrivate,
+)
 
 logger = get_logger(name=__name__, category="vector_io")
diff --git a/src/llama_stack/providers/registry/agents.py b/src/llama_stack/providers/registry/agents.py
index bd204cecd..455be1ae7 100644
--- a/src/llama_stack/providers/registry/agents.py
+++ b/src/llama_stack/providers/registry/agents.py
@@ -5,14 +5,13 @@
 # the root directory of this source tree.
 
+from llama_stack.providers.utils.kvstore import kvstore_dependencies
 from llama_stack_api import (
     Api,
     InlineProviderSpec,
     ProviderSpec,
 )
 
-from llama_stack.providers.utils.kvstore import kvstore_dependencies
-
 
 def available_providers() -> list[ProviderSpec]:
     return [
diff --git a/src/llama_stack/providers/registry/files.py b/src/llama_stack/providers/registry/files.py
index dfc527816..024254b57 100644
--- a/src/llama_stack/providers/registry/files.py
+++ b/src/llama_stack/providers/registry/files.py
@@ -4,9 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack_api import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec
-
 from llama_stack.providers.utils.sqlstore.sqlstore import sql_store_pip_packages
+from llama_stack_api import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec
 
 
 def available_providers() -> list[ProviderSpec]:
diff --git a/src/llama_stack/providers/registry/tool_runtime.py b/src/llama_stack/providers/registry/tool_runtime.py
index 3f0a83a30..d34312353 100644
--- a/src/llama_stack/providers/registry/tool_runtime.py
+++ b/src/llama_stack/providers/registry/tool_runtime.py
@@ -5,6 +5,7 @@
 # the root directory of this source tree.
 
+from llama_stack.providers.registry.vector_io import DEFAULT_VECTOR_IO_DEPS
 from llama_stack_api import (
     Api,
     InlineProviderSpec,
@@ -12,8 +13,6 @@ from llama_stack_api import (
     RemoteProviderSpec,
 )
 
-from llama_stack.providers.registry.vector_io import DEFAULT_VECTOR_IO_DEPS
-
 
 def available_providers() -> list[ProviderSpec]:
     return [
diff --git a/src/llama_stack/providers/remote/datasetio/huggingface/huggingface.py b/src/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
index 1260ce644..72069f716 100644
--- a/src/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
+++ b/src/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
@@ -6,10 +6,9 @@ from typing import Any
 from urllib.parse import parse_qs, urlparse
 
-from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
-
 from llama_stack.providers.utils.kvstore import kvstore_impl
 from llama_stack.providers.utils.pagination import paginate_records
+from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
 
 from .config import HuggingfaceDatasetIOConfig
diff --git a/src/llama_stack/providers/remote/datasetio/nvidia/datasetio.py b/src/llama_stack/providers/remote/datasetio/nvidia/datasetio.py
index cb674b0d7..2f5548fa9 100644
--- a/src/llama_stack/providers/remote/datasetio/nvidia/datasetio.py
+++ b/src/llama_stack/providers/remote/datasetio/nvidia/datasetio.py
@@ -7,6 +7,7 @@ from typing import Any
 
 import aiohttp
+
 from llama_stack_api import URL, Dataset, PaginatedResponse, ParamType
 
 from .config import NvidiaDatasetIOConfig
diff --git a/src/llama_stack/providers/remote/eval/nvidia/eval.py b/src/llama_stack/providers/remote/eval/nvidia/eval.py
index fbdec0d4d..5802cb098 100644
--- a/src/llama_stack/providers/remote/eval/nvidia/eval.py
+++ b/src/llama_stack/providers/remote/eval/nvidia/eval.py
@@ -6,6 +6,8 @@ from typing import Any
 
 import requests
+
+from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack_api import (
     Agents,
     Benchmark,
@@ -22,8 +24,6 @@ from llama_stack_api import (
     ScoringResult,
 )
 
-from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
-
 from .config import NVIDIAEvalConfig
 
 DEFAULT_NAMESPACE = "nvidia"
diff --git a/src/llama_stack/providers/remote/files/openai/files.py b/src/llama_stack/providers/remote/files/openai/files.py
index bbd630977..d2f5a08eb 100644
--- a/src/llama_stack/providers/remote/files/openai/files.py
+++ b/src/llama_stack/providers/remote/files/openai/files.py
@@ -8,6 +8,12 @@ from datetime import UTC, datetime
 from typing import Annotated, Any
 
 from fastapi import Depends, File, Form, Response, UploadFile
+
+from llama_stack.core.datatypes import AccessRule
+from llama_stack.providers.utils.files.form_data import parse_expires_after
+from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
+from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
 from llama_stack_api import (
     ExpiresAfter,
     Files,
@@ -18,12 +24,6 @@ from llama_stack_api import (
     Order,
     ResourceNotFoundError,
 )
-
-from llama_stack.core.datatypes import AccessRule
-from llama_stack.providers.utils.files.form_data import parse_expires_after
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
 from openai import OpenAI
 
 from .config import OpenAIFilesImplConfig
diff --git a/src/llama_stack/providers/remote/files/s3/files.py b/src/llama_stack/providers/remote/files/s3/files.py
index 14f1e3852..68822eb77 100644
--- a/src/llama_stack/providers/remote/files/s3/files.py
+++ b/src/llama_stack/providers/remote/files/s3/files.py
@@ -17,6 +17,12 @@ from fastapi import Depends, File, Form, Response, UploadFile
 if TYPE_CHECKING:
     from mypy_boto3_s3.client import S3Client
 
+from llama_stack.core.datatypes import AccessRule
+from llama_stack.core.id_generation import generate_object_id
+from llama_stack.providers.utils.files.form_data import parse_expires_after
+from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
+from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
 from llama_stack_api import (
     ExpiresAfter,
     Files,
@@ -28,13 +34,6 @@ from llama_stack_api import (
     ResourceNotFoundError,
 )
 
-from llama_stack.core.datatypes import AccessRule
-from llama_stack.core.id_generation import generate_object_id
-from llama_stack.providers.utils.files.form_data import parse_expires_after
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
-
 from .config import S3FilesImplConfig
 
 # TODO: provider data for S3 credentials
diff --git a/src/llama_stack/providers/remote/inference/anthropic/config.py b/src/llama_stack/providers/remote/inference/anthropic/config.py
index 7ee4c54e2..b706b90e1 100644
--- a/src/llama_stack/providers/remote/inference/anthropic/config.py
+++ b/src/llama_stack/providers/remote/inference/anthropic/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class AnthropicProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/azure/config.py b/src/llama_stack/providers/remote/inference/azure/config.py
index 596f6c234..b801b91b2 100644
--- a/src/llama_stack/providers/remote/inference/azure/config.py
+++ b/src/llama_stack/providers/remote/inference/azure/config.py
@@ -7,10 +7,10 @@
 import os
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, HttpUrl, SecretStr
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class AzureProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/bedrock/bedrock.py b/src/llama_stack/providers/remote/inference/bedrock/bedrock.py
index 1a9fe533b..70ee95916 100644
--- a/src/llama_stack/providers/remote/inference/bedrock/bedrock.py
+++ b/src/llama_stack/providers/remote/inference/bedrock/bedrock.py
@@ -6,6 +6,11 @@
 
 from collections.abc import AsyncIterator, Iterable
 
+from openai import AuthenticationError
+
+from llama_stack.core.telemetry.tracing import get_current_span
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
@@ -15,11 +20,6 @@ from llama_stack_api import (
     OpenAIEmbeddingsRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
-from openai import AuthenticationError
-
-from llama_stack.core.telemetry.tracing import get_current_span
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 
 from .config import BedrockConfig
diff --git a/src/llama_stack/providers/remote/inference/cerebras/cerebras.py b/src/llama_stack/providers/remote/inference/cerebras/cerebras.py
index c7f3111f9..680431e22 100644
--- a/src/llama_stack/providers/remote/inference/cerebras/cerebras.py
+++ b/src/llama_stack/providers/remote/inference/cerebras/cerebras.py
@@ -6,13 +6,12 @@
 
 from urllib.parse import urljoin
 
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAIEmbeddingsRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
 
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
-
 from .config import CerebrasImplConfig
diff --git a/src/llama_stack/providers/remote/inference/cerebras/config.py b/src/llama_stack/providers/remote/inference/cerebras/config.py
index a1fd41e2d..db357fd1c 100644
--- a/src/llama_stack/providers/remote/inference/cerebras/config.py
+++ b/src/llama_stack/providers/remote/inference/cerebras/config.py
@@ -7,10 +7,10 @@
 import os
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 DEFAULT_BASE_URL = "https://api.cerebras.ai"
diff --git a/src/llama_stack/providers/remote/inference/databricks/config.py b/src/llama_stack/providers/remote/inference/databricks/config.py
index 4974593d2..bd409fa13 100644
--- a/src/llama_stack/providers/remote/inference/databricks/config.py
+++ b/src/llama_stack/providers/remote/inference/databricks/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class DatabricksProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/databricks/databricks.py b/src/llama_stack/providers/remote/inference/databricks/databricks.py
index 8b802379f..c07d97b67 100644
--- a/src/llama_stack/providers/remote/inference/databricks/databricks.py
+++ b/src/llama_stack/providers/remote/inference/databricks/databricks.py
@@ -7,10 +7,10 @@ from collections.abc import Iterable
 
 from databricks.sdk import WorkspaceClient
-from llama_stack_api import OpenAICompletion, OpenAICompletionRequestWithExtraBody
 
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import OpenAICompletion, OpenAICompletionRequestWithExtraBody
 
 from .config import DatabricksImplConfig
diff --git a/src/llama_stack/providers/remote/inference/fireworks/config.py b/src/llama_stack/providers/remote/inference/fireworks/config.py
index d786655eb..e36c76054 100644
--- a/src/llama_stack/providers/remote/inference/fireworks/config.py
+++ b/src/llama_stack/providers/remote/inference/fireworks/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type
diff --git a/src/llama_stack/providers/remote/inference/gemini/config.py b/src/llama_stack/providers/remote/inference/gemini/config.py
index 6c25c005c..46cec7d0d 100644
--- a/src/llama_stack/providers/remote/inference/gemini/config.py
+++ b/src/llama_stack/providers/remote/inference/gemini/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class GeminiProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/gemini/gemini.py b/src/llama_stack/providers/remote/inference/gemini/gemini.py
index 79d694f06..f6f48cc2b 100644
--- a/src/llama_stack/providers/remote/inference/gemini/gemini.py
+++ b/src/llama_stack/providers/remote/inference/gemini/gemini.py
@@ -6,6 +6,7 @@
 
 from typing import Any
 
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAIEmbeddingData,
     OpenAIEmbeddingsRequestWithExtraBody,
@@ -13,8 +14,6 @@ from llama_stack_api import (
     OpenAIEmbeddingUsage,
 )
 
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
-
 from .config import GeminiConfig
diff --git a/src/llama_stack/providers/remote/inference/groq/config.py b/src/llama_stack/providers/remote/inference/groq/config.py
index cec327716..cca53a4e8 100644
--- a/src/llama_stack/providers/remote/inference/groq/config.py
+++ b/src/llama_stack/providers/remote/inference/groq/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class GroqProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py b/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py
index c16311830..ded210d89 100644
--- a/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py
+++ b/src/llama_stack/providers/remote/inference/llama_openai_compat/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class LlamaProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py b/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py
index 1dea3e3cb..a5f67ecd1 100644
--- a/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py
+++ b/src/llama_stack/providers/remote/inference/llama_openai_compat/llama.py
@@ -4,6 +4,9 @@
 # the root directory of this source tree.
 
+from llama_stack.log import get_logger
+from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAICompletion,
     OpenAICompletionRequestWithExtraBody,
@@ -11,10 +14,6 @@ from llama_stack_api import (
     OpenAIEmbeddingsResponse,
 )
 
-from llama_stack.log import get_logger
-from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
-
 logger = get_logger(name=__name__, category="inference::llama_openai_compat")
diff --git a/src/llama_stack/providers/remote/inference/nvidia/config.py b/src/llama_stack/providers/remote/inference/nvidia/config.py
index 6ff98d290..e5b0c6b73 100644
--- a/src/llama_stack/providers/remote/inference/nvidia/config.py
+++ b/src/llama_stack/providers/remote/inference/nvidia/config.py
@@ -7,10 +7,10 @@
 import os
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class NVIDIAProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/nvidia/nvidia.py b/src/llama_stack/providers/remote/inference/nvidia/nvidia.py
index 9e4c6f559..17f8775bf 100644
--- a/src/llama_stack/providers/remote/inference/nvidia/nvidia.py
+++ b/src/llama_stack/providers/remote/inference/nvidia/nvidia.py
@@ -8,6 +8,9 @@ from collections.abc import Iterable
 
 import aiohttp
+
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     Model,
     ModelType,
@@ -17,9 +20,6 @@ from llama_stack_api import (
     RerankResponse,
 )
 
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
-
 from . import NVIDIAConfig
 from .utils import _is_nvidia_hosted
diff --git a/src/llama_stack/providers/remote/inference/oci/config.py b/src/llama_stack/providers/remote/inference/oci/config.py
index 24b4ad926..93cc36d76 100644
--- a/src/llama_stack/providers/remote/inference/oci/config.py
+++ b/src/llama_stack/providers/remote/inference/oci/config.py
@@ -7,10 +7,10 @@
 import os
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class OCIProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/oci/oci.py b/src/llama_stack/providers/remote/inference/oci/oci.py
index 36e56cf6c..239443963 100644
--- a/src/llama_stack/providers/remote/inference/oci/oci.py
+++ b/src/llama_stack/providers/remote/inference/oci/oci.py
@@ -10,11 +10,6 @@ from typing import Any
 
 import httpx
 import oci
-from llama_stack_api import (
-    ModelType,
-    OpenAIEmbeddingsRequestWithExtraBody,
-    OpenAIEmbeddingsResponse,
-)
 from oci.generative_ai.generative_ai_client import GenerativeAiClient
 from oci.generative_ai.models import ModelCollection
 from openai._base_client import DefaultAsyncHttpxClient
@@ -23,6 +18,11 @@ from llama_stack.log import get_logger
 from llama_stack.providers.remote.inference.oci.auth import OciInstancePrincipalAuth, OciUserPrincipalAuth
 from llama_stack.providers.remote.inference.oci.config import OCIConfig
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import (
+    ModelType,
+    OpenAIEmbeddingsRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+)
 
 logger = get_logger(name=__name__, category="inference::oci")
diff --git a/src/llama_stack/providers/remote/inference/ollama/ollama.py b/src/llama_stack/providers/remote/inference/ollama/ollama.py
index 6a471429e..d1bf85361 100644
--- a/src/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/src/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -7,17 +7,17 @@
 
 import asyncio
 
+from ollama import AsyncClient as AsyncOllamaClient
+
+from llama_stack.log import get_logger
+from llama_stack.providers.remote.inference.ollama.config import OllamaImplConfig
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     HealthResponse,
     HealthStatus,
     Model,
     UnsupportedModelError,
 )
-from ollama import AsyncClient as AsyncOllamaClient
-
-from llama_stack.log import get_logger
-from llama_stack.providers.remote.inference.ollama.config import OllamaImplConfig
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 
 logger = get_logger(name=__name__, category="inference::ollama")
diff --git a/src/llama_stack/providers/remote/inference/openai/config.py b/src/llama_stack/providers/remote/inference/openai/config.py
index cbb01b2d0..ab28e571f 100644
--- a/src/llama_stack/providers/remote/inference/openai/config.py
+++ b/src/llama_stack/providers/remote/inference/openai/config.py
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class OpenAIProviderDataValidator(BaseModel):
diff --git a/src/llama_stack/providers/remote/inference/passthrough/config.py b/src/llama_stack/providers/remote/inference/passthrough/config.py
index 
7045dbf2e..54508b6fb 100644 --- a/src/llama_stack/providers/remote/inference/passthrough/config.py +++ b/src/llama_stack/providers/remote/inference/passthrough/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import Field from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/inference/passthrough/passthrough.py b/src/llama_stack/providers/remote/inference/passthrough/passthrough.py index 19cf0c5d7..75eedf026 100644 --- a/src/llama_stack/providers/remote/inference/passthrough/passthrough.py +++ b/src/llama_stack/providers/remote/inference/passthrough/passthrough.py @@ -6,6 +6,9 @@ from collections.abc import AsyncIterator +from openai import AsyncOpenAI + +from llama_stack.core.request_headers import NeedsRequestProviderData from llama_stack_api import ( Inference, Model, @@ -17,9 +20,6 @@ from llama_stack_api import ( OpenAIEmbeddingsRequestWithExtraBody, OpenAIEmbeddingsResponse, ) -from openai import AsyncOpenAI - -from llama_stack.core.request_headers import NeedsRequestProviderData from .config import PassthroughImplConfig diff --git a/src/llama_stack/providers/remote/inference/runpod/config.py b/src/llama_stack/providers/remote/inference/runpod/config.py index aaa4230a8..2ee56ca94 100644 --- a/src/llama_stack/providers/remote/inference/runpod/config.py +++ b/src/llama_stack/providers/remote/inference/runpod/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field, SecretStr from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack_api import json_schema_type class RunpodProviderDataValidator(BaseModel): diff --git a/src/llama_stack/providers/remote/inference/runpod/runpod.py b/src/llama_stack/providers/remote/inference/runpod/runpod.py index 4596b2df5..9c770cc24 100644 --- a/src/llama_stack/providers/remote/inference/runpod/runpod.py +++ b/src/llama_stack/providers/remote/inference/runpod/runpod.py @@ -6,14 +6,13 @@ from collections.abc import AsyncIterator +from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack_api import ( OpenAIChatCompletion, OpenAIChatCompletionChunk, OpenAIChatCompletionRequestWithExtraBody, ) -from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin - from .config import RunpodImplConfig diff --git a/src/llama_stack/providers/remote/inference/sambanova/config.py b/src/llama_stack/providers/remote/inference/sambanova/config.py index 6d72e7205..93679ba99 100644 --- a/src/llama_stack/providers/remote/inference/sambanova/config.py +++ b/src/llama_stack/providers/remote/inference/sambanova/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack_api import json_schema_type class SambaNovaProviderDataValidator(BaseModel): diff --git a/src/llama_stack/providers/remote/inference/tgi/config.py b/src/llama_stack/providers/remote/inference/tgi/config.py index 051a2afa3..74edc8523 100644 --- a/src/llama_stack/providers/remote/inference/tgi/config.py +++ b/src/llama_stack/providers/remote/inference/tgi/config.py @@ -5,10 +5,10 @@ # the root directory of this source tree. 
-from llama_stack_api import json_schema_type from pydantic import BaseModel, Field, SecretStr from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/inference/tgi/tgi.py b/src/llama_stack/providers/remote/inference/tgi/tgi.py index 831a26e39..dd47ccc62 100644 --- a/src/llama_stack/providers/remote/inference/tgi/tgi.py +++ b/src/llama_stack/providers/remote/inference/tgi/tgi.py @@ -8,14 +8,14 @@ from collections.abc import Iterable from huggingface_hub import AsyncInferenceClient, HfApi -from llama_stack_api import ( - OpenAIEmbeddingsRequestWithExtraBody, - OpenAIEmbeddingsResponse, -) from pydantic import SecretStr from llama_stack.log import get_logger from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin +from llama_stack_api import ( + OpenAIEmbeddingsRequestWithExtraBody, + OpenAIEmbeddingsResponse, +) from .config import InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig diff --git a/src/llama_stack/providers/remote/inference/together/config.py b/src/llama_stack/providers/remote/inference/together/config.py index 96c0538e3..c1b3c4a55 100644 --- a/src/llama_stack/providers/remote/inference/together/config.py +++ b/src/llama_stack/providers/remote/inference/together/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import Field from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/inference/together/together.py b/src/llama_stack/providers/remote/inference/together/together.py index f1355a760..cd34aec5e 100644 --- a/src/llama_stack/providers/remote/inference/together/together.py +++ b/src/llama_stack/providers/remote/inference/together/together.py @@ -8,18 +8,18 @@ from collections.abc import Iterable from typing import Any, cast -from llama_stack_api import ( - Model, - OpenAIEmbeddingsRequestWithExtraBody, - OpenAIEmbeddingsResponse, - OpenAIEmbeddingUsage, -) from together import AsyncTogether # type: ignore[import-untyped] from together.constants import BASE_URL # type: ignore[import-untyped] from llama_stack.core.request_headers import NeedsRequestProviderData from llama_stack.log import get_logger from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin +from llama_stack_api import ( + Model, + OpenAIEmbeddingsRequestWithExtraBody, + OpenAIEmbeddingsResponse, + OpenAIEmbeddingUsage, +) from .config import TogetherImplConfig diff --git a/src/llama_stack/providers/remote/inference/vertexai/config.py b/src/llama_stack/providers/remote/inference/vertexai/config.py index 53e2b3e65..5891f7cd0 100644 --- a/src/llama_stack/providers/remote/inference/vertexai/config.py +++ b/src/llama_stack/providers/remote/inference/vertexai/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field, SecretStr from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack_api import json_schema_type class VertexAIProviderDataValidator(BaseModel): diff --git a/src/llama_stack/providers/remote/inference/vllm/config.py b/src/llama_stack/providers/remote/inference/vllm/config.py index 23f713961..c43533ee4 100644 --- a/src/llama_stack/providers/remote/inference/vllm/config.py 
+++ b/src/llama_stack/providers/remote/inference/vllm/config.py @@ -6,10 +6,10 @@ from pathlib import Path -from llama_stack_api import json_schema_type from pydantic import Field, SecretStr, field_validator from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/inference/vllm/vllm.py b/src/llama_stack/providers/remote/inference/vllm/vllm.py index f7938c22c..1510e9384 100644 --- a/src/llama_stack/providers/remote/inference/vllm/vllm.py +++ b/src/llama_stack/providers/remote/inference/vllm/vllm.py @@ -7,6 +7,10 @@ from collections.abc import AsyncIterator from urllib.parse import urljoin import httpx +from pydantic import ConfigDict + +from llama_stack.log import get_logger +from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack_api import ( HealthResponse, HealthStatus, @@ -15,10 +19,6 @@ from llama_stack_api import ( OpenAIChatCompletionRequestWithExtraBody, ToolChoice, ) -from pydantic import ConfigDict - -from llama_stack.log import get_logger -from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from .config import VLLMInferenceAdapterConfig diff --git a/src/llama_stack/providers/remote/inference/watsonx/config.py b/src/llama_stack/providers/remote/inference/watsonx/config.py index 1bba040ef..914f80820 100644 --- a/src/llama_stack/providers/remote/inference/watsonx/config.py +++ b/src/llama_stack/providers/remote/inference/watsonx/config.py @@ -7,10 +7,10 @@ import os from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack_api import json_schema_type class WatsonXProviderDataValidator(BaseModel): diff --git a/src/llama_stack/providers/remote/inference/watsonx/watsonx.py b/src/llama_stack/providers/remote/inference/watsonx/watsonx.py index de23c25d7..aab9e2dca 100644 --- a/src/llama_stack/providers/remote/inference/watsonx/watsonx.py +++ b/src/llama_stack/providers/remote/inference/watsonx/watsonx.py @@ -9,6 +9,12 @@ from typing import Any import litellm import requests + +from llama_stack.core.telemetry.tracing import get_current_span +from llama_stack.log import get_logger +from llama_stack.providers.remote.inference.watsonx.config import WatsonXConfig +from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin +from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params from llama_stack_api import ( Model, ModelType, @@ -22,12 +28,6 @@ from llama_stack_api import ( OpenAIEmbeddingsResponse, ) -from llama_stack.core.telemetry.tracing import get_current_span -from llama_stack.log import get_logger -from llama_stack.providers.remote.inference.watsonx.config import WatsonXConfig -from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin -from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params - logger = get_logger(name=__name__, category="providers::remote::watsonx") @@ -238,9 +238,8 @@ class WatsonXInferenceAdapter(LiteLLMOpenAIMixin): ) # Convert response to OpenAI format - from llama_stack_api import OpenAIEmbeddingUsage - from llama_stack.providers.utils.inference.litellm_openai_mixin import b64_encode_openai_embeddings_response + from llama_stack_api import OpenAIEmbeddingUsage 
data = b64_encode_openai_embeddings_response(response.data, params.encoding_format) diff --git a/src/llama_stack/providers/remote/post_training/nvidia/post_training.py b/src/llama_stack/providers/remote/post_training/nvidia/post_training.py index 02c35241b..830a9f747 100644 --- a/src/llama_stack/providers/remote/post_training/nvidia/post_training.py +++ b/src/llama_stack/providers/remote/post_training/nvidia/post_training.py @@ -8,6 +8,11 @@ from datetime import datetime from typing import Any, Literal import aiohttp +from pydantic import BaseModel, ConfigDict + +from llama_stack.providers.remote.post_training.nvidia.config import NvidiaPostTrainingConfig +from llama_stack.providers.remote.post_training.nvidia.utils import warn_unsupported_params +from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper from llama_stack_api import ( AlgorithmConfig, DPOAlignmentConfig, @@ -17,11 +22,6 @@ from llama_stack_api import ( PostTrainingJobStatusResponse, TrainingConfig, ) -from pydantic import BaseModel, ConfigDict - -from llama_stack.providers.remote.post_training.nvidia.config import NvidiaPostTrainingConfig -from llama_stack.providers.remote.post_training.nvidia.utils import warn_unsupported_params -from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper from .models import _MODEL_ENTRIES diff --git a/src/llama_stack/providers/remote/post_training/nvidia/utils.py b/src/llama_stack/providers/remote/post_training/nvidia/utils.py index 78762155d..bd40dacb4 100644 --- a/src/llama_stack/providers/remote/post_training/nvidia/utils.py +++ b/src/llama_stack/providers/remote/post_training/nvidia/utils.py @@ -7,11 +7,11 @@ import warnings from typing import Any -from llama_stack_api import TrainingConfig from pydantic import BaseModel from llama_stack.log import get_logger from llama_stack.providers.remote.post_training.nvidia.config import SFTLoRADefaultConfig +from llama_stack_api import TrainingConfig from .config import NvidiaPostTrainingConfig diff --git a/src/llama_stack/providers/remote/safety/bedrock/bedrock.py b/src/llama_stack/providers/remote/safety/bedrock/bedrock.py index 86b93c32e..c321f759b 100644 --- a/src/llama_stack/providers/remote/safety/bedrock/bedrock.py +++ b/src/llama_stack/providers/remote/safety/bedrock/bedrock.py @@ -7,6 +7,8 @@ import json from typing import Any +from llama_stack.log import get_logger +from llama_stack.providers.utils.bedrock.client import create_bedrock_client from llama_stack_api import ( OpenAIMessageParam, RunShieldResponse, @@ -17,9 +19,6 @@ from llama_stack_api import ( ViolationLevel, ) -from llama_stack.log import get_logger -from llama_stack.providers.utils.bedrock.client import create_bedrock_client - from .config import BedrockSafetyConfig logger = get_logger(name=__name__, category="safety::bedrock") diff --git a/src/llama_stack/providers/remote/safety/bedrock/config.py b/src/llama_stack/providers/remote/safety/bedrock/config.py index ca28924d4..0b1f2581a 100644 --- a/src/llama_stack/providers/remote/safety/bedrock/config.py +++ b/src/llama_stack/providers/remote/safety/bedrock/config.py @@ -5,9 +5,8 @@ # the root directory of this source tree. 
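[Editor's note] The hunks above and below all apply the same mechanical change: with the package now living at `src/llama_stack_api`, the import sorter classifies `llama_stack_api` as first-party code rather than a third-party dependency, so it sorts after `llama_stack.*` (alphabetically) within the first-party block instead of alongside packages like `pydantic` or `openai`. The PR description says the pyproject/linting config was updated accordingly (presumably ruff's isort first-party setting; the exact key is not shown in this excerpt). A minimal before/after sketch of the convention these hunks enforce, using a config module seen in this patch:

```python
# Before this patch: llama_stack_api was grouped with third-party imports.
#   from llama_stack_api import json_schema_type
#   from pydantic import BaseModel, Field
#
#   from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig

# After this patch: third-party imports first, then the first-party block,
# where llama_stack_api sorts alphabetically after llama_stack.
from pydantic import BaseModel, Field

from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
```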
-from llama_stack_api import json_schema_type - from llama_stack.providers.utils.bedrock.config import BedrockBaseConfig +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/safety/nvidia/config.py b/src/llama_stack/providers/remote/safety/nvidia/config.py index fc686ae73..f11de5feb 100644 --- a/src/llama_stack/providers/remote/safety/nvidia/config.py +++ b/src/llama_stack/providers/remote/safety/nvidia/config.py @@ -6,9 +6,10 @@ import os from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field +from llama_stack_api import json_schema_type + @json_schema_type class NVIDIASafetyConfig(BaseModel): diff --git a/src/llama_stack/providers/remote/safety/nvidia/nvidia.py b/src/llama_stack/providers/remote/safety/nvidia/nvidia.py index b3b5090e0..43ff45cc9 100644 --- a/src/llama_stack/providers/remote/safety/nvidia/nvidia.py +++ b/src/llama_stack/providers/remote/safety/nvidia/nvidia.py @@ -7,6 +7,8 @@ from typing import Any import requests + +from llama_stack.log import get_logger from llama_stack_api import ( ModerationObject, OpenAIMessageParam, @@ -18,8 +20,6 @@ from llama_stack_api import ( ViolationLevel, ) -from llama_stack.log import get_logger - from .config import NVIDIASafetyConfig logger = get_logger(name=__name__, category="safety::nvidia") diff --git a/src/llama_stack/providers/remote/safety/sambanova/config.py b/src/llama_stack/providers/remote/safety/sambanova/config.py index a8e745851..bfb42d88a 100644 --- a/src/llama_stack/providers/remote/safety/sambanova/config.py +++ b/src/llama_stack/providers/remote/safety/sambanova/config.py @@ -6,9 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field, SecretStr +from llama_stack_api import json_schema_type + class SambaNovaProviderDataValidator(BaseModel): sambanova_api_key: str | None = Field( diff --git a/src/llama_stack/providers/remote/safety/sambanova/sambanova.py b/src/llama_stack/providers/remote/safety/sambanova/sambanova.py index 119ebb6ed..c11cb544d 100644 --- a/src/llama_stack/providers/remote/safety/sambanova/sambanova.py +++ b/src/llama_stack/providers/remote/safety/sambanova/sambanova.py @@ -8,6 +8,9 @@ from typing import Any import litellm import requests + +from llama_stack.core.request_headers import NeedsRequestProviderData +from llama_stack.log import get_logger from llama_stack_api import ( OpenAIMessageParam, RunShieldResponse, @@ -18,9 +21,6 @@ from llama_stack_api import ( ViolationLevel, ) -from llama_stack.core.request_headers import NeedsRequestProviderData -from llama_stack.log import get_logger - from .config import SambaNovaSafetyConfig logger = get_logger(name=__name__, category="safety::sambanova") diff --git a/src/llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py b/src/llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py index 84e47dd4f..a5a53a9eb 100644 --- a/src/llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +++ b/src/llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py @@ -8,6 +8,8 @@ import json from typing import Any import httpx + +from llama_stack.core.request_headers import NeedsRequestProviderData from llama_stack_api import ( URL, ListToolDefsResponse, @@ -18,8 +20,6 @@ from llama_stack_api import ( ToolRuntime, ) -from llama_stack.core.request_headers import NeedsRequestProviderData - from .config import BingSearchToolConfig diff --git 
a/src/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py b/src/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py index b7eee776a..4888730e4 100644 --- a/src/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +++ b/src/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py @@ -7,6 +7,9 @@ from typing import Any import httpx + +from llama_stack.core.request_headers import NeedsRequestProviderData +from llama_stack.models.llama.datatypes import BuiltinTool from llama_stack_api import ( URL, ListToolDefsResponse, @@ -17,9 +20,6 @@ from llama_stack_api import ( ToolRuntime, ) -from llama_stack.core.request_headers import NeedsRequestProviderData -from llama_stack.models.llama.datatypes import BuiltinTool - from .config import BraveSearchToolConfig diff --git a/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py b/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py index efb1eb2df..544597a51 100644 --- a/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +++ b/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py @@ -7,6 +7,9 @@ from typing import Any from urllib.parse import urlparse +from llama_stack.core.request_headers import NeedsRequestProviderData +from llama_stack.log import get_logger +from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools from llama_stack_api import ( URL, Api, @@ -17,10 +20,6 @@ from llama_stack_api import ( ToolRuntime, ) -from llama_stack.core.request_headers import NeedsRequestProviderData -from llama_stack.log import get_logger -from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools - from .config import MCPProviderConfig logger = get_logger(__name__, category="tools") diff --git a/src/llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py b/src/llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py index d65d66e67..d86cf5d8e 100644 --- a/src/llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +++ b/src/llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py @@ -8,6 +8,8 @@ import json from typing import Any import httpx + +from llama_stack.core.request_headers import NeedsRequestProviderData from llama_stack_api import ( URL, ListToolDefsResponse, @@ -18,8 +20,6 @@ from llama_stack_api import ( ToolRuntime, ) -from llama_stack.core.request_headers import NeedsRequestProviderData - from .config import TavilySearchToolConfig diff --git a/src/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py b/src/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py index 9cc865092..f8d806a5c 100644 --- a/src/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +++ b/src/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py @@ -8,6 +8,8 @@ import json from typing import Any import httpx + +from llama_stack.core.request_headers import NeedsRequestProviderData from llama_stack_api import ( URL, ListToolDefsResponse, @@ -18,8 +20,6 @@ from llama_stack_api import ( ToolRuntime, ) -from llama_stack.core.request_headers import NeedsRequestProviderData - from .config import WolframAlphaToolConfig diff --git a/src/llama_stack/providers/remote/vector_io/chroma/chroma.py b/src/llama_stack/providers/remote/vector_io/chroma/chroma.py index 
eca5d349b..645b40661 100644 --- a/src/llama_stack/providers/remote/vector_io/chroma/chroma.py +++ b/src/llama_stack/providers/remote/vector_io/chroma/chroma.py @@ -9,6 +9,14 @@ from typing import Any from urllib.parse import urlparse import chromadb +from numpy.typing import NDArray + +from llama_stack.log import get_logger +from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig +from llama_stack.providers.utils.kvstore import kvstore_impl +from llama_stack.providers.utils.kvstore.api import KVStore +from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin +from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex from llama_stack_api import ( Chunk, Files, @@ -19,14 +27,6 @@ from llama_stack_api import ( VectorStore, VectorStoresProtocolPrivate, ) -from numpy.typing import NDArray - -from llama_stack.log import get_logger -from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig -from llama_stack.providers.utils.kvstore import kvstore_impl -from llama_stack.providers.utils.kvstore.api import KVStore -from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin -from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex from .config import ChromaVectorIOConfig as RemoteChromaVectorIOConfig diff --git a/src/llama_stack/providers/remote/vector_io/chroma/config.py b/src/llama_stack/providers/remote/vector_io/chroma/config.py index b1e4f9a4a..648d641ad 100644 --- a/src/llama_stack/providers/remote/vector_io/chroma/config.py +++ b/src/llama_stack/providers/remote/vector_io/chroma/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field from llama_stack.core.storage.datatypes import KVStoreReference +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/vector_io/milvus/config.py b/src/llama_stack/providers/remote/vector_io/milvus/config.py index 2e2c788c7..4b9d6a566 100644 --- a/src/llama_stack/providers/remote/vector_io/milvus/config.py +++ b/src/llama_stack/providers/remote/vector_io/milvus/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, ConfigDict, Field from llama_stack.core.storage.datatypes import KVStoreReference +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/vector_io/milvus/milvus.py b/src/llama_stack/providers/remote/vector_io/milvus/milvus.py index b856bf918..aefa20317 100644 --- a/src/llama_stack/providers/remote/vector_io/milvus/milvus.py +++ b/src/llama_stack/providers/remote/vector_io/milvus/milvus.py @@ -8,17 +8,6 @@ import asyncio import os from typing import Any -from llama_stack_api import ( - Chunk, - Files, - Inference, - InterleavedContent, - QueryChunksResponse, - VectorIO, - VectorStore, - VectorStoreNotFoundError, - VectorStoresProtocolPrivate, -) from numpy.typing import NDArray from pymilvus import AnnSearchRequest, DataType, Function, FunctionType, MilvusClient, RRFRanker, WeightedRanker @@ -34,6 +23,17 @@ from llama_stack.providers.utils.memory.vector_store import ( VectorStoreWithIndex, ) from llama_stack.providers.utils.vector_io.vector_utils import sanitize_collection_name +from llama_stack_api import ( 
+ Chunk, + Files, + Inference, + InterleavedContent, + QueryChunksResponse, + VectorIO, + VectorStore, + VectorStoreNotFoundError, + VectorStoresProtocolPrivate, +) from .config import MilvusVectorIOConfig as RemoteMilvusVectorIOConfig diff --git a/src/llama_stack/providers/remote/vector_io/pgvector/config.py b/src/llama_stack/providers/remote/vector_io/pgvector/config.py index aeb1c83bb..87d40a883 100644 --- a/src/llama_stack/providers/remote/vector_io/pgvector/config.py +++ b/src/llama_stack/providers/remote/vector_io/pgvector/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field from llama_stack.core.storage.datatypes import KVStoreReference +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/vector_io/pgvector/pgvector.py b/src/llama_stack/providers/remote/vector_io/pgvector/pgvector.py index 8aa0303b6..2901bad97 100644 --- a/src/llama_stack/providers/remote/vector_io/pgvector/pgvector.py +++ b/src/llama_stack/providers/remote/vector_io/pgvector/pgvector.py @@ -8,17 +8,6 @@ import heapq from typing import Any import psycopg2 -from llama_stack_api import ( - Chunk, - Files, - Inference, - InterleavedContent, - QueryChunksResponse, - VectorIO, - VectorStore, - VectorStoreNotFoundError, - VectorStoresProtocolPrivate, -) from numpy.typing import NDArray from psycopg2 import sql from psycopg2.extras import Json, execute_values @@ -31,6 +20,17 @@ from llama_stack.providers.utils.kvstore.api import KVStore from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex from llama_stack.providers.utils.vector_io.vector_utils import WeightedInMemoryAggregator, sanitize_collection_name +from llama_stack_api import ( + Chunk, + Files, + Inference, + InterleavedContent, + QueryChunksResponse, + VectorIO, + VectorStore, + VectorStoreNotFoundError, + VectorStoresProtocolPrivate, +) from .config import PGVectorVectorIOConfig diff --git a/src/llama_stack/providers/remote/vector_io/qdrant/config.py b/src/llama_stack/providers/remote/vector_io/qdrant/config.py index 8cc4cbb2b..e0a3fe207 100644 --- a/src/llama_stack/providers/remote/vector_io/qdrant/config.py +++ b/src/llama_stack/providers/remote/vector_io/qdrant/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel from llama_stack.core.storage.datatypes import KVStoreReference +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/vector_io/qdrant/qdrant.py b/src/llama_stack/providers/remote/vector_io/qdrant/qdrant.py index 53d6be2b6..20ab653d0 100644 --- a/src/llama_stack/providers/remote/vector_io/qdrant/qdrant.py +++ b/src/llama_stack/providers/remote/vector_io/qdrant/qdrant.py @@ -9,6 +9,15 @@ import hashlib import uuid from typing import Any +from numpy.typing import NDArray +from qdrant_client import AsyncQdrantClient, models +from qdrant_client.models import PointStruct + +from llama_stack.log import get_logger +from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig +from llama_stack.providers.utils.kvstore import kvstore_impl +from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin +from llama_stack.providers.utils.memory.vector_store import 
ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex from llama_stack_api import ( Chunk, Files, @@ -22,15 +31,6 @@ from llama_stack_api import ( VectorStoreNotFoundError, VectorStoresProtocolPrivate, ) -from numpy.typing import NDArray -from qdrant_client import AsyncQdrantClient, models -from qdrant_client.models import PointStruct - -from llama_stack.log import get_logger -from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig -from llama_stack.providers.utils.kvstore import kvstore_impl -from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin -from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex from .config import QdrantVectorIOConfig as RemoteQdrantVectorIOConfig diff --git a/src/llama_stack/providers/remote/vector_io/weaviate/config.py b/src/llama_stack/providers/remote/vector_io/weaviate/config.py index 19f9679fb..75d1b7c51 100644 --- a/src/llama_stack/providers/remote/vector_io/weaviate/config.py +++ b/src/llama_stack/providers/remote/vector_io/weaviate/config.py @@ -6,10 +6,10 @@ from typing import Any -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field from llama_stack.core.storage.datatypes import KVStoreReference +from llama_stack_api import json_schema_type @json_schema_type diff --git a/src/llama_stack/providers/remote/vector_io/weaviate/weaviate.py b/src/llama_stack/providers/remote/vector_io/weaviate/weaviate.py index c72666f63..ba3e6b7ea 100644 --- a/src/llama_stack/providers/remote/vector_io/weaviate/weaviate.py +++ b/src/llama_stack/providers/remote/vector_io/weaviate/weaviate.py @@ -8,17 +8,6 @@ from typing import Any import weaviate import weaviate.classes as wvc -from llama_stack_api import ( - Chunk, - Files, - Inference, - InterleavedContent, - QueryChunksResponse, - VectorIO, - VectorStore, - VectorStoreNotFoundError, - VectorStoresProtocolPrivate, -) from numpy.typing import NDArray from weaviate.classes.init import Auth from weaviate.classes.query import Filter, HybridFusion @@ -35,6 +24,17 @@ from llama_stack.providers.utils.memory.vector_store import ( VectorStoreWithIndex, ) from llama_stack.providers.utils.vector_io.vector_utils import sanitize_collection_name +from llama_stack_api import ( + Chunk, + Files, + Inference, + InterleavedContent, + QueryChunksResponse, + VectorIO, + VectorStore, + VectorStoreNotFoundError, + VectorStoresProtocolPrivate, +) from .config import WeaviateVectorIOConfig diff --git a/src/llama_stack/providers/utils/common/data_schema_validator.py b/src/llama_stack/providers/utils/common/data_schema_validator.py index 7ef245779..c9a3b0920 100644 --- a/src/llama_stack/providers/utils/common/data_schema_validator.py +++ b/src/llama_stack/providers/utils/common/data_schema_validator.py @@ -7,9 +7,8 @@ from enum import Enum from typing import Any -from llama_stack_api import ChatCompletionInputType, CompletionInputType, StringType - from llama_stack.core.datatypes import Api +from llama_stack_api import ChatCompletionInputType, CompletionInputType, StringType class ColumnName(Enum): diff --git a/src/llama_stack/providers/utils/files/form_data.py b/src/llama_stack/providers/utils/files/form_data.py index 21afbec2b..3fac14f38 100644 --- a/src/llama_stack/providers/utils/files/form_data.py +++ b/src/llama_stack/providers/utils/files/form_data.py @@ -7,9 +7,10 @@ import json from fastapi import Request -from llama_stack_api import ExpiresAfter from pydantic 
import BaseModel, ValidationError +from llama_stack_api import ExpiresAfter + async def parse_pydantic_from_form[T: BaseModel](request: Request, field_name: str, model_class: type[T]) -> T | None: """ diff --git a/src/llama_stack/providers/utils/inference/inference_store.py b/src/llama_stack/providers/utils/inference/inference_store.py index 3c707dd01..49e3af7a1 100644 --- a/src/llama_stack/providers/utils/inference/inference_store.py +++ b/src/llama_stack/providers/utils/inference/inference_store.py @@ -6,6 +6,11 @@ import asyncio from typing import Any +from sqlalchemy.exc import IntegrityError + +from llama_stack.core.datatypes import AccessRule +from llama_stack.core.storage.datatypes import InferenceStoreReference, StorageBackendType +from llama_stack.log import get_logger from llama_stack_api import ( ListOpenAIChatCompletionResponse, OpenAIChatCompletion, @@ -13,11 +18,6 @@ from llama_stack_api import ( OpenAIMessageParam, Order, ) -from sqlalchemy.exc import IntegrityError - -from llama_stack.core.datatypes import AccessRule -from llama_stack.core.storage.datatypes import InferenceStoreReference, StorageBackendType -from llama_stack.log import get_logger from ..sqlstore.api import ColumnDefinition, ColumnType from ..sqlstore.authorized_sqlstore import AuthorizedSqlStore diff --git a/src/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/src/llama_stack/providers/utils/inference/litellm_openai_mixin.py index 4f468725b..c462d1aad 100644 --- a/src/llama_stack/providers/utils/inference/litellm_openai_mixin.py +++ b/src/llama_stack/providers/utils/inference/litellm_openai_mixin.py @@ -9,6 +9,13 @@ import struct from collections.abc import AsyncIterator import litellm + +from llama_stack.core.request_headers import NeedsRequestProviderData +from llama_stack.log import get_logger +from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry +from llama_stack.providers.utils.inference.openai_compat import ( + prepare_openai_completion_params, +) from llama_stack_api import ( InferenceProvider, OpenAIChatCompletion, @@ -22,13 +29,6 @@ from llama_stack_api import ( OpenAIEmbeddingUsage, ) -from llama_stack.core.request_headers import NeedsRequestProviderData -from llama_stack.log import get_logger -from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry -from llama_stack.providers.utils.inference.openai_compat import ( - prepare_openai_completion_params, -) - logger = get_logger(name=__name__, category="providers::utils") diff --git a/src/llama_stack/providers/utils/inference/model_registry.py b/src/llama_stack/providers/utils/inference/model_registry.py index e7ca5ab74..42b54497f 100644 --- a/src/llama_stack/providers/utils/inference/model_registry.py +++ b/src/llama_stack/providers/utils/inference/model_registry.py @@ -6,13 +6,13 @@ from typing import Any -from llama_stack_api import Model, ModelsProtocolPrivate, ModelType, UnsupportedModelError from pydantic import BaseModel, Field, SecretStr from llama_stack.log import get_logger from llama_stack.providers.utils.inference import ( ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR, ) +from llama_stack_api import Model, ModelsProtocolPrivate, ModelType, UnsupportedModelError logger = get_logger(name=__name__, category="providers::utils") diff --git a/src/llama_stack/providers/utils/inference/openai_compat.py b/src/llama_stack/providers/utils/inference/openai_compat.py index c97e42274..32d41ffde 100644 --- 
a/src/llama_stack/providers/utils/inference/openai_compat.py +++ b/src/llama_stack/providers/utils/inference/openai_compat.py @@ -20,18 +20,6 @@ except ImportError: from openai.types.chat.chat_completion_message_tool_call import ( ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall, ) -from llama_stack_api import ( - URL, - GreedySamplingStrategy, - ImageContentItem, - JsonSchemaResponseFormat, - OpenAIResponseFormatParam, - SamplingParams, - TextContentItem, - TopKSamplingStrategy, - TopPSamplingStrategy, - _URLOrData, -) from openai.types.chat import ( ChatCompletionMessageToolCall, ) @@ -44,6 +32,18 @@ from llama_stack.models.llama.datatypes import ( ToolCall, ToolDefinition, ) +from llama_stack_api import ( + URL, + GreedySamplingStrategy, + ImageContentItem, + JsonSchemaResponseFormat, + OpenAIResponseFormatParam, + SamplingParams, + TextContentItem, + TopKSamplingStrategy, + TopPSamplingStrategy, + _URLOrData, +) logger = get_logger(name=__name__, category="providers::utils") diff --git a/src/llama_stack/providers/utils/inference/openai_mixin.py b/src/llama_stack/providers/utils/inference/openai_mixin.py index c05873df5..559ac90ce 100644 --- a/src/llama_stack/providers/utils/inference/openai_mixin.py +++ b/src/llama_stack/providers/utils/inference/openai_mixin.py @@ -10,6 +10,14 @@ from abc import ABC, abstractmethod from collections.abc import AsyncIterator, Iterable from typing import Any +from openai import AsyncOpenAI +from pydantic import BaseModel, ConfigDict + +from llama_stack.core.request_headers import NeedsRequestProviderData +from llama_stack.log import get_logger +from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig +from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params +from llama_stack.providers.utils.inference.prompt_adapter import localize_image_content from llama_stack_api import ( Model, ModelType, @@ -24,14 +32,6 @@ from llama_stack_api import ( OpenAIEmbeddingUsage, OpenAIMessageParam, ) -from openai import AsyncOpenAI -from pydantic import BaseModel, ConfigDict - -from llama_stack.core.request_headers import NeedsRequestProviderData -from llama_stack.log import get_logger -from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig -from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params -from llama_stack.providers.utils.inference.prompt_adapter import localize_image_content logger = get_logger(name=__name__, category="providers::utils") diff --git a/src/llama_stack/providers/utils/inference/prompt_adapter.py b/src/llama_stack/providers/utils/inference/prompt_adapter.py index ea01a34e9..6272c9eed 100644 --- a/src/llama_stack/providers/utils/inference/prompt_adapter.py +++ b/src/llama_stack/providers/utils/inference/prompt_adapter.py @@ -12,24 +12,6 @@ import re from typing import Any import httpx -from llama_stack_api import ( - CompletionRequest, - ImageContentItem, - InterleavedContent, - InterleavedContentItem, - OpenAIAssistantMessageParam, - OpenAIChatCompletionContentPartImageParam, - OpenAIChatCompletionContentPartTextParam, - OpenAIFile, - OpenAIMessageParam, - OpenAISystemMessageParam, - OpenAIToolMessageParam, - OpenAIUserMessageParam, - ResponseFormat, - ResponseFormatType, - TextContentItem, - ToolChoice, -) from PIL import Image as PIL_Image from llama_stack.log import get_logger @@ -48,6 +30,24 @@ from llama_stack.models.llama.llama3.chat_format import ChatFormat 
from llama_stack.models.llama.llama3.tokenizer import Tokenizer from llama_stack.models.llama.sku_list import resolve_model from llama_stack.models.llama.sku_types import ModelFamily, is_multimodal +from llama_stack_api import ( + CompletionRequest, + ImageContentItem, + InterleavedContent, + InterleavedContentItem, + OpenAIAssistantMessageParam, + OpenAIChatCompletionContentPartImageParam, + OpenAIChatCompletionContentPartTextParam, + OpenAIFile, + OpenAIMessageParam, + OpenAISystemMessageParam, + OpenAIToolMessageParam, + OpenAIUserMessageParam, + ResponseFormat, + ResponseFormatType, + TextContentItem, + ToolChoice, +) log = get_logger(name=__name__, category="providers::utils") diff --git a/src/llama_stack/providers/utils/kvstore/sqlite/config.py b/src/llama_stack/providers/utils/kvstore/sqlite/config.py index 895268a4f..0f8fa0a95 100644 --- a/src/llama_stack/providers/utils/kvstore/sqlite/config.py +++ b/src/llama_stack/providers/utils/kvstore/sqlite/config.py @@ -4,9 +4,10 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from llama_stack_api import json_schema_type from pydantic import BaseModel, Field +from llama_stack_api import json_schema_type + @json_schema_type class SqliteControlPlaneConfig(BaseModel): diff --git a/src/llama_stack/providers/utils/memory/openai_vector_store_mixin.py b/src/llama_stack/providers/utils/memory/openai_vector_store_mixin.py index 68d1c11e5..540ff5940 100644 --- a/src/llama_stack/providers/utils/memory/openai_vector_store_mixin.py +++ b/src/llama_stack/providers/utils/memory/openai_vector_store_mixin.py @@ -13,6 +13,16 @@ from abc import ABC, abstractmethod from typing import Annotated, Any from fastapi import Body +from pydantic import TypeAdapter + +from llama_stack.core.id_generation import generate_object_id +from llama_stack.log import get_logger +from llama_stack.providers.utils.kvstore.api import KVStore +from llama_stack.providers.utils.memory.vector_store import ( + ChunkForDeletion, + content_from_data_and_mime_type, + make_overlapped_chunks, +) from llama_stack_api import ( Chunk, Files, @@ -43,16 +53,6 @@ from llama_stack_api import ( VectorStoreSearchResponse, VectorStoreSearchResponsePage, ) -from pydantic import TypeAdapter - -from llama_stack.core.id_generation import generate_object_id -from llama_stack.log import get_logger -from llama_stack.providers.utils.kvstore.api import KVStore -from llama_stack.providers.utils.memory.vector_store import ( - ChunkForDeletion, - content_from_data_and_mime_type, - make_overlapped_chunks, -) EMBEDDING_DIMENSION = 768 diff --git a/src/llama_stack/providers/utils/memory/vector_store.py b/src/llama_stack/providers/utils/memory/vector_store.py index 37ac79039..b6a671ddb 100644 --- a/src/llama_stack/providers/utils/memory/vector_store.py +++ b/src/llama_stack/providers/utils/memory/vector_store.py @@ -14,6 +14,15 @@ from urllib.parse import unquote import httpx import numpy as np +from numpy.typing import NDArray +from pydantic import BaseModel + +from llama_stack.log import get_logger +from llama_stack.models.llama.llama3.tokenizer import Tokenizer +from llama_stack.providers.utils.inference.prompt_adapter import ( + interleaved_content_as_str, +) +from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id from llama_stack_api import ( URL, Api, @@ -25,15 +34,6 @@ from llama_stack_api import ( RAGDocument, VectorStore, ) -from numpy.typing import NDArray -from pydantic import BaseModel - -from 
llama_stack.log import get_logger -from llama_stack.models.llama.llama3.tokenizer import Tokenizer -from llama_stack.providers.utils.inference.prompt_adapter import ( - interleaved_content_as_str, -) -from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id log = get_logger(name=__name__, category="providers::utils") diff --git a/src/llama_stack/providers/utils/responses/responses_store.py b/src/llama_stack/providers/utils/responses/responses_store.py index c7dfed15a..f6e7c435d 100644 --- a/src/llama_stack/providers/utils/responses/responses_store.py +++ b/src/llama_stack/providers/utils/responses/responses_store.py @@ -4,6 +4,9 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. +from llama_stack.core.datatypes import AccessRule +from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqlStoreReference +from llama_stack.log import get_logger from llama_stack_api import ( ListOpenAIResponseInputItem, ListOpenAIResponseObject, @@ -15,10 +18,6 @@ from llama_stack_api import ( Order, ) -from llama_stack.core.datatypes import AccessRule -from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqlStoreReference -from llama_stack.log import get_logger - from ..sqlstore.api import ColumnDefinition, ColumnType from ..sqlstore.authorized_sqlstore import AuthorizedSqlStore from ..sqlstore.sqlstore import sqlstore_impl diff --git a/src/llama_stack/providers/utils/scoring/base_scoring_fn.py b/src/llama_stack/providers/utils/scoring/base_scoring_fn.py index d16c75263..f372db8b5 100644 --- a/src/llama_stack/providers/utils/scoring/base_scoring_fn.py +++ b/src/llama_stack/providers/utils/scoring/base_scoring_fn.py @@ -6,9 +6,8 @@ from abc import ABC, abstractmethod from typing import Any -from llama_stack_api import ScoringFn, ScoringFnParams, ScoringResultRow - from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_metrics +from llama_stack_api import ScoringFn, ScoringFnParams, ScoringResultRow class BaseScoringFn(ABC): diff --git a/src/llama_stack/providers/utils/sqlstore/api.py b/src/llama_stack/providers/utils/sqlstore/api.py index 033a00edc..708fc7095 100644 --- a/src/llama_stack/providers/utils/sqlstore/api.py +++ b/src/llama_stack/providers/utils/sqlstore/api.py @@ -8,9 +8,10 @@ from collections.abc import Mapping, Sequence from enum import Enum from typing import Any, Literal, Protocol -from llama_stack_api import PaginatedResponse from pydantic import BaseModel +from llama_stack_api import PaginatedResponse + class ColumnType(Enum): INTEGER = "INTEGER" diff --git a/src/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py b/src/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py index 263f5e69f..10009d396 100644 --- a/src/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py +++ b/src/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py @@ -6,7 +6,6 @@ from collections.abc import Mapping, Sequence from typing import Any, Literal, cast -from llama_stack_api import PaginatedResponse from sqlalchemy import ( JSON, Boolean, @@ -29,6 +28,7 @@ from sqlalchemy.sql.elements import ColumnElement from llama_stack.core.storage.datatypes import SqlAlchemySqlStoreConfig from llama_stack.log import get_logger +from llama_stack_api import PaginatedResponse from .api import ColumnDefinition, ColumnType, SqlStore diff --git a/src/llama_stack/providers/utils/tools/mcp.py b/src/llama_stack/providers/utils/tools/mcp.py index 
82c85f46c..fad1bf0f0 100644 --- a/src/llama_stack/providers/utils/tools/mcp.py +++ b/src/llama_stack/providers/utils/tools/mcp.py @@ -10,6 +10,14 @@ from enum import Enum from typing import Any, cast import httpx +from mcp import ClientSession, McpError +from mcp import types as mcp_types +from mcp.client.sse import sse_client +from mcp.client.streamable_http import streamablehttp_client + +from llama_stack.core.datatypes import AuthenticationRequiredError +from llama_stack.log import get_logger +from llama_stack.providers.utils.tools.ttl_dict import TTLDict from llama_stack_api import ( ImageContentItem, InterleavedContentItem, @@ -19,14 +27,6 @@ from llama_stack_api import ( ToolInvocationResult, _URLOrData, ) -from mcp import ClientSession, McpError -from mcp import types as mcp_types -from mcp.client.sse import sse_client -from mcp.client.streamable_http import streamablehttp_client - -from llama_stack.core.datatypes import AuthenticationRequiredError -from llama_stack.log import get_logger -from llama_stack.providers.utils.tools.ttl_dict import TTLDict logger = get_logger(__name__, category="tools") diff --git a/src/llama-stack-api/README.md b/src/llama_stack_api/README.md similarity index 98% rename from src/llama-stack-api/README.md rename to src/llama_stack_api/README.md index aa6b05722..9bf1d2726 100644 --- a/src/llama-stack-api/README.md +++ b/src/llama_stack_api/README.md @@ -53,7 +53,7 @@ This package follows semantic versioning independently from the main `llama-stac - **Minor versions** (0.x.0): New APIs, backward-compatible changes - **Major versions** (x.0.0): Breaking changes to existing APIs -Current version: **0.1.0** +Current version: **0.4.0.dev0** ## Usage Example diff --git a/src/llama-stack-api/llama_stack_api/__init__.py b/src/llama_stack_api/__init__.py similarity index 99% rename from src/llama-stack-api/llama_stack_api/__init__.py rename to src/llama_stack_api/__init__.py index 8bbe9f8bd..19b29301b 100644 --- a/src/llama-stack-api/llama_stack_api/__init__.py +++ b/src/llama_stack_api/__init__.py @@ -19,7 +19,7 @@ Sub-module imports (e.g., from llama_stack_api.agents import Agents) are NOT sup and considered a code smell. All exported symbols are explicitly listed in __all__. """ -__version__ = "0.4.0" +__version__ = "0.4.0.dev0" # Import submodules for those who need them from . 
import common, strong_typing # noqa: F401 diff --git a/src/llama-stack-api/llama_stack_api/agents.py b/src/llama_stack_api/agents.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/agents.py rename to src/llama_stack_api/agents.py diff --git a/src/llama-stack-api/llama_stack_api/batches.py b/src/llama_stack_api/batches.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/batches.py rename to src/llama_stack_api/batches.py diff --git a/src/llama-stack-api/llama_stack_api/benchmarks.py b/src/llama_stack_api/benchmarks.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/benchmarks.py rename to src/llama_stack_api/benchmarks.py diff --git a/src/llama-stack-api/llama_stack_api/common/__init__.py b/src/llama_stack_api/common/__init__.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/common/__init__.py rename to src/llama_stack_api/common/__init__.py diff --git a/src/llama-stack-api/llama_stack_api/common/content_types.py b/src/llama_stack_api/common/content_types.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/common/content_types.py rename to src/llama_stack_api/common/content_types.py diff --git a/src/llama-stack-api/llama_stack_api/common/errors.py b/src/llama_stack_api/common/errors.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/common/errors.py rename to src/llama_stack_api/common/errors.py diff --git a/src/llama-stack-api/llama_stack_api/common/job_types.py b/src/llama_stack_api/common/job_types.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/common/job_types.py rename to src/llama_stack_api/common/job_types.py diff --git a/src/llama-stack-api/llama_stack_api/common/responses.py b/src/llama_stack_api/common/responses.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/common/responses.py rename to src/llama_stack_api/common/responses.py diff --git a/src/llama-stack-api/llama_stack_api/common/tracing.py b/src/llama_stack_api/common/tracing.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/common/tracing.py rename to src/llama_stack_api/common/tracing.py diff --git a/src/llama-stack-api/llama_stack_api/common/training_types.py b/src/llama_stack_api/common/training_types.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/common/training_types.py rename to src/llama_stack_api/common/training_types.py diff --git a/src/llama-stack-api/llama_stack_api/common/type_system.py b/src/llama_stack_api/common/type_system.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/common/type_system.py rename to src/llama_stack_api/common/type_system.py diff --git a/src/llama-stack-api/llama_stack_api/conversations.py b/src/llama_stack_api/conversations.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/conversations.py rename to src/llama_stack_api/conversations.py diff --git a/src/llama-stack-api/llama_stack_api/datasetio.py b/src/llama_stack_api/datasetio.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/datasetio.py rename to src/llama_stack_api/datasetio.py diff --git a/src/llama-stack-api/llama_stack_api/datasets.py b/src/llama_stack_api/datasets.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/datasets.py rename to src/llama_stack_api/datasets.py diff --git a/src/llama-stack-api/llama_stack_api/datatypes.py b/src/llama_stack_api/datatypes.py similarity index 100% rename from 
src/llama-stack-api/llama_stack_api/datatypes.py rename to src/llama_stack_api/datatypes.py diff --git a/src/llama-stack-api/llama_stack_api/eval.py b/src/llama_stack_api/eval.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/eval.py rename to src/llama_stack_api/eval.py diff --git a/src/llama-stack-api/llama_stack_api/files.py b/src/llama_stack_api/files.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/files.py rename to src/llama_stack_api/files.py diff --git a/src/llama-stack-api/llama_stack_api/inference.py b/src/llama_stack_api/inference.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/inference.py rename to src/llama_stack_api/inference.py diff --git a/src/llama-stack-api/llama_stack_api/inspect.py b/src/llama_stack_api/inspect.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/inspect.py rename to src/llama_stack_api/inspect.py diff --git a/src/llama-stack-api/llama_stack_api/models.py b/src/llama_stack_api/models.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/models.py rename to src/llama_stack_api/models.py diff --git a/src/llama-stack-api/llama_stack_api/openai_responses.py b/src/llama_stack_api/openai_responses.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/openai_responses.py rename to src/llama_stack_api/openai_responses.py diff --git a/src/llama-stack-api/llama_stack_api/post_training.py b/src/llama_stack_api/post_training.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/post_training.py rename to src/llama_stack_api/post_training.py diff --git a/src/llama-stack-api/llama_stack_api/prompts.py b/src/llama_stack_api/prompts.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/prompts.py rename to src/llama_stack_api/prompts.py diff --git a/src/llama-stack-api/llama_stack_api/providers.py b/src/llama_stack_api/providers.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/providers.py rename to src/llama_stack_api/providers.py diff --git a/src/llama-stack-api/llama_stack_api/py.typed b/src/llama_stack_api/py.typed similarity index 100% rename from src/llama-stack-api/llama_stack_api/py.typed rename to src/llama_stack_api/py.typed diff --git a/src/llama-stack-api/pyproject.toml b/src/llama_stack_api/pyproject.toml similarity index 99% rename from src/llama-stack-api/pyproject.toml rename to src/llama_stack_api/pyproject.toml index a00472d36..0ceb2bb4e 100644 --- a/src/llama-stack-api/pyproject.toml +++ b/src/llama_stack_api/pyproject.toml @@ -7,7 +7,7 @@ required-version = ">=0.7.0" [project] name = "llama-stack-api" -version = "0.1.0" +version = "0.4.0.dev0" authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] description = "API and Provider specifications for Llama Stack - lightweight package with protocol definitions and provider specs" readme = "README.md" diff --git a/src/llama-stack-api/llama_stack_api/rag_tool.py b/src/llama_stack_api/rag_tool.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/rag_tool.py rename to src/llama_stack_api/rag_tool.py diff --git a/src/llama-stack-api/llama_stack_api/resource.py b/src/llama_stack_api/resource.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/resource.py rename to src/llama_stack_api/resource.py diff --git a/src/llama-stack-api/llama_stack_api/safety.py b/src/llama_stack_api/safety.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/safety.py 
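[Editor's note] The rename records in this block only move files: `src/llama-stack-api/llama_stack_api/*` becomes `src/llama_stack_api/*`, so the importable package name `llama_stack_api` is unchanged and no caller code needs to change. Per the package's `__init__.py` docstring (shown earlier in this patch), consumers should use flat imports from the top-level package, which re-exports every public symbol via `__all__`. A short sketch, assuming the package is installed; the imported names all appear in hunks within this patch:

```python
# Supported: flat imports from the top-level package.
from llama_stack_api import Model, ModelType, json_schema_type

import llama_stack_api

# After this patch the package reports the new dev version.
print(llama_stack_api.__version__)  # "0.4.0.dev0"

# Discouraged (a "code smell" per the package docstring): sub-module imports
# such as `from llama_stack_api.agents import Agents`.
```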
diff --git a/src/llama-stack-api/llama_stack_api/rag_tool.py b/src/llama_stack_api/rag_tool.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/rag_tool.py
rename to src/llama_stack_api/rag_tool.py
diff --git a/src/llama-stack-api/llama_stack_api/resource.py b/src/llama_stack_api/resource.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/resource.py
rename to src/llama_stack_api/resource.py
diff --git a/src/llama-stack-api/llama_stack_api/safety.py b/src/llama_stack_api/safety.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/safety.py
rename to src/llama_stack_api/safety.py
diff --git a/src/llama-stack-api/llama_stack_api/schema_utils.py b/src/llama_stack_api/schema_utils.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/schema_utils.py
rename to src/llama_stack_api/schema_utils.py
diff --git a/src/llama-stack-api/llama_stack_api/scoring.py b/src/llama_stack_api/scoring.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/scoring.py
rename to src/llama_stack_api/scoring.py
diff --git a/src/llama-stack-api/llama_stack_api/scoring_functions.py b/src/llama_stack_api/scoring_functions.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/scoring_functions.py
rename to src/llama_stack_api/scoring_functions.py
diff --git a/src/llama-stack-api/llama_stack_api/shields.py b/src/llama_stack_api/shields.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/shields.py
rename to src/llama_stack_api/shields.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/__init__.py b/src/llama_stack_api/strong_typing/__init__.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/__init__.py
rename to src/llama_stack_api/strong_typing/__init__.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/auxiliary.py b/src/llama_stack_api/strong_typing/auxiliary.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/auxiliary.py
rename to src/llama_stack_api/strong_typing/auxiliary.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/classdef.py b/src/llama_stack_api/strong_typing/classdef.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/classdef.py
rename to src/llama_stack_api/strong_typing/classdef.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/core.py b/src/llama_stack_api/strong_typing/core.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/core.py
rename to src/llama_stack_api/strong_typing/core.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/deserializer.py b/src/llama_stack_api/strong_typing/deserializer.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/deserializer.py
rename to src/llama_stack_api/strong_typing/deserializer.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/docstring.py b/src/llama_stack_api/strong_typing/docstring.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/docstring.py
rename to src/llama_stack_api/strong_typing/docstring.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/exception.py b/src/llama_stack_api/strong_typing/exception.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/exception.py
rename to src/llama_stack_api/strong_typing/exception.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/inspection.py b/src/llama_stack_api/strong_typing/inspection.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/inspection.py
rename to src/llama_stack_api/strong_typing/inspection.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/mapping.py b/src/llama_stack_api/strong_typing/mapping.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/mapping.py
rename to src/llama_stack_api/strong_typing/mapping.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/name.py b/src/llama_stack_api/strong_typing/name.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/name.py
rename to src/llama_stack_api/strong_typing/name.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/py.typed b/src/llama_stack_api/strong_typing/py.typed
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/py.typed
rename to src/llama_stack_api/strong_typing/py.typed
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/schema.py b/src/llama_stack_api/strong_typing/schema.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/schema.py
rename to src/llama_stack_api/strong_typing/schema.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/serialization.py b/src/llama_stack_api/strong_typing/serialization.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/serialization.py
rename to src/llama_stack_api/strong_typing/serialization.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/serializer.py b/src/llama_stack_api/strong_typing/serializer.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/serializer.py
rename to src/llama_stack_api/strong_typing/serializer.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/slots.py b/src/llama_stack_api/strong_typing/slots.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/slots.py
rename to src/llama_stack_api/strong_typing/slots.py
diff --git a/src/llama-stack-api/llama_stack_api/strong_typing/topological.py b/src/llama_stack_api/strong_typing/topological.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/strong_typing/topological.py
rename to src/llama_stack_api/strong_typing/topological.py
diff --git a/src/llama-stack-api/llama_stack_api/tools.py b/src/llama_stack_api/tools.py
similarity index 100%
rename from src/llama-stack-api/llama_stack_api/tools.py
rename to src/llama_stack_api/tools.py
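The renames above only move files on disk; the import package was already named llama_stack_api, so consumer code is unchanged. A minimal sketch, with module names taken from the renamed files (which symbols each module actually exports is an assumption):

    # Works the same before and after this patch: the import path never
    # contained the dashed directory name, only the package directory moved.
    from llama_stack_api import agents, datasets, inference  # noqa: F401
    from llama_stack_api.common import content_types  # noqa: F401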
"https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = 
"2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = 
"2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = 
"2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" 
} +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "llama-stack-api" +version = "0.4.0.dev0" +source = { editable = "." } +dependencies = [ + { name = "jsonschema" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "pydantic" }, +] + +[package.metadata] +requires-dist = [ + { name = "jsonschema" }, + { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" }, + { name = "opentelemetry-sdk", specifier = ">=1.30.0" }, + { name = "pydantic", specifier = ">=2.11.9" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/d8/0f354c375628e048bd0570645b310797299754730079853095bf000fba69/opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12", size = 65242, upload-time = "2025-10-16T08:35:50.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/83/dd4660f2956ff88ed071e9e0e36e830df14b8c5dc06722dbde1841accbe8/opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c", size = 20431, upload-time = "2025-10-16T08:35:53.285Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/9e/55a41c9601191e8cd8eb626b54ee6827b9c9d4a46d736f32abc80d8039fc/opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a", size = 18359, upload-time = "2025-10-16T08:35:34.099Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/0a/debcdfb029fbd1ccd1563f7c287b89a6f7bef3b2902ade56797bfd020854/opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b", size 
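uv.lock is plain TOML, so the pins added in this file can be inspected with the standard library alone. A minimal sketch, assuming Python 3.11+ for tomllib and that it is run from the repository root:

    import tomllib
    from pathlib import Path

    # Each [[package]] table in uv.lock carries a resolved name and version.
    lock = tomllib.loads(Path("src/llama_stack_api/uv.lock").read_text())
    for pkg in lock["package"]:
        print(pkg["name"], pkg["version"])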
+[[package]]
+name = "opentelemetry-exporter-otlp-proto-http"
+version = "1.38.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "googleapis-common-protos" },
+    { name = "opentelemetry-api" },
+    { name = "opentelemetry-exporter-otlp-proto-common" },
+    { name = "opentelemetry-proto" },
+    { name = "opentelemetry-sdk" },
+    { name = "requests" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/81/0a/debcdfb029fbd1ccd1563f7c287b89a6f7bef3b2902ade56797bfd020854/opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b", size = 17282, upload-time = "2025-10-16T08:35:54.422Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/e5/77/154004c99fb9f291f74aa0822a2f5bbf565a72d8126b3a1b63ed8e5f83c7/opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b", size = 19579, upload-time = "2025-10-16T08:35:36.269Z" },
+]
+
+[[package]]
+name = "opentelemetry-proto"
+version = "1.38.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "protobuf" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/51/14/f0c4f0f6371b9cb7f9fa9ee8918bfd59ac7040c7791f1e6da32a1839780d/opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468", size = 46152, upload-time = "2025-10-16T08:36:01.612Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b6/6a/82b68b14efca5150b2632f3692d627afa76b77378c4999f2648979409528/opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18", size = 72535, upload-time = "2025-10-16T08:35:45.749Z" },
+]
+
+[[package]]
+name = "opentelemetry-sdk"
+version = "1.38.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "opentelemetry-api" },
+    { name = "opentelemetry-semantic-conventions" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/85/cb/f0eee1445161faf4c9af3ba7b848cc22a50a3d3e2515051ad8628c35ff80/opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe", size = 171942, upload-time = "2025-10-16T08:36:02.257Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/2f/2e/e93777a95d7d9c40d270a371392b6d6f1ff170c2a3cb32d6176741b5b723/opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b", size = 132349, upload-time = "2025-10-16T08:35:46.995Z" },
+]
+
+[[package]]
+name = "opentelemetry-semantic-conventions"
+version = "0.59b0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "opentelemetry-api" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/40/bc/8b9ad3802cd8ac6583a4eb7de7e5d7db004e89cb7efe7008f9c8a537ee75/opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0", size = 129861, upload-time = "2025-10-16T08:36:03.346Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" },
+]
+
+[[package]]
+name = "protobuf"
+version = "6.33.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0a/03/a1440979a3f74f16cab3b75b0da1a1a7f922d56a8ddea96092391998edc0/protobuf-6.33.1.tar.gz", hash = "sha256:97f65757e8d09870de6fd973aeddb92f85435607235d20b2dfed93405d00c85b", size = 443432, upload-time = "2025-11-13T16:44:18.895Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/06/f1/446a9bbd2c60772ca36556bac8bfde40eceb28d9cc7838755bc41e001d8f/protobuf-6.33.1-cp310-abi3-win32.whl", hash = "sha256:f8d3fdbc966aaab1d05046d0240dd94d40f2a8c62856d41eaa141ff64a79de6b", size = 425593, upload-time = "2025-11-13T16:44:06.275Z" },
+    { url = "https://files.pythonhosted.org/packages/a6/79/8780a378c650e3df849b73de8b13cf5412f521ca2ff9b78a45c247029440/protobuf-6.33.1-cp310-abi3-win_amd64.whl", hash = "sha256:923aa6d27a92bf44394f6abf7ea0500f38769d4b07f4be41cb52bd8b1123b9ed", size = 436883, upload-time = "2025-11-13T16:44:09.222Z" },
+    { url = "https://files.pythonhosted.org/packages/cd/93/26213ff72b103ae55bb0d73e7fb91ea570ef407c3ab4fd2f1f27cac16044/protobuf-6.33.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:fe34575f2bdde76ac429ec7b570235bf0c788883e70aee90068e9981806f2490", size = 427522, upload-time = "2025-11-13T16:44:10.475Z" },
+    { url = "https://files.pythonhosted.org/packages/c2/32/df4a35247923393aa6b887c3b3244a8c941c32a25681775f96e2b418f90e/protobuf-6.33.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:f8adba2e44cde2d7618996b3fc02341f03f5bc3f2748be72dc7b063319276178", size = 324445, upload-time = "2025-11-13T16:44:11.869Z" },
+    { url = "https://files.pythonhosted.org/packages/8e/d0/d796e419e2ec93d2f3fa44888861c3f88f722cde02b7c3488fcc6a166820/protobuf-6.33.1-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:0f4cf01222c0d959c2b399142deb526de420be8236f22c71356e2a544e153c53", size = 339161, upload-time = "2025-11-13T16:44:12.778Z" },
+    { url = "https://files.pythonhosted.org/packages/1d/2a/3c5f05a4af06649547027d288747f68525755de692a26a7720dced3652c0/protobuf-6.33.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:8fd7d5e0eb08cd5b87fd3df49bc193f5cfd778701f47e11d127d0afc6c39f1d1", size = 323171, upload-time = "2025-11-13T16:44:14.035Z" },
+    { url = "https://files.pythonhosted.org/packages/08/b4/46310463b4f6ceef310f8348786f3cff181cea671578e3d9743ba61a459e/protobuf-6.33.1-py3-none-any.whl", hash = "sha256:d595a9fd694fdeb061a62fbe10eb039cc1e444df81ec9bb70c7fc59ebcb1eafa", size = 170477, upload-time = "2025-11-13T16:44:17.633Z" },
+]
+
+[[package]]
+name = "pydantic"
+version = "2.12.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "annotated-types" },
+    { name = "pydantic-core" },
+    { name = "typing-extensions" },
+    { name = "typing-inspection" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/96/ad/a17bc283d7d81837c061c49e3eaa27a45991759a1b7eae1031921c6bd924/pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac", size = 821038, upload-time = "2025-11-05T10:50:08.59Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/82/2f/e68750da9b04856e2a7ec56fc6f034a5a79775e9b9a81882252789873798/pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e", size = 463400, upload-time = "2025-11-05T10:50:06.732Z" },
+]
+
+[[package]]
+name = "pydantic-core"
+version = "2.41.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" },
+    { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" },
+    { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" },
+    { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" },
+    { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" },
+    { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" },
+    { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" },
+    { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" },
+    { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" },
+    { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" },
+    { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" },
+    { url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" },
+    { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" },
+    { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" },
+    { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" },
+    { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" },
+    { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" },
+    { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" },
+    { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" },
+    { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" },
+    { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" },
+    { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" },
+    { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" },
+    { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" },
+    { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" },
+    { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" },
+    { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" },
+    { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" },
+    { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" },
+    { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" },
+    { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" },
+    { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" },
+    { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" },
+    { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" },
+    { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" },
+    { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" },
+    { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" },
+    { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" },
+    { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" },
+    { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" },
+    { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" },
+    { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" },
+    { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" },
+    { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" },
+    { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" },
+    { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" },
+    { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" },
+    { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" },
+    { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" },
+    { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" },
+    { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" },
+    { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" },
+    { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" },
+    { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" },
+    { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" },
+    { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" },
+    { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" },
+    { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" },
+    { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" },
+    { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" },
+]
+
+[[package]]
+name = "referencing"
+version = "0.37.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "attrs" },
+    { name = "rpds-py" },
+    { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" },
+]
+
+[[package]]
+name = "requests"
+version = "2.32.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "certifi" },
+    { name = "charset-normalizer" },
+    { name = "idna" },
+    { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
+]
+
+[[package]]
+name = "rpds-py"
+version = "0.28.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/48/dc/95f074d43452b3ef5d06276696ece4b3b5d696e7c9ad7173c54b1390cd70/rpds_py-0.28.0.tar.gz", hash = "sha256:abd4df20485a0983e2ca334a216249b6186d6e3c1627e106651943dbdb791aea", size = 27419, upload-time = "2025-10-22T22:24:29.327Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b8/5c/6c3936495003875fe7b14f90ea812841a08fca50ab26bd840e924097d9c8/rpds_py-0.28.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6b4f28583a4f247ff60cd7bdda83db8c3f5b05a7a82ff20dd4b078571747708f", size = 366439, upload-time = "2025-10-22T22:22:04.525Z" },
+    { url = "https://files.pythonhosted.org/packages/56/f9/a0f1ca194c50aa29895b442771f036a25b6c41a35e4f35b1a0ea713bedae/rpds_py-0.28.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d678e91b610c29c4b3d52a2c148b641df2b4676ffe47c59f6388d58b99cdc424", size = 348170, upload-time = "2025-10-22T22:22:06.397Z" },
+    { url = "https://files.pythonhosted.org/packages/18/ea/42d243d3a586beb72c77fa5def0487daf827210069a95f36328e869599ea/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e819e0e37a44a78e1383bf1970076e2ccc4dc8c2bbaa2f9bd1dc987e9afff628", size = 378838, upload-time = "2025-10-22T22:22:07.932Z" },
+    { url = "https://files.pythonhosted.org/packages/e7/78/3de32e18a94791af8f33601402d9d4f39613136398658412a4e0b3047327/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5ee514e0f0523db5d3fb171f397c54875dbbd69760a414dccf9d4d7ad628b5bd", size = 393299, upload-time = "2025-10-22T22:22:09.435Z" },
+    { url = "https://files.pythonhosted.org/packages/13/7e/4bdb435afb18acea2eb8a25ad56b956f28de7c59f8a1d32827effa0d4514/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f3fa06d27fdcee47f07a39e02862da0100cb4982508f5ead53ec533cd5fe55e", size = 518000, upload-time = "2025-10-22T22:22:11.326Z" },
+    { url = "https://files.pythonhosted.org/packages/31/d0/5f52a656875cdc60498ab035a7a0ac8f399890cc1ee73ebd567bac4e39ae/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46959ef2e64f9e4a41fc89aa20dbca2b85531f9a72c21099a3360f35d10b0d5a", size = 408746, upload-time = "2025-10-22T22:22:13.143Z" },
+    { url = "https://files.pythonhosted.org/packages/3e/cd/49ce51767b879cde77e7ad9fae164ea15dce3616fe591d9ea1df51152706/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8455933b4bcd6e83fde3fefc987a023389c4b13f9a58c8d23e4b3f6d13f78c84", size = 386379, upload-time = "2025-10-22T22:22:14.602Z" },
+    { url = "https://files.pythonhosted.org/packages/6a/99/e4e1e1ee93a98f72fc450e36c0e4d99c35370220e815288e3ecd2ec36a2a/rpds_py-0.28.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:ad50614a02c8c2962feebe6012b52f9802deec4263946cddea37aaf28dd25a66", size = 401280, upload-time = "2025-10-22T22:22:16.063Z" },
+    { url = "https://files.pythonhosted.org/packages/61/35/e0c6a57488392a8b319d2200d03dad2b29c0db9996f5662c3b02d0b86c02/rpds_py-0.28.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5deca01b271492553fdb6c7fd974659dce736a15bae5dad7ab8b93555bceb28", size = 412365, upload-time = "2025-10-22T22:22:17.504Z" },
+    { url = "https://files.pythonhosted.org/packages/ff/6a/841337980ea253ec797eb084665436007a1aad0faac1ba097fb906c5f69c/rpds_py-0.28.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:735f8495a13159ce6a0d533f01e8674cec0c57038c920495f87dcb20b3ddb48a", size = 559573, upload-time = "2025-10-22T22:22:19.108Z" },
+    { url = "https://files.pythonhosted.org/packages/e7/5e/64826ec58afd4c489731f8b00729c5f6afdb86f1df1df60bfede55d650bb/rpds_py-0.28.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:961ca621ff10d198bbe6ba4957decca61aa2a0c56695384c1d6b79bf61436df5", size = 583973, upload-time = "2025-10-22T22:22:20.768Z" },
+    { url = "https://files.pythonhosted.org/packages/b6/ee/44d024b4843f8386a4eeaa4c171b3d31d55f7177c415545fd1a24c249b5d/rpds_py-0.28.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2374e16cc9131022e7d9a8f8d65d261d9ba55048c78f3b6e017971a4f5e6353c", size = 553800, upload-time = "2025-10-22T22:22:22.25Z" },
+    { url = "https://files.pythonhosted.org/packages/7d/89/33e675dccff11a06d4d85dbb4d1865f878d5020cbb69b2c1e7b2d3f82562/rpds_py-0.28.0-cp312-cp312-win32.whl", hash = "sha256:d15431e334fba488b081d47f30f091e5d03c18527c325386091f31718952fe08", size = 216954, upload-time = "2025-10-22T22:22:24.105Z" },
+    { url = "https://files.pythonhosted.org/packages/af/36/45f6ebb3210887e8ee6dbf1bc710ae8400bb417ce165aaf3024b8360d999/rpds_py-0.28.0-cp312-cp312-win_amd64.whl", hash = "sha256:a410542d61fc54710f750d3764380b53bf09e8c4edbf2f9141a82aa774a04f7c", size = 227844, upload-time = "2025-10-22T22:22:25.551Z" },
+    { url = "https://files.pythonhosted.org/packages/57/91/f3fb250d7e73de71080f9a221d19bd6a1c1eb0d12a1ea26513f6c1052ad6/rpds_py-0.28.0-cp312-cp312-win_arm64.whl", hash = "sha256:1f0cfd1c69e2d14f8c892b893997fa9a60d890a0c8a603e88dca4955f26d1edd", size = 217624, upload-time = "2025-10-22T22:22:26.914Z" },
+    { url = "https://files.pythonhosted.org/packages/d3/03/ce566d92611dfac0085c2f4b048cd53ed7c274a5c05974b882a908d540a2/rpds_py-0.28.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e9e184408a0297086f880556b6168fa927d677716f83d3472ea333b42171ee3b", size = 366235, upload-time = "2025-10-22T22:22:28.397Z" },
+    { url = "https://files.pythonhosted.org/packages/00/34/1c61da1b25592b86fd285bd7bd8422f4c9d748a7373b46126f9ae792a004/rpds_py-0.28.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:edd267266a9b0448f33dc465a97cfc5d467594b600fe28e7fa2f36450e03053a", size = 348241, upload-time = "2025-10-22T22:22:30.171Z" },
+    { url = "https://files.pythonhosted.org/packages/fc/00/ed1e28616848c61c493a067779633ebf4b569eccaacf9ccbdc0e7cba2b9d/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85beb8b3f45e4e32f6802fb6cd6b17f615ef6c6a52f265371fb916fae02814aa", size = 378079, upload-time = "2025-10-22T22:22:31.644Z" },
+    { url = "https://files.pythonhosted.org/packages/11/b2/ccb30333a16a470091b6e50289adb4d3ec656fd9951ba8c5e3aaa0746a67/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d2412be8d00a1b895f8ad827cc2116455196e20ed994bb704bf138fe91a42724", size = 393151, upload-time = "2025-10-22T22:22:33.453Z" },
+    { url = "https://files.pythonhosted.org/packages/8c/d0/73e2217c3ee486d555cb84920597480627d8c0240ff3062005c6cc47773e/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cf128350d384b777da0e68796afdcebc2e9f63f0e9f242217754e647f6d32491", size = 517520, upload-time = "2025-10-22T22:22:34.949Z" },
+    { url = "https://files.pythonhosted.org/packages/c4/91/23efe81c700427d0841a4ae7ea23e305654381831e6029499fe80be8a071/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash =
"sha256:a2036d09b363aa36695d1cc1a97b36865597f4478470b0697b5ee9403f4fe399", size = 408699, upload-time = "2025-10-22T22:22:36.584Z" }, + { url = "https://files.pythonhosted.org/packages/ca/ee/a324d3198da151820a326c1f988caaa4f37fc27955148a76fff7a2d787a9/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8e1e9be4fa6305a16be628959188e4fd5cd6f1b0e724d63c6d8b2a8adf74ea6", size = 385720, upload-time = "2025-10-22T22:22:38.014Z" }, + { url = "https://files.pythonhosted.org/packages/19/ad/e68120dc05af8b7cab4a789fccd8cdcf0fe7e6581461038cc5c164cd97d2/rpds_py-0.28.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:0a403460c9dd91a7f23fc3188de6d8977f1d9603a351d5db6cf20aaea95b538d", size = 401096, upload-time = "2025-10-22T22:22:39.869Z" }, + { url = "https://files.pythonhosted.org/packages/99/90/c1e070620042459d60df6356b666bb1f62198a89d68881816a7ed121595a/rpds_py-0.28.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d7366b6553cdc805abcc512b849a519167db8f5e5c3472010cd1228b224265cb", size = 411465, upload-time = "2025-10-22T22:22:41.395Z" }, + { url = "https://files.pythonhosted.org/packages/68/61/7c195b30d57f1b8d5970f600efee72a4fad79ec829057972e13a0370fd24/rpds_py-0.28.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b43c6a3726efd50f18d8120ec0551241c38785b68952d240c45ea553912ac41", size = 558832, upload-time = "2025-10-22T22:22:42.871Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3d/06f3a718864773f69941d4deccdf18e5e47dd298b4628062f004c10f3b34/rpds_py-0.28.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0cb7203c7bc69d7c1585ebb33a2e6074492d2fc21ad28a7b9d40457ac2a51ab7", size = 583230, upload-time = "2025-10-22T22:22:44.877Z" }, + { url = "https://files.pythonhosted.org/packages/66/df/62fc783781a121e77fee9a21ead0a926f1b652280a33f5956a5e7833ed30/rpds_py-0.28.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7a52a5169c664dfb495882adc75c304ae1d50df552fbd68e100fdc719dee4ff9", size = 553268, upload-time = "2025-10-22T22:22:46.441Z" }, + { url = "https://files.pythonhosted.org/packages/84/85/d34366e335140a4837902d3dea89b51f087bd6a63c993ebdff59e93ee61d/rpds_py-0.28.0-cp313-cp313-win32.whl", hash = "sha256:2e42456917b6687215b3e606ab46aa6bca040c77af7df9a08a6dcfe8a4d10ca5", size = 217100, upload-time = "2025-10-22T22:22:48.342Z" }, + { url = "https://files.pythonhosted.org/packages/3c/1c/f25a3f3752ad7601476e3eff395fe075e0f7813fbb9862bd67c82440e880/rpds_py-0.28.0-cp313-cp313-win_amd64.whl", hash = "sha256:e0a0311caedc8069d68fc2bf4c9019b58a2d5ce3cd7cb656c845f1615b577e1e", size = 227759, upload-time = "2025-10-22T22:22:50.219Z" }, + { url = "https://files.pythonhosted.org/packages/e0/d6/5f39b42b99615b5bc2f36ab90423ea404830bdfee1c706820943e9a645eb/rpds_py-0.28.0-cp313-cp313-win_arm64.whl", hash = "sha256:04c1b207ab8b581108801528d59ad80aa83bb170b35b0ddffb29c20e411acdc1", size = 217326, upload-time = "2025-10-22T22:22:51.647Z" }, + { url = "https://files.pythonhosted.org/packages/5c/8b/0c69b72d1cee20a63db534be0df271effe715ef6c744fdf1ff23bb2b0b1c/rpds_py-0.28.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f296ea3054e11fc58ad42e850e8b75c62d9a93a9f981ad04b2e5ae7d2186ff9c", size = 355736, upload-time = "2025-10-22T22:22:53.211Z" }, + { url = "https://files.pythonhosted.org/packages/f7/6d/0c2ee773cfb55c31a8514d2cece856dd299170a49babd50dcffb15ddc749/rpds_py-0.28.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5a7306c19b19005ad98468fcefeb7100b19c79fc23a5f24a12e06d91181193fa", size = 342677, upload-time = 
"2025-10-22T22:22:54.723Z" }, + { url = "https://files.pythonhosted.org/packages/e2/1c/22513ab25a27ea205144414724743e305e8153e6abe81833b5e678650f5a/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5d9b86aa501fed9862a443c5c3116f6ead8bc9296185f369277c42542bd646b", size = 371847, upload-time = "2025-10-22T22:22:56.295Z" }, + { url = "https://files.pythonhosted.org/packages/60/07/68e6ccdb4b05115ffe61d31afc94adef1833d3a72f76c9632d4d90d67954/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5bbc701eff140ba0e872691d573b3d5d30059ea26e5785acba9132d10c8c31d", size = 381800, upload-time = "2025-10-22T22:22:57.808Z" }, + { url = "https://files.pythonhosted.org/packages/73/bf/6d6d15df80781d7f9f368e7c1a00caf764436518c4877fb28b029c4624af/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5690671cd672a45aa8616d7374fdf334a1b9c04a0cac3c854b1136e92374fe", size = 518827, upload-time = "2025-10-22T22:22:59.826Z" }, + { url = "https://files.pythonhosted.org/packages/7b/d3/2decbb2976cc452cbf12a2b0aaac5f1b9dc5dd9d1f7e2509a3ee00421249/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9f1d92ecea4fa12f978a367c32a5375a1982834649cdb96539dcdc12e609ab1a", size = 399471, upload-time = "2025-10-22T22:23:01.968Z" }, + { url = "https://files.pythonhosted.org/packages/b1/2c/f30892f9e54bd02e5faca3f6a26d6933c51055e67d54818af90abed9748e/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d252db6b1a78d0a3928b6190156042d54c93660ce4d98290d7b16b5296fb7cc", size = 377578, upload-time = "2025-10-22T22:23:03.52Z" }, + { url = "https://files.pythonhosted.org/packages/f0/5d/3bce97e5534157318f29ac06bf2d279dae2674ec12f7cb9c12739cee64d8/rpds_py-0.28.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d61b355c3275acb825f8777d6c4505f42b5007e357af500939d4a35b19177259", size = 390482, upload-time = "2025-10-22T22:23:05.391Z" }, + { url = "https://files.pythonhosted.org/packages/e3/f0/886bd515ed457b5bd93b166175edb80a0b21a210c10e993392127f1e3931/rpds_py-0.28.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:acbe5e8b1026c0c580d0321c8aae4b0a1e1676861d48d6e8c6586625055b606a", size = 402447, upload-time = "2025-10-22T22:23:06.93Z" }, + { url = "https://files.pythonhosted.org/packages/42/b5/71e8777ac55e6af1f4f1c05b47542a1eaa6c33c1cf0d300dca6a1c6e159a/rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8aa23b6f0fc59b85b4c7d89ba2965af274346f738e8d9fc2455763602e62fd5f", size = 552385, upload-time = "2025-10-22T22:23:08.557Z" }, + { url = "https://files.pythonhosted.org/packages/5d/cb/6ca2d70cbda5a8e36605e7788c4aa3bea7c17d71d213465a5a675079b98d/rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7b14b0c680286958817c22d76fcbca4800ddacef6f678f3a7c79a1fe7067fe37", size = 575642, upload-time = "2025-10-22T22:23:10.348Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d4/407ad9960ca7856d7b25c96dcbe019270b5ffdd83a561787bc682c797086/rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bcf1d210dfee61a6c86551d67ee1031899c0fdbae88b2d44a569995d43797712", size = 544507, upload-time = "2025-10-22T22:23:12.434Z" }, + { url = "https://files.pythonhosted.org/packages/51/31/2f46fe0efcac23fbf5797c6b6b7e1c76f7d60773e525cb65fcbc582ee0f2/rpds_py-0.28.0-cp313-cp313t-win32.whl", hash = "sha256:3aa4dc0fdab4a7029ac63959a3ccf4ed605fee048ba67ce89ca3168da34a1342", size = 205376, 
upload-time = "2025-10-22T22:23:13.979Z" }, + { url = "https://files.pythonhosted.org/packages/92/e4/15947bda33cbedfc134490a41841ab8870a72a867a03d4969d886f6594a2/rpds_py-0.28.0-cp313-cp313t-win_amd64.whl", hash = "sha256:7b7d9d83c942855e4fdcfa75d4f96f6b9e272d42fffcb72cd4bb2577db2e2907", size = 215907, upload-time = "2025-10-22T22:23:15.5Z" }, + { url = "https://files.pythonhosted.org/packages/08/47/ffe8cd7a6a02833b10623bf765fbb57ce977e9a4318ca0e8cf97e9c3d2b3/rpds_py-0.28.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:dcdcb890b3ada98a03f9f2bb108489cdc7580176cb73b4f2d789e9a1dac1d472", size = 353830, upload-time = "2025-10-22T22:23:17.03Z" }, + { url = "https://files.pythonhosted.org/packages/f9/9f/890f36cbd83a58491d0d91ae0db1702639edb33fb48eeb356f80ecc6b000/rpds_py-0.28.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f274f56a926ba2dc02976ca5b11c32855cbd5925534e57cfe1fda64e04d1add2", size = 341819, upload-time = "2025-10-22T22:23:18.57Z" }, + { url = "https://files.pythonhosted.org/packages/09/e3/921eb109f682aa24fb76207698fbbcf9418738f35a40c21652c29053f23d/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fe0438ac4a29a520ea94c8c7f1754cdd8feb1bc490dfda1bfd990072363d527", size = 373127, upload-time = "2025-10-22T22:23:20.216Z" }, + { url = "https://files.pythonhosted.org/packages/23/13/bce4384d9f8f4989f1a9599c71b7a2d877462e5fd7175e1f69b398f729f4/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a358a32dd3ae50e933347889b6af9a1bdf207ba5d1a3f34e1a38cd3540e6733", size = 382767, upload-time = "2025-10-22T22:23:21.787Z" }, + { url = "https://files.pythonhosted.org/packages/23/e1/579512b2d89a77c64ccef5a0bc46a6ef7f72ae0cf03d4b26dcd52e57ee0a/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e80848a71c78aa328fefaba9c244d588a342c8e03bda518447b624ea64d1ff56", size = 517585, upload-time = "2025-10-22T22:23:23.699Z" }, + { url = "https://files.pythonhosted.org/packages/62/3c/ca704b8d324a2591b0b0adcfcaadf9c862375b11f2f667ac03c61b4fd0a6/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f586db2e209d54fe177e58e0bc4946bea5fb0102f150b1b2f13de03e1f0976f8", size = 399828, upload-time = "2025-10-22T22:23:25.713Z" }, + { url = "https://files.pythonhosted.org/packages/da/37/e84283b9e897e3adc46b4c88bb3f6ec92a43bd4d2f7ef5b13459963b2e9c/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ae8ee156d6b586e4292491e885d41483136ab994e719a13458055bec14cf370", size = 375509, upload-time = "2025-10-22T22:23:27.32Z" }, + { url = "https://files.pythonhosted.org/packages/1a/c2/a980beab869d86258bf76ec42dec778ba98151f253a952b02fe36d72b29c/rpds_py-0.28.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:a805e9b3973f7e27f7cab63a6b4f61d90f2e5557cff73b6e97cd5b8540276d3d", size = 392014, upload-time = "2025-10-22T22:23:29.332Z" }, + { url = "https://files.pythonhosted.org/packages/da/b5/b1d3c5f9d3fa5aeef74265f9c64de3c34a0d6d5cd3c81c8b17d5c8f10ed4/rpds_py-0.28.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5d3fd16b6dc89c73a4da0b4ac8b12a7ecc75b2864b95c9e5afed8003cb50a728", size = 402410, upload-time = "2025-10-22T22:23:31.14Z" }, + { url = "https://files.pythonhosted.org/packages/74/ae/cab05ff08dfcc052afc73dcb38cbc765ffc86f94e966f3924cd17492293c/rpds_py-0.28.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:6796079e5d24fdaba6d49bda28e2c47347e89834678f2bc2c1b4fc1489c0fb01", size = 553593, 
upload-time = "2025-10-22T22:23:32.834Z" }, + { url = "https://files.pythonhosted.org/packages/70/80/50d5706ea2a9bfc9e9c5f401d91879e7c790c619969369800cde202da214/rpds_py-0.28.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:76500820c2af232435cbe215e3324c75b950a027134e044423f59f5b9a1ba515", size = 576925, upload-time = "2025-10-22T22:23:34.47Z" }, + { url = "https://files.pythonhosted.org/packages/ab/12/85a57d7a5855a3b188d024b099fd09c90db55d32a03626d0ed16352413ff/rpds_py-0.28.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bbdc5640900a7dbf9dd707fe6388972f5bbd883633eb68b76591044cfe346f7e", size = 542444, upload-time = "2025-10-22T22:23:36.093Z" }, + { url = "https://files.pythonhosted.org/packages/6c/65/10643fb50179509150eb94d558e8837c57ca8b9adc04bd07b98e57b48f8c/rpds_py-0.28.0-cp314-cp314-win32.whl", hash = "sha256:adc8aa88486857d2b35d75f0640b949759f79dc105f50aa2c27816b2e0dd749f", size = 207968, upload-time = "2025-10-22T22:23:37.638Z" }, + { url = "https://files.pythonhosted.org/packages/b4/84/0c11fe4d9aaea784ff4652499e365963222481ac647bcd0251c88af646eb/rpds_py-0.28.0-cp314-cp314-win_amd64.whl", hash = "sha256:66e6fa8e075b58946e76a78e69e1a124a21d9a48a5b4766d15ba5b06869d1fa1", size = 218876, upload-time = "2025-10-22T22:23:39.179Z" }, + { url = "https://files.pythonhosted.org/packages/0f/e0/3ab3b86ded7bb18478392dc3e835f7b754cd446f62f3fc96f4fe2aca78f6/rpds_py-0.28.0-cp314-cp314-win_arm64.whl", hash = "sha256:a6fe887c2c5c59413353b7c0caff25d0e566623501ccfff88957fa438a69377d", size = 212506, upload-time = "2025-10-22T22:23:40.755Z" }, + { url = "https://files.pythonhosted.org/packages/51/ec/d5681bb425226c3501eab50fc30e9d275de20c131869322c8a1729c7b61c/rpds_py-0.28.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7a69df082db13c7070f7b8b1f155fa9e687f1d6aefb7b0e3f7231653b79a067b", size = 355433, upload-time = "2025-10-22T22:23:42.259Z" }, + { url = "https://files.pythonhosted.org/packages/be/ec/568c5e689e1cfb1ea8b875cffea3649260955f677fdd7ddc6176902d04cd/rpds_py-0.28.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b1cde22f2c30ebb049a9e74c5374994157b9b70a16147d332f89c99c5960737a", size = 342601, upload-time = "2025-10-22T22:23:44.372Z" }, + { url = "https://files.pythonhosted.org/packages/32/fe/51ada84d1d2a1d9d8f2c902cfddd0133b4a5eb543196ab5161d1c07ed2ad/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5338742f6ba7a51012ea470bd4dc600a8c713c0c72adaa0977a1b1f4327d6592", size = 372039, upload-time = "2025-10-22T22:23:46.025Z" }, + { url = "https://files.pythonhosted.org/packages/07/c1/60144a2f2620abade1a78e0d91b298ac2d9b91bc08864493fa00451ef06e/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1460ebde1bcf6d496d80b191d854adedcc619f84ff17dc1c6d550f58c9efbba", size = 382407, upload-time = "2025-10-22T22:23:48.098Z" }, + { url = "https://files.pythonhosted.org/packages/45/ed/091a7bbdcf4038a60a461df50bc4c82a7ed6d5d5e27649aab61771c17585/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e3eb248f2feba84c692579257a043a7699e28a77d86c77b032c1d9fbb3f0219c", size = 518172, upload-time = "2025-10-22T22:23:50.16Z" }, + { url = "https://files.pythonhosted.org/packages/54/dd/02cc90c2fd9c2ef8016fd7813bfacd1c3a1325633ec8f244c47b449fc868/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3bbba5def70b16cd1c1d7255666aad3b290fbf8d0fe7f9f91abafb73611a91", size = 399020, upload-time = "2025-10-22T22:23:51.81Z" }, + { url = 
"https://files.pythonhosted.org/packages/ab/81/5d98cc0329bbb911ccecd0b9e19fbf7f3a5de8094b4cda5e71013b2dd77e/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3114f4db69ac5a1f32e7e4d1cbbe7c8f9cf8217f78e6e002cedf2d54c2a548ed", size = 377451, upload-time = "2025-10-22T22:23:53.711Z" }, + { url = "https://files.pythonhosted.org/packages/b4/07/4d5bcd49e3dfed2d38e2dcb49ab6615f2ceb9f89f5a372c46dbdebb4e028/rpds_py-0.28.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:4b0cb8a906b1a0196b863d460c0222fb8ad0f34041568da5620f9799b83ccf0b", size = 390355, upload-time = "2025-10-22T22:23:55.299Z" }, + { url = "https://files.pythonhosted.org/packages/3f/79/9f14ba9010fee74e4f40bf578735cfcbb91d2e642ffd1abe429bb0b96364/rpds_py-0.28.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf681ac76a60b667106141e11a92a3330890257e6f559ca995fbb5265160b56e", size = 403146, upload-time = "2025-10-22T22:23:56.929Z" }, + { url = "https://files.pythonhosted.org/packages/39/4c/f08283a82ac141331a83a40652830edd3a4a92c34e07e2bbe00baaea2f5f/rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1e8ee6413cfc677ce8898d9cde18cc3a60fc2ba756b0dec5b71eb6eb21c49fa1", size = 552656, upload-time = "2025-10-22T22:23:58.62Z" }, + { url = "https://files.pythonhosted.org/packages/61/47/d922fc0666f0dd8e40c33990d055f4cc6ecff6f502c2d01569dbed830f9b/rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b3072b16904d0b5572a15eb9d31c1954e0d3227a585fc1351aa9878729099d6c", size = 576782, upload-time = "2025-10-22T22:24:00.312Z" }, + { url = "https://files.pythonhosted.org/packages/d3/0c/5bafdd8ccf6aa9d3bfc630cfece457ff5b581af24f46a9f3590f790e3df2/rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b670c30fd87a6aec281c3c9896d3bae4b205fd75d79d06dc87c2503717e46092", size = 544671, upload-time = "2025-10-22T22:24:02.297Z" }, + { url = "https://files.pythonhosted.org/packages/2c/37/dcc5d8397caa924988693519069d0beea077a866128719351a4ad95e82fc/rpds_py-0.28.0-cp314-cp314t-win32.whl", hash = "sha256:8014045a15b4d2b3476f0a287fcc93d4f823472d7d1308d47884ecac9e612be3", size = 205749, upload-time = "2025-10-22T22:24:03.848Z" }, + { url = "https://files.pythonhosted.org/packages/d7/69/64d43b21a10d72b45939a28961216baeb721cc2a430f5f7c3bfa21659a53/rpds_py-0.28.0-cp314-cp314t-win_amd64.whl", hash = "sha256:7a4e59c90d9c27c561eb3160323634a9ff50b04e4f7820600a2beb0ac90db578", size = 216233, upload-time = "2025-10-22T22:24:05.471Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] diff --git a/src/llama-stack-api/llama_stack_api/vector_io.py b/src/llama_stack_api/vector_io.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/vector_io.py rename to src/llama_stack_api/vector_io.py diff --git a/src/llama-stack-api/llama_stack_api/vector_stores.py b/src/llama_stack_api/vector_stores.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/vector_stores.py rename to src/llama_stack_api/vector_stores.py diff --git a/src/llama-stack-api/llama_stack_api/version.py b/src/llama_stack_api/version.py similarity index 100% rename from src/llama-stack-api/llama_stack_api/version.py rename to src/llama_stack_api/version.py diff --git a/tests/integration/batches/conftest.py b/tests/integration/batches/conftest.py index b9c0ac916..4dc5b7993 100644 --- a/tests/integration/batches/conftest.py +++ b/tests/integration/batches/conftest.py @@ -13,6 +13,7 @@ from contextlib import contextmanager from io import BytesIO import pytest + from llama_stack_api import OpenAIFilePurpose diff --git a/tests/integration/files/test_files.py b/tests/integration/files/test_files.py index 61878ac4c..1f19c88c5 100644 --- a/tests/integration/files/test_files.py +++ b/tests/integration/files/test_files.py @@ -9,9 +9,9 @@ from unittest.mock import patch import pytest import requests -from llama_stack_api import OpenAIFilePurpose from llama_stack.core.datatypes import User +from llama_stack_api import OpenAIFilePurpose purpose = OpenAIFilePurpose.ASSISTANTS diff --git a/tests/integration/inference/test_provider_data_routing.py 
b/tests/integration/inference/test_provider_data_routing.py index d007b57d6..e4a0a24b5 100644 --- a/tests/integration/inference/test_provider_data_routing.py +++ b/tests/integration/inference/test_provider_data_routing.py @@ -15,6 +15,9 @@ that enables routing based on provider_data alone. from unittest.mock import AsyncMock, patch import pytest + +from llama_stack.core.library_client import LlamaStackAsLibraryClient +from llama_stack.core.telemetry.telemetry import MetricEvent from llama_stack_api import ( Api, OpenAIAssistantMessageParam, @@ -23,9 +26,6 @@ from llama_stack_api import ( OpenAIChoice, ) -from llama_stack.core.library_client import LlamaStackAsLibraryClient -from llama_stack.core.telemetry.telemetry import MetricEvent - class OpenAIChatCompletionWithMetrics(OpenAIChatCompletion): metrics: list[MetricEvent] | None = None diff --git a/tests/integration/post_training/test_post_training.py b/tests/integration/post_training/test_post_training.py index ff6925b58..e6868019a 100644 --- a/tests/integration/post_training/test_post_training.py +++ b/tests/integration/post_training/test_post_training.py @@ -9,6 +9,8 @@ import time import uuid import pytest + +from llama_stack.log import get_logger from llama_stack_api import ( DataConfig, DatasetFormat, @@ -18,8 +20,6 @@ from llama_stack_api import ( TrainingConfig, ) -from llama_stack.log import get_logger - # Configure logging logger = get_logger(name=__name__, category="post_training") diff --git a/tests/integration/safety/test_llama_guard.py b/tests/integration/safety/test_llama_guard.py index 99b4982f0..a554752cd 100644 --- a/tests/integration/safety/test_llama_guard.py +++ b/tests/integration/safety/test_llama_guard.py @@ -12,9 +12,9 @@ import warnings from collections.abc import Generator import pytest -from llama_stack_api import ViolationLevel from llama_stack.models.llama.sku_types import CoreModelId +from llama_stack_api import ViolationLevel # Llama Guard models available for text and vision shields LLAMA_GUARD_TEXT_MODELS = [CoreModelId.llama_guard_4_12b.value] diff --git a/tests/integration/safety/test_safety.py b/tests/integration/safety/test_safety.py index 6a926f1d5..857ff2f81 100644 --- a/tests/integration/safety/test_safety.py +++ b/tests/integration/safety/test_safety.py @@ -7,6 +7,7 @@ import base64 import mimetypes import pytest + from llama_stack_api import ViolationLevel CODE_SCANNER_ENABLED_PROVIDERS = {"ollama", "together", "fireworks"} diff --git a/tests/integration/safety/test_vision_safety.py b/tests/integration/safety/test_vision_safety.py index b85a23263..dc7b7e1ad 100644 --- a/tests/integration/safety/test_vision_safety.py +++ b/tests/integration/safety/test_vision_safety.py @@ -9,6 +9,7 @@ import mimetypes import os import pytest + from llama_stack_api import ViolationLevel VISION_SHIELD_ENABLED_PROVIDERS = {"together"} diff --git a/tests/integration/tool_runtime/test_registration.py b/tests/integration/tool_runtime/test_registration.py index 1b1b6ef28..036a5f018 100644 --- a/tests/integration/tool_runtime/test_registration.py +++ b/tests/integration/tool_runtime/test_registration.py @@ -7,9 +7,9 @@ import re import pytest -from llama_stack_api import ToolGroupNotFoundError from llama_stack.core.library_client import LlamaStackAsLibraryClient +from llama_stack_api import ToolGroupNotFoundError from tests.common.mcp import MCP_TOOLGROUP_ID, make_mcp_server diff --git a/tests/integration/vector_io/test_openai_vector_stores.py b/tests/integration/vector_io/test_openai_vector_stores.py index 
c65dfecac..102f3f00c 100644 --- a/tests/integration/vector_io/test_openai_vector_stores.py +++ b/tests/integration/vector_io/test_openai_vector_stores.py @@ -8,12 +8,12 @@ import time from io import BytesIO import pytest -from llama_stack_api import Chunk, ExpiresAfter from llama_stack_client import BadRequestError from openai import BadRequestError as OpenAIBadRequestError from llama_stack.core.library_client import LlamaStackAsLibraryClient from llama_stack.log import get_logger +from llama_stack_api import Chunk, ExpiresAfter from ..conftest import vector_provider_wrapper diff --git a/tests/integration/vector_io/test_vector_io.py b/tests/integration/vector_io/test_vector_io.py index acaa44bcb..29dbd3e56 100644 --- a/tests/integration/vector_io/test_vector_io.py +++ b/tests/integration/vector_io/test_vector_io.py @@ -5,6 +5,7 @@ # the root directory of this source tree. import pytest + from llama_stack_api import Chunk from ..conftest import vector_provider_wrapper diff --git a/tests/unit/conversations/test_conversations.py b/tests/unit/conversations/test_conversations.py index 2f942eb9c..95c54d379 100644 --- a/tests/unit/conversations/test_conversations.py +++ b/tests/unit/conversations/test_conversations.py @@ -8,7 +8,6 @@ import tempfile from pathlib import Path import pytest -from llama_stack_api import OpenAIResponseInputMessageContentText, OpenAIResponseMessage from openai.types.conversations.conversation import Conversation as OpenAIConversation from openai.types.conversations.conversation_item import ConversationItem as OpenAIConversationItem from pydantic import TypeAdapter @@ -25,6 +24,7 @@ from llama_stack.core.storage.datatypes import ( StorageConfig, ) from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends +from llama_stack_api import OpenAIResponseInputMessageContentText, OpenAIResponseMessage @pytest.fixture diff --git a/tests/unit/core/routers/test_safety_router.py b/tests/unit/core/routers/test_safety_router.py index 7e465513e..1b24a59a2 100644 --- a/tests/unit/core/routers/test_safety_router.py +++ b/tests/unit/core/routers/test_safety_router.py @@ -6,10 +6,9 @@ from unittest.mock import AsyncMock -from llama_stack_api import ListShieldsResponse, ModerationObject, ModerationObjectResults, Shield - from llama_stack.core.datatypes import SafetyConfig from llama_stack.core.routers.safety import SafetyRouter +from llama_stack_api import ListShieldsResponse, ModerationObject, ModerationObjectResults, Shield async def test_run_moderation_uses_default_shield_when_model_missing(): diff --git a/tests/unit/core/routers/test_vector_io.py b/tests/unit/core/routers/test_vector_io.py index 03bc1ff5f..a6df0694b 100644 --- a/tests/unit/core/routers/test_vector_io.py +++ b/tests/unit/core/routers/test_vector_io.py @@ -7,6 +7,8 @@ from unittest.mock import AsyncMock, Mock import pytest + +from llama_stack.core.routers.vector_io import VectorIORouter from llama_stack_api import ( ModelNotFoundError, ModelType, @@ -14,8 +16,6 @@ from llama_stack_api import ( OpenAICreateVectorStoreRequestWithExtraBody, ) -from llama_stack.core.routers.vector_io import VectorIORouter - async def test_single_provider_auto_selection(): # provider_id automatically selected during vector store create() when only one provider available @@ -127,7 +127,8 @@ async def test_update_vector_store_same_provider_id_succeeds(): async def test_create_vector_store_with_unknown_embedding_model_raises_error(): - """Test that creating a vector store with an unknown embedding model raises 
ModelNotFoundError.""" +    """Test that creating a vector store with an unknown embedding model raises + ModelNotFoundError."""     mock_routing_table = Mock(impls_by_provider_id={"provider": "mock"})     mock_routing_table.get_object_by_identifier = AsyncMock(return_value=None) diff --git a/tests/unit/core/test_stack_validation.py b/tests/unit/core/test_stack_validation.py index acb31e1c9..462a25c8b 100644 --- a/tests/unit/core/test_stack_validation.py +++ b/tests/unit/core/test_stack_validation.py @@ -9,10 +9,10 @@ from unittest.mock import AsyncMock import pytest -from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, StorageConfig, VectorStoresConfig from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config +from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield class TestVectorStoresValidation: diff --git a/tests/unit/distribution/routers/test_routing_tables.py b/tests/unit/distribution/routers/test_routing_tables.py index 2405d536e..8fd9d6ec3 100644 --- a/tests/unit/distribution/routers/test_routing_tables.py +++ b/tests/unit/distribution/routers/test_routing_tables.py @@ -9,6 +9,14 @@ from unittest.mock import AsyncMock import pytest + +from llama_stack.core.datatypes import RegistryEntrySource +from llama_stack.core.routing_tables.benchmarks import BenchmarksRoutingTable +from llama_stack.core.routing_tables.datasets import DatasetsRoutingTable +from llama_stack.core.routing_tables.models import ModelsRoutingTable +from llama_stack.core.routing_tables.scoring_functions import ScoringFunctionsRoutingTable +from llama_stack.core.routing_tables.shields import ShieldsRoutingTable +from llama_stack.core.routing_tables.toolgroups import ToolGroupsRoutingTable from llama_stack_api import ( URL, Api, @@ -25,14 +33,6 @@ from llama_stack_api import ( URIDataSource, ) -from llama_stack.core.datatypes import RegistryEntrySource -from llama_stack.core.routing_tables.benchmarks import BenchmarksRoutingTable -from llama_stack.core.routing_tables.datasets import DatasetsRoutingTable -from llama_stack.core.routing_tables.models import ModelsRoutingTable -from llama_stack.core.routing_tables.scoring_functions import ScoringFunctionsRoutingTable -from llama_stack.core.routing_tables.shields import ShieldsRoutingTable -from llama_stack.core.routing_tables.toolgroups import ToolGroupsRoutingTable - class Impl: def __init__(self, api: Api): diff --git a/tests/unit/distribution/test_api_recordings.py b/tests/unit/distribution/test_api_recordings.py index f66b57df8..889f063e6 100644 --- a/tests/unit/distribution/test_api_recordings.py +++ b/tests/unit/distribution/test_api_recordings.py @@ -9,6 +9,14 @@ from pathlib import Path from unittest.mock import patch import pytest +from openai import AsyncOpenAI + +from llama_stack.testing.api_recorder import ( + APIRecordingMode, + ResponseStorage, + api_recording, + normalize_inference_request, +) # Import the real Pydantic response types instead of using Mocks from llama_stack_api import ( @@ -19,14 +27,6 @@ from llama_stack_api import ( OpenAIEmbeddingsResponse, OpenAIEmbeddingUsage, ) -from openai import AsyncOpenAI - -from llama_stack.testing.api_recorder import ( - APIRecordingMode, - ResponseStorage, - api_recording, - normalize_inference_request, -) @pytest.fixture diff --git a/tests/unit/distribution/test_distribution.py b/tests/unit/distribution/test_distribution.py 
index a27455e24..b8d6ba55d 100644 --- a/tests/unit/distribution/test_distribution.py +++ b/tests/unit/distribution/test_distribution.py @@ -9,7 +9,6 @@ from unittest.mock import patch import pytest import yaml -from llama_stack_api import ProviderSpec from pydantic import BaseModel, Field, ValidationError from llama_stack.core.datatypes import Api, Provider, StackRunConfig @@ -23,6 +22,7 @@ from llama_stack.core.storage.datatypes import ( SqlStoreReference, StorageConfig, ) +from llama_stack_api import ProviderSpec class SampleConfig(BaseModel): @@ -395,9 +395,8 @@ pip_packages: def test_external_provider_from_module_building(self, mock_providers): """Test loading an external provider from a module during build (building=True, partial spec).""" - from llama_stack_api import Api - from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec + from llama_stack_api import Api # No importlib patch needed, should not import module when type of `config` is BuildConfig or DistributionSpec build_config = BuildConfig( @@ -457,9 +456,8 @@ class TestGetExternalProvidersFromModule: """Test provider with module containing version spec (e.g., package==1.0.0).""" from types import SimpleNamespace - from llama_stack_api import ProviderSpec - from llama_stack.core.distribution import get_external_providers_from_module + from llama_stack_api import ProviderSpec fake_spec = ProviderSpec( api=Api.inference, @@ -595,9 +593,8 @@ class TestGetExternalProvidersFromModule: """Test when get_provider_spec returns a list of specs.""" from types import SimpleNamespace - from llama_stack_api import ProviderSpec - from llama_stack.core.distribution import get_external_providers_from_module + from llama_stack_api import ProviderSpec spec1 = ProviderSpec( api=Api.inference, @@ -644,9 +641,8 @@ class TestGetExternalProvidersFromModule: """Test that list return filters specs by provider_type.""" from types import SimpleNamespace - from llama_stack_api import ProviderSpec - from llama_stack.core.distribution import get_external_providers_from_module + from llama_stack_api import ProviderSpec spec1 = ProviderSpec( api=Api.inference, @@ -693,9 +689,8 @@ class TestGetExternalProvidersFromModule: """Test that list return adds multiple different provider_types when config requests them.""" from types import SimpleNamespace - from llama_stack_api import ProviderSpec - from llama_stack.core.distribution import get_external_providers_from_module + from llama_stack_api import ProviderSpec # Module returns both inline and remote variants spec1 = ProviderSpec( @@ -833,9 +828,8 @@ class TestGetExternalProvidersFromModule: """Test multiple APIs with providers.""" from types import SimpleNamespace - from llama_stack_api import ProviderSpec - from llama_stack.core.distribution import get_external_providers_from_module + from llama_stack_api import ProviderSpec inference_spec = ProviderSpec( api=Api.inference, diff --git a/tests/unit/files/test_files.py b/tests/unit/files/test_files.py index 080d1ddbe..793f4edd3 100644 --- a/tests/unit/files/test_files.py +++ b/tests/unit/files/test_files.py @@ -6,7 +6,6 @@ import pytest -from llama_stack_api import OpenAIFilePurpose, Order, ResourceNotFoundError from llama_stack.core.access_control.access_control import default_policy from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference @@ -15,6 +14,7 @@ from llama_stack.providers.inline.files.localfs import ( LocalfsFilesImplConfig, ) from llama_stack.providers.utils.sqlstore.sqlstore import 
register_sqlstore_backends +from llama_stack_api import OpenAIFilePurpose, Order, ResourceNotFoundError class MockUploadFile: diff --git a/tests/unit/providers/batches/test_reference.py b/tests/unit/providers/batches/test_reference.py index 3c93a578d..32d59234d 100644 --- a/tests/unit/providers/batches/test_reference.py +++ b/tests/unit/providers/batches/test_reference.py @@ -58,6 +58,7 @@ import json from unittest.mock import AsyncMock, MagicMock import pytest + from llama_stack_api import BatchObject, ConflictError, ResourceNotFoundError diff --git a/tests/unit/providers/batches/test_reference_idempotency.py b/tests/unit/providers/batches/test_reference_idempotency.py index 4cd5d962d..acb7ca01c 100644 --- a/tests/unit/providers/batches/test_reference_idempotency.py +++ b/tests/unit/providers/batches/test_reference_idempotency.py @@ -43,6 +43,7 @@ Key Behaviors Tested: import asyncio import pytest + from llama_stack_api import ConflictError diff --git a/tests/unit/providers/files/test_s3_files.py b/tests/unit/providers/files/test_s3_files.py index ae63c1a78..de6c92e9c 100644 --- a/tests/unit/providers/files/test_s3_files.py +++ b/tests/unit/providers/files/test_s3_files.py @@ -8,6 +8,7 @@ from unittest.mock import patch import pytest from botocore.exceptions import ClientError + from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError diff --git a/tests/unit/providers/files/test_s3_files_auth.py b/tests/unit/providers/files/test_s3_files_auth.py index 873db4e27..e113611bd 100644 --- a/tests/unit/providers/files/test_s3_files_auth.py +++ b/tests/unit/providers/files/test_s3_files_auth.py @@ -7,10 +7,10 @@ from unittest.mock import patch import pytest -from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError from llama_stack.core.datatypes import User from llama_stack.providers.remote.files.s3.files import S3FilesImpl +from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError async def test_listing_hides_other_users_file(s3_provider, sample_text_file): diff --git a/tests/unit/providers/inference/test_bedrock_adapter.py b/tests/unit/providers/inference/test_bedrock_adapter.py index b3eecc558..a20f2860a 100644 --- a/tests/unit/providers/inference/test_bedrock_adapter.py +++ b/tests/unit/providers/inference/test_bedrock_adapter.py @@ -8,11 +8,11 @@ from types import SimpleNamespace from unittest.mock import AsyncMock, MagicMock import pytest -from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody from openai import AuthenticationError from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig +from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody def test_adapter_initialization(): diff --git a/tests/unit/providers/inference/test_remote_vllm.py b/tests/unit/providers/inference/test_remote_vllm.py index e2a5455b7..958895cc4 100644 --- a/tests/unit/providers/inference/test_remote_vllm.py +++ b/tests/unit/providers/inference/test_remote_vllm.py @@ -9,6 +9,11 @@ import time from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch import pytest + +from llama_stack.core.routers.inference import InferenceRouter +from llama_stack.core.routing_tables.models import ModelsRoutingTable +from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig +from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapter from llama_stack_api import ( HealthStatus, Model, @@ -22,11 +27,6 
@@ from llama_stack_api import ( ToolChoice, ) -from llama_stack.core.routers.inference import InferenceRouter -from llama_stack.core.routing_tables.models import ModelsRoutingTable -from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig -from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapter - # These are unit test for the remote vllm provider # implementation. This should only contain tests which are specific to # the implementation details of those classes. More general diff --git a/tests/unit/providers/inline/agents/meta_reference/responses/test_streaming.py b/tests/unit/providers/inline/agents/meta_reference/responses/test_streaming.py index 36d2b86a9..658132340 100644 --- a/tests/unit/providers/inline/agents/meta_reference/responses/test_streaming.py +++ b/tests/unit/providers/inline/agents/meta_reference/responses/test_streaming.py @@ -7,12 +7,12 @@ from unittest.mock import AsyncMock import pytest -from llama_stack_api import ToolDef from llama_stack.providers.inline.agents.meta_reference.responses.streaming import ( convert_tooldef_to_chat_tool, ) from llama_stack.providers.inline.agents.meta_reference.responses.types import ChatCompletionContext +from llama_stack_api import ToolDef @pytest.fixture diff --git a/tests/unit/providers/nvidia/test_datastore.py b/tests/unit/providers/nvidia/test_datastore.py index 0d9f1cc35..36006cc39 100644 --- a/tests/unit/providers/nvidia/test_datastore.py +++ b/tests/unit/providers/nvidia/test_datastore.py @@ -8,10 +8,10 @@ import os from unittest.mock import patch import pytest -from llama_stack_api import Dataset, DatasetPurpose, ResourceType, URIDataSource from llama_stack.providers.remote.datasetio.nvidia.config import NvidiaDatasetIOConfig from llama_stack.providers.remote.datasetio.nvidia.datasetio import NvidiaDatasetIOAdapter +from llama_stack_api import Dataset, DatasetPurpose, ResourceType, URIDataSource @pytest.fixture diff --git a/tests/unit/providers/nvidia/test_eval.py b/tests/unit/providers/nvidia/test_eval.py index c41379801..783d664bf 100644 --- a/tests/unit/providers/nvidia/test_eval.py +++ b/tests/unit/providers/nvidia/test_eval.py @@ -8,6 +8,10 @@ import os from unittest.mock import MagicMock, patch import pytest + +from llama_stack.models.llama.sku_types import CoreModelId +from llama_stack.providers.remote.eval.nvidia.config import NVIDIAEvalConfig +from llama_stack.providers.remote.eval.nvidia.eval import NVIDIAEvalImpl from llama_stack_api import ( Benchmark, BenchmarkConfig, @@ -20,10 +24,6 @@ from llama_stack_api import ( TopPSamplingStrategy, ) -from llama_stack.models.llama.sku_types import CoreModelId -from llama_stack.providers.remote.eval.nvidia.config import NVIDIAEvalConfig -from llama_stack.providers.remote.eval.nvidia.eval import NVIDIAEvalImpl - MOCK_DATASET_ID = "default/test-dataset" MOCK_BENCHMARK_ID = "test-benchmark" diff --git a/tests/unit/providers/nvidia/test_parameters.py b/tests/unit/providers/nvidia/test_parameters.py index ba68a7abe..b714fc607 100644 --- a/tests/unit/providers/nvidia/test_parameters.py +++ b/tests/unit/providers/nvidia/test_parameters.py @@ -9,6 +9,12 @@ import warnings from unittest.mock import patch import pytest + +from llama_stack.core.library_client import convert_pydantic_to_json_value +from llama_stack.providers.remote.post_training.nvidia.post_training import ( + NvidiaPostTrainingAdapter, + NvidiaPostTrainingConfig, +) from llama_stack_api import ( DataConfig, DatasetFormat, @@ -19,12 +25,6 @@ from llama_stack_api 
import ( TrainingConfig, ) -from llama_stack.core.library_client import convert_pydantic_to_json_value -from llama_stack.providers.remote.post_training.nvidia.post_training import ( - NvidiaPostTrainingAdapter, - NvidiaPostTrainingConfig, -) - class TestNvidiaParameters: @pytest.fixture(autouse=True) diff --git a/tests/unit/providers/nvidia/test_rerank_inference.py b/tests/unit/providers/nvidia/test_rerank_inference.py index 8b313abcd..ee62910b8 100644 --- a/tests/unit/providers/nvidia/test_rerank_inference.py +++ b/tests/unit/providers/nvidia/test_rerank_inference.py @@ -8,11 +8,11 @@ from unittest.mock import AsyncMock, MagicMock, patch import aiohttp import pytest -from llama_stack_api import ModelType from llama_stack.providers.remote.inference.nvidia.config import NVIDIAConfig from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAInferenceAdapter from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin +from llama_stack_api import ModelType class MockResponse: diff --git a/tests/unit/providers/nvidia/test_safety.py b/tests/unit/providers/nvidia/test_safety.py index ea6254841..07e04ddea 100644 --- a/tests/unit/providers/nvidia/test_safety.py +++ b/tests/unit/providers/nvidia/test_safety.py @@ -9,6 +9,9 @@ from typing import Any from unittest.mock import AsyncMock, MagicMock, patch import pytest + +from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig +from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter from llama_stack_api import ( OpenAIAssistantMessageParam, OpenAIUserMessageParam, @@ -18,9 +21,6 @@ from llama_stack_api import ( ViolationLevel, ) -from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig -from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter - class FakeNVIDIASafetyAdapter(NVIDIASafetyAdapter): """Test implementation that provides the required shield_store.""" diff --git a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py index 4d0ce695b..94948da41 100644 --- a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py +++ b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py @@ -9,15 +9,6 @@ import warnings from unittest.mock import patch import pytest -from llama_stack_api import ( - DataConfig, - DatasetFormat, - LoraFinetuningConfig, - OptimizerConfig, - OptimizerType, - QATFinetuningConfig, - TrainingConfig, -) from llama_stack.core.library_client import convert_pydantic_to_json_value from llama_stack.providers.remote.post_training.nvidia.post_training import ( @@ -27,6 +18,15 @@ from llama_stack.providers.remote.post_training.nvidia.post_training import ( NvidiaPostTrainingJob, NvidiaPostTrainingJobStatusResponse, ) +from llama_stack_api import ( + DataConfig, + DatasetFormat, + LoraFinetuningConfig, + OptimizerConfig, + OptimizerType, + QATFinetuningConfig, + TrainingConfig, +) @pytest.fixture diff --git a/tests/unit/providers/test_bedrock.py b/tests/unit/providers/test_bedrock.py index df7453712..7126e1b69 100644 --- a/tests/unit/providers/test_bedrock.py +++ b/tests/unit/providers/test_bedrock.py @@ -7,10 +7,9 @@ from types import SimpleNamespace from unittest.mock import AsyncMock, PropertyMock, patch -from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody - from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig 
+from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody def test_can_create_adapter(): diff --git a/tests/unit/providers/utils/inference/test_openai_mixin.py b/tests/unit/providers/utils/inference/test_openai_mixin.py index b9b59bb79..5b13a75f4 100644 --- a/tests/unit/providers/utils/inference/test_openai_mixin.py +++ b/tests/unit/providers/utils/inference/test_openai_mixin.py @@ -10,12 +10,12 @@ from typing import Any from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch import pytest -from llama_stack_api import Model, ModelType, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam from pydantic import BaseModel, Field from llama_stack.core.request_headers import request_provider_data_context from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin +from llama_stack_api import Model, ModelType, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam class OpenAIMixinImpl(OpenAIMixin): diff --git a/tests/unit/providers/utils/inference/test_prompt_adapter.py b/tests/unit/providers/utils/inference/test_prompt_adapter.py index a7c9289d7..ab5736ac5 100644 --- a/tests/unit/providers/utils/inference/test_prompt_adapter.py +++ b/tests/unit/providers/utils/inference/test_prompt_adapter.py @@ -4,12 +4,11 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from llama_stack_api import OpenAIAssistantMessageParam, OpenAIUserMessageParam - from llama_stack.models.llama.datatypes import RawTextItem from llama_stack.providers.utils.inference.prompt_adapter import ( convert_openai_message_to_raw_message, ) +from llama_stack_api import OpenAIAssistantMessageParam, OpenAIUserMessageParam class TestConvertOpenAIMessageToRawMessage: diff --git a/tests/unit/providers/utils/memory/test_vector_store.py b/tests/unit/providers/utils/memory/test_vector_store.py index 00db5795a..f3241ba20 100644 --- a/tests/unit/providers/utils/memory/test_vector_store.py +++ b/tests/unit/providers/utils/memory/test_vector_store.py @@ -7,9 +7,9 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from llama_stack_api import URL, RAGDocument, TextContentItem from llama_stack.providers.utils.memory.vector_store import content_from_data_and_mime_type, content_from_doc +from llama_stack_api import URL, RAGDocument, TextContentItem async def test_content_from_doc_with_url(): diff --git a/tests/unit/providers/utils/test_model_registry.py b/tests/unit/providers/utils/test_model_registry.py index 4a85cf8b8..1e3efafa1 100644 --- a/tests/unit/providers/utils/test_model_registry.py +++ b/tests/unit/providers/utils/test_model_registry.py @@ -34,9 +34,9 @@ # import pytest -from llama_stack_api import Model from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry +from llama_stack_api import Model @pytest.fixture diff --git a/tests/unit/providers/vector_io/conftest.py b/tests/unit/providers/vector_io/conftest.py index 216e9b8ea..6408e25ab 100644 --- a/tests/unit/providers/vector_io/conftest.py +++ b/tests/unit/providers/vector_io/conftest.py @@ -9,7 +9,6 @@ from unittest.mock import AsyncMock, MagicMock, patch import numpy as np import pytest -from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, VectorStore from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig from 
llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig @@ -19,6 +18,7 @@ from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import SQLiteV from llama_stack.providers.remote.vector_io.pgvector.config import PGVectorVectorIOConfig from llama_stack.providers.remote.vector_io.pgvector.pgvector import PGVectorIndex, PGVectorVectorIOAdapter from llama_stack.providers.utils.kvstore import register_kvstore_backends +from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, VectorStore EMBEDDING_DIMENSION = 768 COLLECTION_PREFIX = "test_collection" diff --git a/tests/unit/providers/vector_io/test_faiss.py b/tests/unit/providers/vector_io/test_faiss.py index 0d5c1399f..075296cbb 100644 --- a/tests/unit/providers/vector_io/test_faiss.py +++ b/tests/unit/providers/vector_io/test_faiss.py @@ -9,13 +9,13 @@ from unittest.mock import MagicMock, patch import numpy as np import pytest -from llama_stack_api import Chunk, Files, HealthStatus, QueryChunksResponse, VectorStore from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig from llama_stack.providers.inline.vector_io.faiss.faiss import ( FaissIndex, FaissVectorIOAdapter, ) +from llama_stack_api import Chunk, Files, HealthStatus, QueryChunksResponse, VectorStore # This test is a unit test for the FaissVectorIOAdapter class. This should only contain # tests which are specific to this class. More general (API-level) tests should be placed in diff --git a/tests/unit/providers/vector_io/test_sqlite_vec.py b/tests/unit/providers/vector_io/test_sqlite_vec.py index 17a99ce1c..d1548cf37 100644 --- a/tests/unit/providers/vector_io/test_sqlite_vec.py +++ b/tests/unit/providers/vector_io/test_sqlite_vec.py @@ -8,13 +8,13 @@ import asyncio import numpy as np import pytest -from llama_stack_api import Chunk, QueryChunksResponse from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import ( SQLiteVecIndex, SQLiteVecVectorIOAdapter, _create_sqlite_connection, ) +from llama_stack_api import Chunk, QueryChunksResponse # This test is a unit test for the SQLiteVecVectorIOAdapter class. This should only contain # tests which are specific to this class. More general (API-level) tests should be placed in diff --git a/tests/unit/providers/vector_io/test_vector_io_openai_vector_stores.py b/tests/unit/providers/vector_io/test_vector_io_openai_vector_stores.py index 7ba40eefb..3797abb2c 100644 --- a/tests/unit/providers/vector_io/test_vector_io_openai_vector_stores.py +++ b/tests/unit/providers/vector_io/test_vector_io_openai_vector_stores.py @@ -10,6 +10,8 @@ from unittest.mock import AsyncMock, patch import numpy as np import pytest + +from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX from llama_stack_api import ( Chunk, OpenAICreateVectorStoreFileBatchRequestWithExtraBody, @@ -21,8 +23,6 @@ from llama_stack_api import ( VectorStoreNotFoundError, ) -from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX - # This test is a unit test for the inline VectorIO providers. This should only contain # tests which are specific to this class. 
 # tests/integration/vector_io/
@@ -255,10 +255,9 @@ async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
 
 async def test_document_id_with_invalid_type_raises_error():
     """Ensure TypeError is raised when document_id is not a string."""
-    from llama_stack_api import Chunk
-
     # Integer document_id should raise TypeError
    from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+    from llama_stack_api import Chunk
 
     chunk = Chunk(content="test", chunk_id=generate_chunk_id("test", "test"), metadata={"document_id": 12345})
     with pytest.raises(TypeError) as exc_info:
diff --git a/tests/unit/providers/vector_io/test_vector_utils.py b/tests/unit/providers/vector_io/test_vector_utils.py
index 678b76fbd..7f6b4af79 100644
--- a/tests/unit/providers/vector_io/test_vector_utils.py
+++ b/tests/unit/providers/vector_io/test_vector_utils.py
@@ -4,9 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack_api import Chunk, ChunkMetadata
-
 from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+from llama_stack_api import Chunk, ChunkMetadata
 
 # This test is a unit test for the chunk_utils.py helpers. This should only contain
 # tests which are specific to this file. More general (API-level) tests should be placed in
diff --git a/tests/unit/rag/test_rag_query.py b/tests/unit/rag/test_rag_query.py
index e3f5e46d7..7eb17b74b 100644
--- a/tests/unit/rag/test_rag_query.py
+++ b/tests/unit/rag/test_rag_query.py
@@ -7,9 +7,9 @@
 from unittest.mock import AsyncMock, MagicMock
 
 import pytest
-from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, RAGQueryConfig
 
 from llama_stack.providers.inline.tool_runtime.rag.memory import MemoryToolRuntimeImpl
+from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, RAGQueryConfig
 
 
 class TestRagQuery:
diff --git a/tests/unit/rag/test_vector_store.py b/tests/unit/rag/test_vector_store.py
index 23c12dcab..2562df8d6 100644
--- a/tests/unit/rag/test_vector_store.py
+++ b/tests/unit/rag/test_vector_store.py
@@ -12,7 +12,6 @@ from unittest.mock import AsyncMock, MagicMock
 
 import numpy as np
 import pytest
-from llama_stack_api import Chunk, OpenAIEmbeddingData, OpenAIEmbeddingsRequestWithExtraBody, RAGDocument
 
 from llama_stack.providers.utils.memory.vector_store import (
     URL,
@@ -22,6 +21,7 @@ from llama_stack.providers.utils.memory.vector_store import (
     make_overlapped_chunks,
 )
 from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+from llama_stack_api import Chunk, OpenAIEmbeddingData, OpenAIEmbeddingsRequestWithExtraBody, RAGDocument
 
 DUMMY_PDF_PATH = Path(os.path.abspath(__file__)).parent / "fixtures" / "dummy.pdf"
 # Depending on the machine, this can get parsed a couple of ways
diff --git a/tests/unit/registry/test_registry.py b/tests/unit/registry/test_registry.py
index 01f486ab2..1b5032782 100644
--- a/tests/unit/registry/test_registry.py
+++ b/tests/unit/registry/test_registry.py
@@ -6,7 +6,6 @@
 
 import pytest
-from llama_stack_api import Model, VectorStore
 
 from llama_stack.core.datatypes import VectorStoreWithOwner
 from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
@@ -16,6 +15,7 @@ from llama_stack.core.store.registry import (
     DiskDistributionRegistry,
 )
 from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
+from llama_stack_api import Model, VectorStore
 
 
 @pytest.fixture
@@ -303,9 +303,8 @@ async def test_double_registration_different_objects(disk_dist_registry):
 
 async def test_double_registration_with_cache(cached_disk_dist_registry):
     """Test double registration behavior with caching enabled."""
-    from llama_stack_api import ModelType
-
     from llama_stack.core.datatypes import ModelWithOwner
+    from llama_stack_api import ModelType
 
     model1 = ModelWithOwner(
         identifier="test_model",
diff --git a/tests/unit/registry/test_registry_acl.py b/tests/unit/registry/test_registry_acl.py
index 2827f60b9..a09d2a30d 100644
--- a/tests/unit/registry/test_registry_acl.py
+++ b/tests/unit/registry/test_registry_acl.py
@@ -5,10 +5,9 @@
 # the root directory of this source tree.
 
 
-from llama_stack_api import ModelType
-
 from llama_stack.core.datatypes import ModelWithOwner, User
 from llama_stack.core.store.registry import CachedDiskDistributionRegistry
+from llama_stack_api import ModelType
 
 
 async def test_registry_cache_with_acl(cached_disk_dist_registry):
diff --git a/tests/unit/server/test_access_control.py b/tests/unit/server/test_access_control.py
index 1df933d4d..23a9636d5 100644
--- a/tests/unit/server/test_access_control.py
+++ b/tests/unit/server/test_access_control.py
@@ -8,12 +8,12 @@ from unittest.mock import MagicMock, Mock, patch
 
 import pytest
 import yaml
-from llama_stack_api import Api, ModelType
 from pydantic import TypeAdapter, ValidationError
 
 from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
 from llama_stack.core.datatypes import AccessRule, ModelWithOwner, User
 from llama_stack.core.routing_tables.models import ModelsRoutingTable
+from llama_stack_api import Api, ModelType
 
 
 class AsyncMock(MagicMock):
diff --git a/tests/unit/server/test_resolver.py b/tests/unit/server/test_resolver.py
index 071178f96..8f8a61ea7 100644
--- a/tests/unit/server/test_resolver.py
+++ b/tests/unit/server/test_resolver.py
@@ -9,7 +9,6 @@ import sys
 from typing import Any, Protocol
 from unittest.mock import AsyncMock, MagicMock
 
-from llama_stack_api import Inference, InlineProviderSpec, ProviderSpec
 from pydantic import BaseModel, Field
 
 from llama_stack.core.datatypes import Api, Provider, StackRunConfig
@@ -27,6 +26,7 @@ from llama_stack.core.storage.datatypes import (
 )
 from llama_stack.providers.utils.kvstore import register_kvstore_backends
 from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import Inference, InlineProviderSpec, ProviderSpec
 
 
 def add_protocol_methods(cls: type, protocol: type[Protocol]) -> None:
diff --git a/tests/unit/server/test_sse.py b/tests/unit/server/test_sse.py
index fdaf9022b..d82743c80 100644
--- a/tests/unit/server/test_sse.py
+++ b/tests/unit/server/test_sse.py
@@ -9,9 +9,9 @@ import logging  # allow-direct-logging
 from unittest.mock import AsyncMock, MagicMock
 
 import pytest
-from llama_stack_api import PaginatedResponse
 
 from llama_stack.core.server.server import create_dynamic_typed_route, create_sse_event, sse_generator
+from llama_stack_api import PaginatedResponse
 
 
 @pytest.fixture
diff --git a/tests/unit/tools/test_tools_json_schema.py b/tests/unit/tools/test_tools_json_schema.py
index 79e0b6e28..623955984 100644
--- a/tests/unit/tools/test_tools_json_schema.py
+++ b/tests/unit/tools/test_tools_json_schema.py
@@ -9,10 +9,10 @@ Unit tests for JSON Schema-based tool definitions.
 Tests the new input_schema and output_schema fields.
""" -from llama_stack_api import ToolDef from pydantic import ValidationError from llama_stack.models.llama.datatypes import BuiltinTool, ToolDefinition +from llama_stack_api import ToolDef class TestToolDefValidation: diff --git a/tests/unit/utils/inference/test_inference_store.py b/tests/unit/utils/inference/test_inference_store.py index 4da20b125..bdcc529ce 100644 --- a/tests/unit/utils/inference/test_inference_store.py +++ b/tests/unit/utils/inference/test_inference_store.py @@ -7,6 +7,10 @@ import time import pytest + +from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig +from llama_stack.providers.utils.inference.inference_store import InferenceStore +from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends from llama_stack_api import ( OpenAIAssistantMessageParam, OpenAIChatCompletion, @@ -15,10 +19,6 @@ from llama_stack_api import ( Order, ) -from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig -from llama_stack.providers.utils.inference.inference_store import InferenceStore -from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends - @pytest.fixture(autouse=True) def setup_backends(tmp_path): diff --git a/tests/unit/utils/responses/test_responses_store.py b/tests/unit/utils/responses/test_responses_store.py index 1119a93d8..8c108d9c1 100644 --- a/tests/unit/utils/responses/test_responses_store.py +++ b/tests/unit/utils/responses/test_responses_store.py @@ -9,11 +9,11 @@ from tempfile import TemporaryDirectory from uuid import uuid4 import pytest -from llama_stack_api import OpenAIMessageParam, OpenAIResponseInput, OpenAIResponseObject, OpenAIUserMessageParam, Order from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig from llama_stack.providers.utils.responses.responses_store import ResponsesStore from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends +from llama_stack_api import OpenAIMessageParam, OpenAIResponseInput, OpenAIResponseObject, OpenAIUserMessageParam, Order def build_store(db_path: str, policy: list | None = None) -> ResponsesStore: diff --git a/uv.lock b/uv.lock index ddf8c1cd4..0b8b555f6 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.12" resolution-markers = [ "(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')", @@ -2095,7 +2095,7 @@ requires-dist = [ { name = "httpx" }, { name = "jinja2", specifier = ">=3.1.6" }, { name = "jsonschema" }, - { name = "llama-stack-api", editable = "src/llama-stack-api" }, + { name = "llama-stack-api", editable = "src/llama_stack_api" }, { name = "llama-stack-client", marker = "extra == 'client'", specifier = ">=0.3.0" }, { name = "openai", specifier = ">=2.5.0" }, { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" }, @@ -2230,8 +2230,8 @@ unit = [ [[package]] name = "llama-stack-api" -version = "0.1.0" -source = { editable = "src/llama-stack-api" } +version = "0.4.0.dev0" +source = { editable = "src/llama_stack_api" } dependencies = [ { name = "jsonschema" }, { name = "opentelemetry-exporter-otlp-proto-http" },