From 35e17e92fafdd3e2196d63e9187b471f3c5a4419 Mon Sep 17 00:00:00 2001
From: Charlie Doern
Date: Tue, 29 Jul 2025 15:18:54 -0400
Subject: [PATCH 1/4] refactor: convert providers to be installed via package

Currently, providers declare their dependencies in a `pip_packages` list.
Rather than maintain our own form of Python dependency management, we
should use a `pyproject.toml` file in each provider, declaring its
dependencies in a more trackable manner.

Each provider can then be installed using the `module` field already
present in the ProviderSpec, which points to the directory the provider
lives in; we can then simply `uv pip install` that directory instead of
installing the dependencies one by one.

Signed-off-by: Charlie Doern
---
 llama_stack/providers/datatypes.py | 3 +-
 .../agents/meta_reference/pyproject.toml | 24 +++++++++++++
 .../inline/datasetio/localfs/pyproject.toml | 20 +++++++++++
 .../inline/eval/meta_reference/pyproject.toml | 24 +++++++++++++
 .../inline/files/localfs/pyproject.toml | 18 ++++++++++
 .../inference/meta_reference/pyproject.toml | 29 +++++++++++++++
 .../sentence_transformers/pyproject.toml | 22 ++++++++++++
 .../post_training/huggingface/pyproject.toml | 24 +++++++++++++
 .../post_training/torchtune/pyproject.toml | 23 ++++++++++++
 .../inline/safety/code_scanner/pyproject.toml | 20 +++++++++++
 .../inline/safety/llama_guard/pyproject.toml | 18 ++++++++++
 .../inline/safety/prompt_guard/pyproject.toml | 21 +++++++++++
 .../inline/scoring/basic/pyproject.toml | 20 +++++++++++
 .../inline/scoring/braintrust/pyproject.toml | 21 +++++++++++
 .../scoring/llm_as_judge/pyproject.toml | 18 ++++++++++
 .../telemetry/meta_reference/pyproject.toml | 21 +++++++++++
 .../inline/tool_runtime/rag/pyproject.toml | 28 +++++++++++++++
 .../inline/vector_io/chroma/pyproject.toml | 20 +++++++++++
 .../inline/vector_io/faiss/pyproject.toml | 20 +++++++++++
 .../inline/vector_io/milvus/pyproject.toml | 20 +++++++++++
 .../inline/vector_io/qdrant/pyproject.toml | 20 +++++++++++
 .../vector_io/sqlite_vec/pyproject.toml | 20 +++++++++++
 llama_stack/providers/registry/datasetio.py | 1 -
 llama_stack/providers/registry/eval.py | 1 -
 llama_stack/providers/registry/inference.py | 36 -------------------
 .../providers/registry/post_training.py | 3 --
 llama_stack/providers/registry/safety.py | 4 ---
 llama_stack/providers/registry/scoring.py | 3 --
 .../providers/registry/tool_runtime.py | 5 ---
 llama_stack/providers/registry/vector_io.py | 12 -------
 .../datasetio/huggingface/pyproject.toml | 20 +++++++++++
 .../remote/datasetio/nvidia/pyproject.toml | 20 +++++++++++
 .../remote/eval/nvidia/pyproject.toml | 20 +++++++++++
 .../remote/inference/anthropic/pyproject.toml | 21 +++++++++++
 .../remote/inference/bedrock/pyproject.toml | 21 +++++++++++
 .../remote/inference/cerebras/pyproject.toml | 21 +++++++++++
 .../inference/databricks/pyproject.toml | 21 +++++++++++
 .../remote/inference/fireworks/pyproject.toml | 21 +++++++++++
 .../remote/inference/gemini/pyproject.toml | 21 +++++++++++
 .../remote/inference/groq/pyproject.toml | 21 +++++++++++
 .../llama_openai_compat/pyproject.toml | 21 +++++++++++
 .../remote/inference/nvidia/pyproject.toml | 21 +++++++++++
 .../remote/inference/ollama/pyproject.toml | 23 ++++++++++++
 .../remote/inference/openai/pyproject.toml | 21 +++++++++++
 .../inference/passthrough/pyproject.toml | 20 +++++++++++
 .../remote/inference/runpod/pyproject.toml | 21 +++++++++++
 .../remote/inference/sambanova/pyproject.toml | 21 +++++++++++
 .../remote/inference/tgi/pyproject.toml | 22 ++++++++++++
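For illustration only (not part of the patch): with a per-provider `pyproject.toml`, a build step can resolve a ProviderSpec's `module` field to the provider's source directory and install it in one shot. A minimal sketch, assuming it runs from the repository root, that the module path mirrors the directory layout shown in this diff, and that `uv` is on PATH; `install_provider` is a hypothetical helper, not part of llama-stack:

# Hypothetical sketch, not the actual llama-stack build code: map a
# ProviderSpec `module` to its directory in the source tree and install it
# with uv, pulling in whatever that provider's pyproject.toml declares.
from pathlib import Path
import subprocess

def install_provider(module: str, repo_root: Path = Path(".")) -> None:
    # e.g. module = "llama_stack.providers.inline.datasetio.localfs"
    provider_dir = repo_root.joinpath(*module.split("."))
    # One `uv pip install <dir>` replaces installing pip_packages one by one.
    subprocess.run(["uv", "pip", "install", str(provider_dir)], check=True)

install_provider("llama_stack.providers.inline.datasetio.localfs")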
.../remote/inference/together/pyproject.toml | 21 +++++++++++ .../remote/inference/vertexai/pyproject.toml | 19 ++++++++++ .../remote/inference/vllm/pyproject.toml | 21 +++++++++++ .../remote/inference/watsonx/pyproject.toml | 21 +++++++++++ .../post_training/nvidia/pyproject.toml | 21 +++++++++++ .../remote/safety/bedrock/pyproject.toml | 20 +++++++++++ .../remote/safety/nvidia/pyproject.toml | 20 +++++++++++ .../remote/safety/sambanova/pyproject.toml | 21 +++++++++++ .../tool_runtime/bing_search/pyproject.toml | 20 +++++++++++ .../tool_runtime/brave_search/pyproject.toml | 20 +++++++++++ .../model_context_protocol/pyproject.toml | 20 +++++++++++ .../tool_runtime/tavily_search/pyproject.toml | 20 +++++++++++ .../tool_runtime/wolfram_alpha/pyproject.toml | 20 +++++++++++ .../remote/vector_io/chroma/pyproject.toml | 20 +++++++++++ .../remote/vector_io/milvus/pyproject.toml | 20 +++++++++++ .../remote/vector_io/pgvector/pyproject.toml | 20 +++++++++++ .../remote/vector_io/qdrant/pyproject.toml | 20 +++++++++++ .../remote/vector_io/weaviate/pyproject.toml | 20 +++++++++++ 66 files changed, 1193 insertions(+), 67 deletions(-) create mode 100644 llama_stack/providers/inline/agents/meta_reference/pyproject.toml create mode 100644 llama_stack/providers/inline/datasetio/localfs/pyproject.toml create mode 100644 llama_stack/providers/inline/eval/meta_reference/pyproject.toml create mode 100644 llama_stack/providers/inline/files/localfs/pyproject.toml create mode 100644 llama_stack/providers/inline/inference/meta_reference/pyproject.toml create mode 100644 llama_stack/providers/inline/inference/sentence_transformers/pyproject.toml create mode 100644 llama_stack/providers/inline/post_training/huggingface/pyproject.toml create mode 100644 llama_stack/providers/inline/post_training/torchtune/pyproject.toml create mode 100644 llama_stack/providers/inline/safety/code_scanner/pyproject.toml create mode 100644 llama_stack/providers/inline/safety/llama_guard/pyproject.toml create mode 100644 llama_stack/providers/inline/safety/prompt_guard/pyproject.toml create mode 100644 llama_stack/providers/inline/scoring/basic/pyproject.toml create mode 100644 llama_stack/providers/inline/scoring/braintrust/pyproject.toml create mode 100644 llama_stack/providers/inline/scoring/llm_as_judge/pyproject.toml create mode 100644 llama_stack/providers/inline/telemetry/meta_reference/pyproject.toml create mode 100644 llama_stack/providers/inline/tool_runtime/rag/pyproject.toml create mode 100644 llama_stack/providers/inline/vector_io/chroma/pyproject.toml create mode 100644 llama_stack/providers/inline/vector_io/faiss/pyproject.toml create mode 100644 llama_stack/providers/inline/vector_io/milvus/pyproject.toml create mode 100644 llama_stack/providers/inline/vector_io/qdrant/pyproject.toml create mode 100644 llama_stack/providers/inline/vector_io/sqlite_vec/pyproject.toml create mode 100644 llama_stack/providers/remote/datasetio/huggingface/pyproject.toml create mode 100644 llama_stack/providers/remote/datasetio/nvidia/pyproject.toml create mode 100644 llama_stack/providers/remote/eval/nvidia/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/anthropic/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/bedrock/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/cerebras/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/databricks/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/fireworks/pyproject.toml create mode 
100644 llama_stack/providers/remote/inference/gemini/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/groq/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/llama_openai_compat/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/nvidia/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/ollama/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/openai/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/passthrough/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/runpod/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/sambanova/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/tgi/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/together/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/vertexai/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/vllm/pyproject.toml create mode 100644 llama_stack/providers/remote/inference/watsonx/pyproject.toml create mode 100644 llama_stack/providers/remote/post_training/nvidia/pyproject.toml create mode 100644 llama_stack/providers/remote/safety/bedrock/pyproject.toml create mode 100644 llama_stack/providers/remote/safety/nvidia/pyproject.toml create mode 100644 llama_stack/providers/remote/safety/sambanova/pyproject.toml create mode 100644 llama_stack/providers/remote/tool_runtime/bing_search/pyproject.toml create mode 100644 llama_stack/providers/remote/tool_runtime/brave_search/pyproject.toml create mode 100644 llama_stack/providers/remote/tool_runtime/model_context_protocol/pyproject.toml create mode 100644 llama_stack/providers/remote/tool_runtime/tavily_search/pyproject.toml create mode 100644 llama_stack/providers/remote/tool_runtime/wolfram_alpha/pyproject.toml create mode 100644 llama_stack/providers/remote/vector_io/chroma/pyproject.toml create mode 100644 llama_stack/providers/remote/vector_io/milvus/pyproject.toml create mode 100644 llama_stack/providers/remote/vector_io/pgvector/pyproject.toml create mode 100644 llama_stack/providers/remote/vector_io/qdrant/pyproject.toml create mode 100644 llama_stack/providers/remote/vector_io/weaviate/pyproject.toml diff --git a/llama_stack/providers/datatypes.py b/llama_stack/providers/datatypes.py index 5e15dd8e1..508ef66ac 100644 --- a/llama_stack/providers/datatypes.py +++ b/llama_stack/providers/datatypes.py @@ -132,7 +132,6 @@ class ProviderSpec(BaseModel): ) is_external: bool = Field(default=False, description="Notes whether this provider is an external provider.") - # used internally by the resolver; this is a hack for now deps__: list[str] = Field(default_factory=list) @@ -182,7 +181,7 @@ A description of the provider. This is used to display in the documentation. 
class InlineProviderSpec(ProviderSpec): pip_packages: list[str] = Field( default_factory=list, - description="The pip dependencies needed for this implementation", + description="The pip dependencies needed for this implementation (deprecated - use package_name instead)", ) container_image: str | None = Field( default=None, diff --git a/llama_stack/providers/inline/agents/meta_reference/pyproject.toml b/llama_stack/providers/inline/agents/meta_reference/pyproject.toml new file mode 100644 index 000000000..35cd5fcde --- /dev/null +++ b/llama_stack/providers/inline/agents/meta_reference/pyproject.toml @@ -0,0 +1,24 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-agents-meta-reference" +version = "0.1.0" +description = "Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "matplotlib", + "pillow", + "pandas", + "scikit-learn", + "mcp>=1.8.1", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/datasetio/localfs/pyproject.toml b/llama_stack/providers/inline/datasetio/localfs/pyproject.toml new file mode 100644 index 000000000..2e0c33eac --- /dev/null +++ b/llama_stack/providers/inline/datasetio/localfs/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-datasetio-localfs" +version = "0.1.0" +description = "Local filesystem-based dataset I/O provider for reading and writing datasets to local storage" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "pandas", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/eval/meta_reference/pyproject.toml b/llama_stack/providers/inline/eval/meta_reference/pyproject.toml new file mode 100644 index 000000000..d0a0a5e38 --- /dev/null +++ b/llama_stack/providers/inline/eval/meta_reference/pyproject.toml @@ -0,0 +1,24 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-eval-meta-reference" +version = "0.1.0" +description = "Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "tree_sitter", + "pythainlp", + "langdetect", + "emoji", + "nltk", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/files/localfs/pyproject.toml b/llama_stack/providers/inline/files/localfs/pyproject.toml new file mode 100644 index 000000000..814d2e71d --- /dev/null +++ b/llama_stack/providers/inline/files/localfs/pyproject.toml @@ -0,0 +1,18 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-files-localfs" +version = "0.1.0" +description = "Local filesystem-based file storage provider for managing files and documents locally" +authors = [{ name = "Meta Llama", email = 
"llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/inference/meta_reference/pyproject.toml b/llama_stack/providers/inline/inference/meta_reference/pyproject.toml new file mode 100644 index 000000000..9618e8f0b --- /dev/null +++ b/llama_stack/providers/inline/inference/meta_reference/pyproject.toml @@ -0,0 +1,29 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-meta-reference" +version = "0.1.0" +description = "Meta's reference implementation of inference with support for various model formats and optimization techniques" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "accelerate", + "fairscale", + "torch", + "torchvision", + "transformers", + "zmq", + "lm-format-enforcer", + "sentence-transformers", + "torchao==0.8.0", + "fbgemm-gpu-genai==1.1.2", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/inference/sentence_transformers/pyproject.toml b/llama_stack/providers/inline/inference/sentence_transformers/pyproject.toml new file mode 100644 index 000000000..2221cb6cb --- /dev/null +++ b/llama_stack/providers/inline/inference/sentence_transformers/pyproject.toml @@ -0,0 +1,22 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-sentence-transformers" +version = "0.1.0" +description = "Sentence Transformers inference provider for text embeddings and similarity search" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "torch", + "torchvision", + "sentence-transformers", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/post_training/huggingface/pyproject.toml b/llama_stack/providers/inline/post_training/huggingface/pyproject.toml new file mode 100644 index 000000000..5188b9189 --- /dev/null +++ b/llama_stack/providers/inline/post_training/huggingface/pyproject.toml @@ -0,0 +1,24 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-post-training-huggingface" +version = "0.1.0" +description = "HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "torch", + "trl", + "transformers", + "peft", + "datasets", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/post_training/torchtune/pyproject.toml b/llama_stack/providers/inline/post_training/torchtune/pyproject.toml new file mode 100644 index 000000000..a3a7a887c --- /dev/null +++ b/llama_stack/providers/inline/post_training/torchtune/pyproject.toml @@ -0,0 +1,23 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-post-training-torchtune" +version = "0.1.0" +description = "TorchTune-based post-training provider for fine-tuning and 
optimizing models using Meta's TorchTune framework" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "torch", + "torchtune==0.5.0", + "torchao==0.8.0", + "numpy", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/safety/code_scanner/pyproject.toml b/llama_stack/providers/inline/safety/code_scanner/pyproject.toml new file mode 100644 index 000000000..0cd40a88b --- /dev/null +++ b/llama_stack/providers/inline/safety/code_scanner/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-safety-code-scanner" +version = "0.1.0" +description = "Code Scanner safety provider for detecting security vulnerabilities and unsafe code patterns" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "codeshield", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/safety/llama_guard/pyproject.toml b/llama_stack/providers/inline/safety/llama_guard/pyproject.toml new file mode 100644 index 000000000..71df3238c --- /dev/null +++ b/llama_stack/providers/inline/safety/llama_guard/pyproject.toml @@ -0,0 +1,18 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-safety-llama-guard" +version = "0.1.0" +description = "Llama Guard safety provider for content moderation and safety filtering using Meta's Llama Guard model" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/safety/prompt_guard/pyproject.toml b/llama_stack/providers/inline/safety/prompt_guard/pyproject.toml new file mode 100644 index 000000000..cee17b302 --- /dev/null +++ b/llama_stack/providers/inline/safety/prompt_guard/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-safety-prompt-guard" +version = "0.1.0" +description = "Prompt Guard safety provider for detecting and filtering unsafe prompts and content" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "transformers[accelerate]", + "torch", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/scoring/basic/pyproject.toml b/llama_stack/providers/inline/scoring/basic/pyproject.toml new file mode 100644 index 000000000..1fc86c36c --- /dev/null +++ b/llama_stack/providers/inline/scoring/basic/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-scoring-basic" +version = "0.1.0" +description = "Basic scoring provider for simple evaluation metrics and scoring functions" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] 
+include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/scoring/braintrust/pyproject.toml b/llama_stack/providers/inline/scoring/braintrust/pyproject.toml new file mode 100644 index 000000000..c5d0780c8 --- /dev/null +++ b/llama_stack/providers/inline/scoring/braintrust/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-scoring-braintrust" +version = "0.1.0" +description = "Braintrust scoring provider for evaluation and scoring using the Braintrust platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "autoevals", + "openai", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/pyproject.toml b/llama_stack/providers/inline/scoring/llm_as_judge/pyproject.toml new file mode 100644 index 000000000..46cdc96ea --- /dev/null +++ b/llama_stack/providers/inline/scoring/llm_as_judge/pyproject.toml @@ -0,0 +1,18 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-scoring-llm-as-judge" +version = "0.1.0" +description = "LLM-as-judge scoring provider that uses language models to evaluate and score responses" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/telemetry/meta_reference/pyproject.toml b/llama_stack/providers/inline/telemetry/meta_reference/pyproject.toml new file mode 100644 index 000000000..23d41f7ba --- /dev/null +++ b/llama_stack/providers/inline/telemetry/meta_reference/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-telemetry-meta-reference" +version = "0.1.0" +description = "Meta's reference implementation of telemetry and observability using OpenTelemetry" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "opentelemetry-sdk", + "opentelemetry-exporter-otlp-proto-http", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/tool_runtime/rag/pyproject.toml b/llama_stack/providers/inline/tool_runtime/rag/pyproject.toml new file mode 100644 index 000000000..53510b2d4 --- /dev/null +++ b/llama_stack/providers/inline/tool_runtime/rag/pyproject.toml @@ -0,0 +1,28 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-tool-runtime-rag" +version = "0.1.0" +description = "RAG (Retrieval-Augmented Generation) tool runtime for document ingestion, chunking, and semantic search" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "chardet", + "pypdf", + "tqdm", + "numpy", + "scikit-learn", + "scipy", + "nltk", + "sentencepiece", + "transformers", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/vector_io/chroma/pyproject.toml 
b/llama_stack/providers/inline/vector_io/chroma/pyproject.toml new file mode 100644 index 000000000..dd33ed593 --- /dev/null +++ b/llama_stack/providers/inline/vector_io/chroma/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-chroma" +version = "0.1.0" +description = "Chroma inline vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "chromadb", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/vector_io/faiss/pyproject.toml b/llama_stack/providers/inline/vector_io/faiss/pyproject.toml new file mode 100644 index 000000000..712d70ba2 --- /dev/null +++ b/llama_stack/providers/inline/vector_io/faiss/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-faiss" +version = "0.1.0" +description = "Faiss inline vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "faiss-cpu", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/vector_io/milvus/pyproject.toml b/llama_stack/providers/inline/vector_io/milvus/pyproject.toml new file mode 100644 index 000000000..3d0ea94c9 --- /dev/null +++ b/llama_stack/providers/inline/vector_io/milvus/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-milvus" +version = "0.1.0" +description = "Milvus inline vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "pymilvus>=2.4.10", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/vector_io/qdrant/pyproject.toml b/llama_stack/providers/inline/vector_io/qdrant/pyproject.toml new file mode 100644 index 000000000..c2b7ee82d --- /dev/null +++ b/llama_stack/providers/inline/vector_io/qdrant/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-qdrant" +version = "0.1.0" +description = "Qdrant inline vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "qdrant-client", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/inline/vector_io/sqlite_vec/pyproject.toml b/llama_stack/providers/inline/vector_io/sqlite_vec/pyproject.toml new file mode 100644 index 000000000..37cb0bf67 --- /dev/null +++ b/llama_stack/providers/inline/vector_io/sqlite_vec/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-sqlite-vec" +version = "0.1.0" +description = "SQLite-Vec inline vector database provider for Llama Stack" 
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "sqlite-vec", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/registry/datasetio.py b/llama_stack/providers/registry/datasetio.py index 43cde83fb..797185588 100644 --- a/llama_stack/providers/registry/datasetio.py +++ b/llama_stack/providers/registry/datasetio.py @@ -19,7 +19,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.datasetio, provider_type="inline::localfs", - pip_packages=["pandas"], module="llama_stack.providers.inline.datasetio.localfs", config_class="llama_stack.providers.inline.datasetio.localfs.LocalFSDatasetIOConfig", api_dependencies=[], diff --git a/llama_stack/providers/registry/eval.py b/llama_stack/providers/registry/eval.py index 9f0d17916..a9f008098 100644 --- a/llama_stack/providers/registry/eval.py +++ b/llama_stack/providers/registry/eval.py @@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.eval, provider_type="inline::meta-reference", - pip_packages=["tree_sitter", "pythainlp", "langdetect", "emoji", "nltk"], module="llama_stack.providers.inline.eval.meta_reference", config_class="llama_stack.providers.inline.eval.meta_reference.MetaReferenceEvalConfig", api_dependencies=[ diff --git a/llama_stack/providers/registry/inference.py b/llama_stack/providers/registry/inference.py index 1801cdcad..3d6f01da0 100644 --- a/llama_stack/providers/registry/inference.py +++ b/llama_stack/providers/registry/inference.py @@ -32,7 +32,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.inference, provider_type="inline::meta-reference", - pip_packages=META_REFERENCE_DEPS, module="llama_stack.providers.inline.inference.meta_reference", config_class="llama_stack.providers.inline.inference.meta_reference.MetaReferenceInferenceConfig", description="Meta's reference implementation of inference with support for various model formats and optimization techniques.", @@ -40,10 +39,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.inference, provider_type="inline::sentence-transformers", - pip_packages=[ - "torch torchvision --index-url https://download.pytorch.org/whl/cpu", - "sentence-transformers --no-deps", - ], module="llama_stack.providers.inline.inference.sentence_transformers", config_class="llama_stack.providers.inline.inference.sentence_transformers.config.SentenceTransformersInferenceConfig", description="Sentence Transformers inference provider for text embeddings and similarity search.", @@ -52,9 +47,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="cerebras", - pip_packages=[ - "cerebras_cloud_sdk", - ], module="llama_stack.providers.remote.inference.cerebras", config_class="llama_stack.providers.remote.inference.cerebras.CerebrasImplConfig", description="Cerebras inference provider for running models on Cerebras Cloud platform.", @@ -64,7 +56,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="ollama", - pip_packages=["ollama", "aiohttp", "h11>=0.16.0"], config_class="llama_stack.providers.remote.inference.ollama.OllamaImplConfig", module="llama_stack.providers.remote.inference.ollama", description="Ollama inference provider for running local models through the Ollama runtime.", @@ -74,7 +65,6 @@ def 
available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="vllm", - pip_packages=["openai"], module="llama_stack.providers.remote.inference.vllm", config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig", description="Remote vLLM inference provider for connecting to vLLM servers.", @@ -84,7 +74,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="tgi", - pip_packages=["huggingface_hub", "aiohttp"], module="llama_stack.providers.remote.inference.tgi", config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig", description="Text Generation Inference (TGI) provider for HuggingFace model serving.", @@ -94,7 +83,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="hf::serverless", - pip_packages=["huggingface_hub", "aiohttp"], module="llama_stack.providers.remote.inference.tgi", config_class="llama_stack.providers.remote.inference.tgi.InferenceAPIImplConfig", description="HuggingFace Inference API serverless provider for on-demand model inference.", @@ -104,7 +92,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="hf::endpoint", - pip_packages=["huggingface_hub", "aiohttp"], module="llama_stack.providers.remote.inference.tgi", config_class="llama_stack.providers.remote.inference.tgi.InferenceEndpointImplConfig", description="HuggingFace Inference Endpoints provider for dedicated model serving.", @@ -114,9 +101,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="fireworks", - pip_packages=[ - "fireworks-ai", - ], module="llama_stack.providers.remote.inference.fireworks", config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig", provider_data_validator="llama_stack.providers.remote.inference.fireworks.FireworksProviderDataValidator", @@ -127,9 +111,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="together", - pip_packages=[ - "together", - ], module="llama_stack.providers.remote.inference.together", config_class="llama_stack.providers.remote.inference.together.TogetherImplConfig", provider_data_validator="llama_stack.providers.remote.inference.together.TogetherProviderDataValidator", @@ -140,7 +121,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="bedrock", - pip_packages=["boto3"], module="llama_stack.providers.remote.inference.bedrock", config_class="llama_stack.providers.remote.inference.bedrock.BedrockConfig", description="AWS Bedrock inference provider for accessing various AI models through AWS's managed service.", @@ -150,9 +130,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="databricks", - pip_packages=[ - "openai", - ], module="llama_stack.providers.remote.inference.databricks", config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig", description="Databricks inference provider for running models on Databricks' unified analytics platform.", @@ -162,9 +139,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="nvidia", - pip_packages=[ - "openai", - ], module="llama_stack.providers.remote.inference.nvidia", config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig", description="NVIDIA inference 
provider for accessing NVIDIA NIM models and AI services.", @@ -174,7 +148,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="runpod", - pip_packages=["openai"], module="llama_stack.providers.remote.inference.runpod", config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig", description="RunPod inference provider for running models on RunPod's cloud GPU platform.", @@ -184,7 +157,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="openai", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.openai", config_class="llama_stack.providers.remote.inference.openai.OpenAIConfig", provider_data_validator="llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator", @@ -195,7 +167,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="anthropic", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.anthropic", config_class="llama_stack.providers.remote.inference.anthropic.AnthropicConfig", provider_data_validator="llama_stack.providers.remote.inference.anthropic.config.AnthropicProviderDataValidator", @@ -206,7 +177,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="gemini", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.gemini", config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig", provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator", @@ -217,7 +187,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="vertexai", - pip_packages=["litellm", "google-cloud-aiplatform"], module="llama_stack.providers.remote.inference.vertexai", config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig", provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator", @@ -247,7 +216,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="groq", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.groq", config_class="llama_stack.providers.remote.inference.groq.GroqConfig", provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator", @@ -258,7 +226,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="llama-openai-compat", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.llama_openai_compat", config_class="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaCompatConfig", provider_data_validator="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator", @@ -269,7 +236,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="sambanova", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.sambanova", config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig", provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator", @@ -280,7 +246,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="passthrough", - pip_packages=[], module="llama_stack.providers.remote.inference.passthrough", config_class="llama_stack.providers.remote.inference.passthrough.PassthroughImplConfig", 
provider_data_validator="llama_stack.providers.remote.inference.passthrough.PassthroughProviderDataValidator", @@ -291,7 +256,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="watsonx", - pip_packages=["ibm_watson_machine_learning"], module="llama_stack.providers.remote.inference.watsonx", config_class="llama_stack.providers.remote.inference.watsonx.WatsonXConfig", provider_data_validator="llama_stack.providers.remote.inference.watsonx.WatsonXProviderDataValidator", diff --git a/llama_stack/providers/registry/post_training.py b/llama_stack/providers/registry/post_training.py index ffd64ef7c..3df45ded0 100644 --- a/llama_stack/providers/registry/post_training.py +++ b/llama_stack/providers/registry/post_training.py @@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.post_training, provider_type="inline::torchtune", - pip_packages=["torch", "torchtune==0.5.0", "torchao==0.8.0", "numpy"], module="llama_stack.providers.inline.post_training.torchtune", config_class="llama_stack.providers.inline.post_training.torchtune.TorchtunePostTrainingConfig", api_dependencies=[ @@ -25,7 +24,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.post_training, provider_type="inline::huggingface", - pip_packages=["torch", "trl", "transformers", "peft", "datasets"], module="llama_stack.providers.inline.post_training.huggingface", config_class="llama_stack.providers.inline.post_training.huggingface.HuggingFacePostTrainingConfig", api_dependencies=[ @@ -38,7 +36,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.post_training, adapter=AdapterSpec( adapter_type="nvidia", - pip_packages=["requests", "aiohttp"], module="llama_stack.providers.remote.post_training.nvidia", config_class="llama_stack.providers.remote.post_training.nvidia.NvidiaPostTrainingConfig", description="NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform.", diff --git a/llama_stack/providers/registry/safety.py b/llama_stack/providers/registry/safety.py index 9dd791bd8..f39f277a7 100644 --- a/llama_stack/providers/registry/safety.py +++ b/llama_stack/providers/registry/safety.py @@ -30,7 +30,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.safety, provider_type="inline::llama-guard", - pip_packages=[], module="llama_stack.providers.inline.safety.llama_guard", config_class="llama_stack.providers.inline.safety.llama_guard.LlamaGuardConfig", api_dependencies=[ @@ -52,7 +51,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.safety, adapter=AdapterSpec( adapter_type="bedrock", - pip_packages=["boto3"], module="llama_stack.providers.remote.safety.bedrock", config_class="llama_stack.providers.remote.safety.bedrock.BedrockSafetyConfig", description="AWS Bedrock safety provider for content moderation using AWS's safety services.", @@ -62,7 +60,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.safety, adapter=AdapterSpec( adapter_type="nvidia", - pip_packages=["requests"], module="llama_stack.providers.remote.safety.nvidia", config_class="llama_stack.providers.remote.safety.nvidia.NVIDIASafetyConfig", description="NVIDIA's safety provider for content moderation and safety filtering.", @@ -72,7 +69,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.safety, adapter=AdapterSpec( adapter_type="sambanova", - pip_packages=["litellm", "requests"], module="llama_stack.providers.remote.safety.sambanova", 
config_class="llama_stack.providers.remote.safety.sambanova.SambaNovaSafetyConfig", provider_data_validator="llama_stack.providers.remote.safety.sambanova.config.SambaNovaProviderDataValidator", diff --git a/llama_stack/providers/registry/scoring.py b/llama_stack/providers/registry/scoring.py index 79293d888..1c8367a0d 100644 --- a/llama_stack/providers/registry/scoring.py +++ b/llama_stack/providers/registry/scoring.py @@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.scoring, provider_type="inline::basic", - pip_packages=["requests"], module="llama_stack.providers.inline.scoring.basic", config_class="llama_stack.providers.inline.scoring.basic.BasicScoringConfig", api_dependencies=[ @@ -25,7 +24,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.scoring, provider_type="inline::llm-as-judge", - pip_packages=[], module="llama_stack.providers.inline.scoring.llm_as_judge", config_class="llama_stack.providers.inline.scoring.llm_as_judge.LlmAsJudgeScoringConfig", api_dependencies=[ @@ -38,7 +36,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.scoring, provider_type="inline::braintrust", - pip_packages=["autoevals", "openai"], module="llama_stack.providers.inline.scoring.braintrust", config_class="llama_stack.providers.inline.scoring.braintrust.BraintrustScoringConfig", api_dependencies=[ diff --git a/llama_stack/providers/registry/tool_runtime.py b/llama_stack/providers/registry/tool_runtime.py index 661851443..cc57bb7b6 100644 --- a/llama_stack/providers/registry/tool_runtime.py +++ b/llama_stack/providers/registry/tool_runtime.py @@ -41,7 +41,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="brave-search", module="llama_stack.providers.remote.tool_runtime.brave_search", config_class="llama_stack.providers.remote.tool_runtime.brave_search.config.BraveSearchToolConfig", - pip_packages=["requests"], provider_data_validator="llama_stack.providers.remote.tool_runtime.brave_search.BraveSearchToolProviderDataValidator", description="Brave Search tool for web search capabilities with privacy-focused results.", ), @@ -52,7 +51,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="bing-search", module="llama_stack.providers.remote.tool_runtime.bing_search", config_class="llama_stack.providers.remote.tool_runtime.bing_search.config.BingSearchToolConfig", - pip_packages=["requests"], provider_data_validator="llama_stack.providers.remote.tool_runtime.bing_search.BingSearchToolProviderDataValidator", description="Bing Search tool for web search capabilities using Microsoft's search engine.", ), @@ -63,7 +61,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="tavily-search", module="llama_stack.providers.remote.tool_runtime.tavily_search", config_class="llama_stack.providers.remote.tool_runtime.tavily_search.config.TavilySearchToolConfig", - pip_packages=["requests"], provider_data_validator="llama_stack.providers.remote.tool_runtime.tavily_search.TavilySearchToolProviderDataValidator", description="Tavily Search tool for AI-optimized web search with structured results.", ), @@ -74,7 +71,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="wolfram-alpha", module="llama_stack.providers.remote.tool_runtime.wolfram_alpha", config_class="llama_stack.providers.remote.tool_runtime.wolfram_alpha.config.WolframAlphaToolConfig", - pip_packages=["requests"], 
provider_data_validator="llama_stack.providers.remote.tool_runtime.wolfram_alpha.WolframAlphaToolProviderDataValidator", description="Wolfram Alpha tool for computational knowledge and mathematical calculations.", ), @@ -85,7 +81,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="model-context-protocol", module="llama_stack.providers.remote.tool_runtime.model_context_protocol", config_class="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderConfig", - pip_packages=["mcp>=1.8.1"], provider_data_validator="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderDataValidator", description="Model Context Protocol (MCP) tool for standardized tool calling and context management.", ), diff --git a/llama_stack/providers/registry/vector_io.py b/llama_stack/providers/registry/vector_io.py index 70148eb15..99db30892 100644 --- a/llama_stack/providers/registry/vector_io.py +++ b/llama_stack/providers/registry/vector_io.py @@ -19,7 +19,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.vector_io, provider_type="inline::meta-reference", - pip_packages=["faiss-cpu"], module="llama_stack.providers.inline.vector_io.faiss", config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig", deprecation_warning="Please use the `inline::faiss` provider instead.", @@ -30,7 +29,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.vector_io, provider_type="inline::faiss", - pip_packages=["faiss-cpu"], module="llama_stack.providers.inline.vector_io.faiss", config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig", api_dependencies=[Api.inference], @@ -83,7 +81,6 @@ more details about Faiss in general. InlineProviderSpec( api=Api.vector_io, provider_type="inline::sqlite-vec", - pip_packages=["sqlite-vec"], module="llama_stack.providers.inline.vector_io.sqlite_vec", config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig", api_dependencies=[Api.inference], @@ -290,7 +287,6 @@ See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) f InlineProviderSpec( api=Api.vector_io, provider_type="inline::sqlite_vec", - pip_packages=["sqlite-vec"], module="llama_stack.providers.inline.vector_io.sqlite_vec", config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig", deprecation_warning="Please use the `inline::sqlite-vec` provider (notice the hyphen instead of underscore) instead.", @@ -304,7 +300,6 @@ Please refer to the sqlite-vec provider documentation. 
Api.vector_io, AdapterSpec( adapter_type="chromadb", - pip_packages=["chromadb-client"], module="llama_stack.providers.remote.vector_io.chroma", config_class="llama_stack.providers.remote.vector_io.chroma.ChromaVectorIOConfig", description=""" @@ -347,7 +342,6 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti InlineProviderSpec( api=Api.vector_io, provider_type="inline::chromadb", - pip_packages=["chromadb"], module="llama_stack.providers.inline.vector_io.chroma", config_class="llama_stack.providers.inline.vector_io.chroma.ChromaVectorIOConfig", api_dependencies=[Api.inference], @@ -391,7 +385,6 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti Api.vector_io, AdapterSpec( adapter_type="pgvector", - pip_packages=["psycopg2-binary"], module="llama_stack.providers.remote.vector_io.pgvector", config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorVectorIOConfig", description=""" @@ -430,7 +423,6 @@ See [PGVector's documentation](https://github.com/pgvector/pgvector) for more de Api.vector_io, AdapterSpec( adapter_type="weaviate", - pip_packages=["weaviate-client"], module="llama_stack.providers.remote.vector_io.weaviate", config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateVectorIOConfig", provider_data_validator="llama_stack.providers.remote.vector_io.weaviate.WeaviateRequestProviderData", @@ -471,7 +463,6 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more InlineProviderSpec( api=Api.vector_io, provider_type="inline::qdrant", - pip_packages=["qdrant-client"], module="llama_stack.providers.inline.vector_io.qdrant", config_class="llama_stack.providers.inline.vector_io.qdrant.QdrantVectorIOConfig", api_dependencies=[Api.inference], @@ -524,7 +515,6 @@ See the [Qdrant documentation](https://qdrant.tech/documentation/) for more deta Api.vector_io, AdapterSpec( adapter_type="qdrant", - pip_packages=["qdrant-client"], module="llama_stack.providers.remote.vector_io.qdrant", config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantVectorIOConfig", description=""" @@ -538,7 +528,6 @@ Please refer to the inline provider documentation. 
Api.vector_io, AdapterSpec( adapter_type="milvus", - pip_packages=["pymilvus>=2.4.10"], module="llama_stack.providers.remote.vector_io.milvus", config_class="llama_stack.providers.remote.vector_io.milvus.MilvusVectorIOConfig", description=""" @@ -739,7 +728,6 @@ For more details on TLS configuration, refer to the [TLS setup guide](https://mi InlineProviderSpec( api=Api.vector_io, provider_type="inline::milvus", - pip_packages=["pymilvus>=2.4.10"], module="llama_stack.providers.inline.vector_io.milvus", config_class="llama_stack.providers.inline.vector_io.milvus.MilvusVectorIOConfig", api_dependencies=[Api.inference], diff --git a/llama_stack/providers/remote/datasetio/huggingface/pyproject.toml b/llama_stack/providers/remote/datasetio/huggingface/pyproject.toml new file mode 100644 index 000000000..b525f3b97 --- /dev/null +++ b/llama_stack/providers/remote/datasetio/huggingface/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-datasetio-huggingface" +version = "0.1.0" +description = "HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "datasets", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/datasetio/nvidia/pyproject.toml b/llama_stack/providers/remote/datasetio/nvidia/pyproject.toml new file mode 100644 index 000000000..6dc98aec4 --- /dev/null +++ b/llama_stack/providers/remote/datasetio/nvidia/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-datasetio-nvidia" +version = "0.1.0" +description = "NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "datasets", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/eval/nvidia/pyproject.toml b/llama_stack/providers/remote/eval/nvidia/pyproject.toml new file mode 100644 index 000000000..969c5930e --- /dev/null +++ b/llama_stack/providers/remote/eval/nvidia/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-eval-nvidia" +version = "0.1.0" +description = "NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/anthropic/pyproject.toml b/llama_stack/providers/remote/inference/anthropic/pyproject.toml new file mode 100644 index 000000000..dbceed308 --- /dev/null +++ b/llama_stack/providers/remote/inference/anthropic/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-anthropic" +version = "0.1.0" +description = "Anthropic inference provider for accessing Claude models and 
Anthropic's AI services" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "litellm", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/bedrock/pyproject.toml b/llama_stack/providers/remote/inference/bedrock/pyproject.toml new file mode 100644 index 000000000..341c4ffc7 --- /dev/null +++ b/llama_stack/providers/remote/inference/bedrock/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-bedrock" +version = "0.1.0" +description = "AWS Bedrock inference provider for accessing various AI models through AWS's managed service" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "boto3", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/cerebras/pyproject.toml b/llama_stack/providers/remote/inference/cerebras/pyproject.toml new file mode 100644 index 000000000..3ad34dbe0 --- /dev/null +++ b/llama_stack/providers/remote/inference/cerebras/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-cerebras" +version = "0.1.0" +description = "Cerebras inference provider for running models on Cerebras Cloud platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "cerebras_cloud_sdk", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/databricks/pyproject.toml b/llama_stack/providers/remote/inference/databricks/pyproject.toml new file mode 100644 index 000000000..1bb551d65 --- /dev/null +++ b/llama_stack/providers/remote/inference/databricks/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-databricks" +version = "0.1.0" +description = "Databricks inference provider for running models on Databricks' unified analytics platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "openai", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/fireworks/pyproject.toml b/llama_stack/providers/remote/inference/fireworks/pyproject.toml new file mode 100644 index 000000000..6a75e17cc --- /dev/null +++ b/llama_stack/providers/remote/inference/fireworks/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-fireworks" +version = "0.1.0" +description = "Fireworks AI inference provider for Llama models and other AI models on the Fireworks platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "fireworks-ai", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git 
a/llama_stack/providers/remote/inference/gemini/pyproject.toml b/llama_stack/providers/remote/inference/gemini/pyproject.toml new file mode 100644 index 000000000..37938260b --- /dev/null +++ b/llama_stack/providers/remote/inference/gemini/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-gemini" +version = "0.1.0" +description = "Google Gemini inference provider for accessing Gemini models and Google's AI services" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "litellm", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/groq/pyproject.toml b/llama_stack/providers/remote/inference/groq/pyproject.toml new file mode 100644 index 000000000..590b33e38 --- /dev/null +++ b/llama_stack/providers/remote/inference/groq/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-groq" +version = "0.1.0" +description = "Groq inference provider for ultra-fast inference using Groq's LPU technology" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "litellm", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/llama_openai_compat/pyproject.toml b/llama_stack/providers/remote/inference/llama_openai_compat/pyproject.toml new file mode 100644 index 000000000..b8f45e7db --- /dev/null +++ b/llama_stack/providers/remote/inference/llama_openai_compat/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-llama-openai-compat" +version = "0.1.0" +description = "Llama OpenAI-compatible provider for using Llama models with OpenAI API format" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "litellm", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/nvidia/pyproject.toml b/llama_stack/providers/remote/inference/nvidia/pyproject.toml new file mode 100644 index 000000000..6123a32b5 --- /dev/null +++ b/llama_stack/providers/remote/inference/nvidia/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-nvidia" +version = "0.1.0" +description = "NVIDIA inference provider for accessing NVIDIA NIM models and AI services" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "openai", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/ollama/pyproject.toml b/llama_stack/providers/remote/inference/ollama/pyproject.toml new file mode 100644 index 000000000..89390a612 --- /dev/null +++ b/llama_stack/providers/remote/inference/ollama/pyproject.toml @@ -0,0 +1,23 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = 
"setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-ollama" +version = "0.1.0" +description = "Ollama inference provider for running local models through the Ollama runtime" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "ollama", + "aiohttp", + "h11>=0.16.0", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/openai/pyproject.toml b/llama_stack/providers/remote/inference/openai/pyproject.toml new file mode 100644 index 000000000..471d02571 --- /dev/null +++ b/llama_stack/providers/remote/inference/openai/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-openai" +version = "0.1.0" +description = "OpenAI inference provider for accessing GPT models and other OpenAI services" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "litellm", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/passthrough/pyproject.toml b/llama_stack/providers/remote/inference/passthrough/pyproject.toml new file mode 100644 index 000000000..57765d81e --- /dev/null +++ b/llama_stack/providers/remote/inference/passthrough/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-passthrough" +version = "0.1.0" +description = "Passthrough inference provider for connecting to any external inference service not directly supported" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/runpod/pyproject.toml b/llama_stack/providers/remote/inference/runpod/pyproject.toml new file mode 100644 index 000000000..433991028 --- /dev/null +++ b/llama_stack/providers/remote/inference/runpod/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-runpod" +version = "0.1.0" +description = "RunPod inference provider for running models on RunPod's cloud GPU platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "openai", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/sambanova/pyproject.toml b/llama_stack/providers/remote/inference/sambanova/pyproject.toml new file mode 100644 index 000000000..0d8318e7c --- /dev/null +++ b/llama_stack/providers/remote/inference/sambanova/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-sambanova" +version = "0.1.0" +description = "SambaNova inference provider for running models on SambaNova's dataflow architecture" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" 
= "MIT" } +dependencies = [ + "litellm", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/tgi/pyproject.toml b/llama_stack/providers/remote/inference/tgi/pyproject.toml new file mode 100644 index 000000000..4d63b3518 --- /dev/null +++ b/llama_stack/providers/remote/inference/tgi/pyproject.toml @@ -0,0 +1,22 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-tgi" +version = "0.1.0" +description = "Text Generation Inference (TGI) provider for HuggingFace model serving" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "huggingface_hub", + "aiohttp", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/together/pyproject.toml b/llama_stack/providers/remote/inference/together/pyproject.toml new file mode 100644 index 000000000..9cb290789 --- /dev/null +++ b/llama_stack/providers/remote/inference/together/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-together" +version = "0.1.0" +description = "Together AI inference provider for open-source models and collaborative AI development" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "together", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/vertexai/pyproject.toml b/llama_stack/providers/remote/inference/vertexai/pyproject.toml new file mode 100644 index 000000000..1b92abb97 --- /dev/null +++ b/llama_stack/providers/remote/inference/vertexai/pyproject.toml @@ -0,0 +1,19 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-vertexai" +version = "0.1.0" +description = "Google VertexAI Remote Inference Provider" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "litellm", + "google-cloud-aiplatform" +] + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/vllm/pyproject.toml b/llama_stack/providers/remote/inference/vllm/pyproject.toml new file mode 100644 index 000000000..7a74daa9f --- /dev/null +++ b/llama_stack/providers/remote/inference/vllm/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-vllm" +version = "0.1.0" +description = "Remote vLLM inference provider for connecting to vLLM servers" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "openai", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/inference/watsonx/pyproject.toml b/llama_stack/providers/remote/inference/watsonx/pyproject.toml new file mode 100644 index 000000000..6928566ac --- /dev/null +++ b/llama_stack/providers/remote/inference/watsonx/pyproject.toml @@ 
-0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-inference-watsonx" +version = "0.1.0" +description = "IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "ibm_watson_machine_learning", + +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/post_training/nvidia/pyproject.toml b/llama_stack/providers/remote/post_training/nvidia/pyproject.toml new file mode 100644 index 000000000..e94aeb707 --- /dev/null +++ b/llama_stack/providers/remote/post_training/nvidia/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-post-training-nvidia" +version = "0.1.0" +description = "NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", + "aiohttp", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/safety/bedrock/pyproject.toml b/llama_stack/providers/remote/safety/bedrock/pyproject.toml new file mode 100644 index 000000000..c998cc3ee --- /dev/null +++ b/llama_stack/providers/remote/safety/bedrock/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-safety-bedrock" +version = "0.1.0" +description = "AWS Bedrock safety provider for content moderation using AWS's safety services" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "boto3", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/safety/nvidia/pyproject.toml b/llama_stack/providers/remote/safety/nvidia/pyproject.toml new file mode 100644 index 000000000..668dfc641 --- /dev/null +++ b/llama_stack/providers/remote/safety/nvidia/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-safety-nvidia" +version = "0.1.0" +description = "NVIDIA's safety provider for content moderation and safety filtering" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/safety/sambanova/pyproject.toml b/llama_stack/providers/remote/safety/sambanova/pyproject.toml new file mode 100644 index 000000000..a1c147093 --- /dev/null +++ b/llama_stack/providers/remote/safety/sambanova/pyproject.toml @@ -0,0 +1,21 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-safety-sambanova" +version = "0.1.0" +description = "SambaNova's safety provider for content moderation and safety filtering" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" 
+license = { "text" = "MIT" } +dependencies = [ + "litellm", + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/tool_runtime/bing_search/pyproject.toml b/llama_stack/providers/remote/tool_runtime/bing_search/pyproject.toml new file mode 100644 index 000000000..b2995778a --- /dev/null +++ b/llama_stack/providers/remote/tool_runtime/bing_search/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-tool-runtime-bing-search" +version = "0.1.0" +description = "Bing Search tool for web search capabilities using Microsoft's search engine" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/tool_runtime/brave_search/pyproject.toml b/llama_stack/providers/remote/tool_runtime/brave_search/pyproject.toml new file mode 100644 index 000000000..ef00f8777 --- /dev/null +++ b/llama_stack/providers/remote/tool_runtime/brave_search/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-tool-runtime-brave-search" +version = "0.1.0" +description = "Brave Search tool for web search capabilities with privacy-focused results" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/tool_runtime/model_context_protocol/pyproject.toml b/llama_stack/providers/remote/tool_runtime/model_context_protocol/pyproject.toml new file mode 100644 index 000000000..a930942d1 --- /dev/null +++ b/llama_stack/providers/remote/tool_runtime/model_context_protocol/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-tool-runtime-model-context-protocol" +version = "0.1.0" +description = "Model Context Protocol (MCP) tool for standardized tool calling and context management" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "mcp>=1.8.1", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/tool_runtime/tavily_search/pyproject.toml b/llama_stack/providers/remote/tool_runtime/tavily_search/pyproject.toml new file mode 100644 index 000000000..55e169614 --- /dev/null +++ b/llama_stack/providers/remote/tool_runtime/tavily_search/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-tool-runtime-tavily-search" +version = "0.1.0" +description = "Tavily Search tool for AI-optimized web search with structured results" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git 
a/llama_stack/providers/remote/tool_runtime/wolfram_alpha/pyproject.toml b/llama_stack/providers/remote/tool_runtime/wolfram_alpha/pyproject.toml new file mode 100644 index 000000000..866d77f1b --- /dev/null +++ b/llama_stack/providers/remote/tool_runtime/wolfram_alpha/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-tool-runtime-wolfram-alpha" +version = "0.1.0" +description = "Wolfram Alpha tool for computational knowledge and mathematical calculations" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/chroma/pyproject.toml b/llama_stack/providers/remote/vector_io/chroma/pyproject.toml new file mode 100644 index 000000000..ba728ecb5 --- /dev/null +++ b/llama_stack/providers/remote/vector_io/chroma/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-chroma-remote" +version = "0.1.0" +description = "Chroma remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "chromadb-client", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/milvus/pyproject.toml b/llama_stack/providers/remote/vector_io/milvus/pyproject.toml new file mode 100644 index 000000000..66a208604 --- /dev/null +++ b/llama_stack/providers/remote/vector_io/milvus/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-milvus-remote" +version = "0.1.0" +description = "Milvus remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "pymilvus>=2.4.10", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/pgvector/pyproject.toml b/llama_stack/providers/remote/vector_io/pgvector/pyproject.toml new file mode 100644 index 000000000..9d761dc8d --- /dev/null +++ b/llama_stack/providers/remote/vector_io/pgvector/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-pgvector" +version = "0.1.0" +description = "PGVector remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "psycopg2-binary", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/qdrant/pyproject.toml b/llama_stack/providers/remote/vector_io/qdrant/pyproject.toml new file mode 100644 index 000000000..f52b91471 --- /dev/null +++ b/llama_stack/providers/remote/vector_io/qdrant/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] 
+name = "llama-stack-provider-vector-io-qdrant-remote" +version = "0.1.0" +description = "Qdrant remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "qdrant-client", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/weaviate/pyproject.toml b/llama_stack/providers/remote/vector_io/weaviate/pyproject.toml new file mode 100644 index 000000000..811e99c97 --- /dev/null +++ b/llama_stack/providers/remote/vector_io/weaviate/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-weaviate" +version = "0.1.0" +description = "Weaviate remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "weaviate-client", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] From df4d85efb030ed64d377addd69202a9751ce35fc Mon Sep 17 00:00:00 2001 From: Charlie Doern Date: Wed, 30 Jul 2025 18:55:22 -0400 Subject: [PATCH 2/4] feat: llama stack show `llama stack show` prints all required pip dependencies. It does this by using the `module` in the provider spec as the installation directory and gets all deps using the new `pyproject.toml`, providers can be installed as a package using the dependencies in the pyproject Signed-off-by: Charlie Doern --- llama_stack/cli/stack/_show.py | 206 +++++++++++++++++++++ llama_stack/cli/stack/show.py | 75 ++++++++ llama_stack/cli/stack/stack.py | 4 +- llama_stack/cli/stack/utils.py | 109 +++++++++++ llama_stack/core/build.py | 11 +- tests/unit/distribution/test_build_path.py | 2 +- 6 files changed, 402 insertions(+), 5 deletions(-) create mode 100644 llama_stack/cli/stack/_show.py create mode 100644 llama_stack/cli/stack/show.py diff --git a/llama_stack/cli/stack/_show.py b/llama_stack/cli/stack/_show.py new file mode 100644 index 000000000..fb7160b7d --- /dev/null +++ b/llama_stack/cli/stack/_show.py @@ -0,0 +1,206 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
+ +import argparse +import importlib.resources +import json +import os +import shutil +import sys +import textwrap +from pathlib import Path + +import yaml +from prompt_toolkit import prompt +from prompt_toolkit.completion import WordCompleter +from prompt_toolkit.validation import Validator +from termcolor import colored, cprint + +from llama_stack.cli.stack.utils import ImageType, available_templates_specs, generate_run_config +from llama_stack.core.build import get_provider_dependencies +from llama_stack.core.datatypes import ( + BuildConfig, + BuildProvider, + DistributionSpec, +) +from llama_stack.core.distribution import get_provider_registry +from llama_stack.core.external import load_external_apis +from llama_stack.core.stack import replace_env_vars +from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR +from llama_stack.core.utils.exec import run_command +from llama_stack.log import get_logger +from llama_stack.providers.datatypes import Api + +TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates" + +logger = get_logger(name=__name__, category="cli") + + +# These are the dependencies needed by the distribution server. +# `llama-stack` is automatically installed by the installation script. +SERVER_DEPENDENCIES = [ + "aiosqlite", + "fastapi", + "fire", + "httpx", + "uvicorn", + "opentelemetry-sdk", + "opentelemetry-exporter-otlp-proto-http", +] + + +def run_stack_show_command(args: argparse.Namespace) -> None: + current_venv = os.environ.get("VIRTUAL_ENV") + env_name = args.env_name or current_venv + + if args.distro: + available_templates = available_templates_specs() + if args.distro not in available_templates: + cprint( + f"Could not find template {args.distro}. Please run `llama stack show --list-distros` to check out the available templates", + color="red", + file=sys.stderr, + ) + sys.exit(1) + build_config = available_templates[args.distro] + # always venv, conda is gone and container is separate. + build_config.image_type = ImageType.VENV.value + elif args.providers: + provider_list: dict[str, list[BuildProvider]] = dict() + for api_provider in args.providers.split(","): + if "=" not in api_provider: + cprint( + "Could not parse `--providers`. Please ensure the list is in the format api1=provider1,api2=provider2", + color="red", + file=sys.stderr, + ) + sys.exit(1) + api, provider_type = api_provider.split("=") + providers_for_api = get_provider_registry().get(Api(api), None) + if providers_for_api is None: + cprint( + f"{api} is not a valid API.", + color="red", + file=sys.stderr, + ) + sys.exit(1) + if provider_type in providers_for_api: + provider = BuildProvider( + provider_type=provider_type, + module=None, + ) + provider_list.setdefault(api, []).append(provider) + else: + cprint( + f"{provider} is not a valid provider for the {api} API.", + color="red", + file=sys.stderr, + ) + sys.exit(1) + distribution_spec = DistributionSpec( + providers=provider_list, + description=",".join(args.providers), + ) + build_config = BuildConfig(image_type=ImageType.VENV.value, distribution_spec=distribution_spec) + elif not args.config and not args.distro: + name = prompt( + "> Enter a name for your Llama Stack (e.g. 
my-local-stack): ", + validator=Validator.from_callable( + lambda x: len(x) > 0, + error_message="Name cannot be empty, please enter a name", + ), + ) + + image_type = prompt( + "> Enter the image type you want your Llama Stack to be built as (use to see options): ", + completer=WordCompleter([e.value for e in ImageType]), + complete_while_typing=True, + validator=Validator.from_callable( + lambda x: x in [e.value for e in ImageType], + error_message="Invalid image type. Use to see options", + ), + ) + + env_name = f"llamastack-{name}" + + cprint( + textwrap.dedent( + """ + Llama Stack is composed of several APIs working together. Let's select + the provider types (implementations) you want to use for these APIs. + """, + ), + color="green", + file=sys.stderr, + ) + + cprint("Tip: use to see options for the providers.\n", color="green", file=sys.stderr) + + providers: dict[str, list[BuildProvider]] = dict() + for api, providers_for_api in get_provider_registry().items(): + available_providers = [x for x in providers_for_api.keys() if x not in ("remote", "remote::sample")] + if not available_providers: + continue + api_provider = prompt( + f"> Enter provider for API {api.value}: ", + completer=WordCompleter(available_providers), + complete_while_typing=True, + validator=Validator.from_callable( + lambda x: x in available_providers, # noqa: B023 - see https://github.com/astral-sh/ruff/issues/7847 + error_message="Invalid provider, use to see options", + ), + ) + + string_providers = api_provider.split(" ") + + for provider in string_providers: + providers.setdefault(api.value, []).append(BuildProvider(provider_type=provider)) + + description = prompt( + "\n > (Optional) Enter a short description for your Llama Stack: ", + default="", + ) + + distribution_spec = DistributionSpec( + providers=providers, + description=description, + ) + + build_config = BuildConfig(image_type=image_type, distribution_spec=distribution_spec) + else: + with open(args.config) as f: + try: + contents = yaml.safe_load(f) + contents = replace_env_vars(contents) + build_config = BuildConfig(**contents) + build_config.image_type = "venv" + except Exception as e: + cprint( + f"Could not parse config file {args.config}: {e}", + color="red", + file=sys.stderr, + ) + sys.exit(1) + + print(f"# Dependencies for {args.distro or args.config or env_name}") + + normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(build_config) + normal_deps += SERVER_DEPENDENCIES + + # Quote deps with commas + quoted_normal_deps = [quote_if_needed(dep) for dep in normal_deps] + print(f"uv pip install {' '.join(quoted_normal_deps)}") + + for special_dep in special_deps: + print(f"uv pip install {quote_if_needed(special_dep)}") + + for external_dep in external_provider_dependencies: + print(f"uv pip install {quote_if_needed(external_dep)}") + + +def quote_if_needed(dep): + # Add quotes if the dependency contains a comma (likely version specifier) + return f"'{dep}'" if "," in dep else dep diff --git a/llama_stack/cli/stack/show.py b/llama_stack/cli/stack/show.py new file mode 100644 index 000000000..9c8c2c90e --- /dev/null +++ b/llama_stack/cli/stack/show.py @@ -0,0 +1,75 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
+import argparse +import textwrap + +from llama_stack.cli.stack.utils import ImageType +from llama_stack.cli.subcommand import Subcommand + + +class StackShow(Subcommand): + def __init__(self, subparsers: argparse._SubParsersAction): + super().__init__() + self.parser = subparsers.add_parser( + "show", + prog="llama stack show", + description="show the dependencies for a llama stack distribution", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + self._add_arguments() + self.parser.set_defaults(func=self._run_stack_show_command) + + def _add_arguments(self): + self.parser.add_argument( + "--config", + type=str, + default=None, + help="Path to a config file to use for the build. You can find example configs in llama_stack/distributions/**/build.yaml. If this argument is not provided, you will be prompted to enter information interactively", + ) + + self.parser.add_argument( + "--distro", + type=str, + default=None, + help="Name of the distro config to use for show. You may use `llama stack show --list-distros` to check out the available distros", + ) + + self.parser.add_argument( + "--list-distros", + action="store_true", + default=False, + help="Show the available templates for building a Llama Stack distribution", + ) + + self.parser.add_argument( + "--env-name", + type=str, + help=textwrap.dedent( + f"""[for image-type={"|".join(e.value for e in ImageType)}] Name of the conda or virtual environment to use for +the build. If not specified, currently active environment will be used if found. + """ + ), + default=None, + ) + self.parser.add_argument( + "--print-deps-only", + default=False, + action="store_true", + help="Print the dependencies for the stack only, without building the stack", + ) + self.parser.add_argument( + "--providers", + type=str, + default=None, + help="sync dependencies for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. Where there can be multiple providers per API.", + ) + + def _run_stack_show_command(self, args: argparse.Namespace) -> None: + # always keep implementation completely silo-ed away from CLI so CLI + # can be fast to load and reduces dependencies + from ._show import run_stack_show_command + + return run_stack_show_command(args) diff --git a/llama_stack/cli/stack/stack.py b/llama_stack/cli/stack/stack.py index 3aff78e23..85365989c 100644 --- a/llama_stack/cli/stack/stack.py +++ b/llama_stack/cli/stack/stack.py @@ -11,11 +11,11 @@ from llama_stack.cli.stack.list_stacks import StackListBuilds from llama_stack.cli.stack.utils import print_subcommand_description from llama_stack.cli.subcommand import Subcommand -from .build import StackBuild from .list_apis import StackListApis from .list_providers import StackListProviders from .remove import StackRemove from .run import StackRun +from .show import StackShow class StackParser(Subcommand): @@ -39,7 +39,7 @@ class StackParser(Subcommand): subparsers = self.parser.add_subparsers(title="stack_subcommands") # Add sub-commands - StackBuild.create(subparsers) + StackShow.create(subparsers) StackListApis.create(subparsers) StackListProviders.create(subparsers) StackRun.create(subparsers) diff --git a/llama_stack/cli/stack/utils.py b/llama_stack/cli/stack/utils.py index fdf9e1761..4d4c1b538 100644 --- a/llama_stack/cli/stack/utils.py +++ b/llama_stack/cli/stack/utils.py @@ -4,7 +4,28 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
+import json +import sys from enum import Enum +from functools import lru_cache +from pathlib import Path + +import yaml +from termcolor import cprint + +from llama_stack.core.datatypes import ( + BuildConfig, + Provider, + StackRunConfig, +) +from llama_stack.core.distribution import get_provider_registry +from llama_stack.core.resolver import InvalidProviderError +from llama_stack.core.utils.config_dirs import EXTERNAL_PROVIDERS_DIR +from llama_stack.core.utils.dynamic import instantiate_class_type +from llama_stack.core.utils.image_types import LlamaStackImageType +from llama_stack.providers.datatypes import Api + +TEMPLATES_PATH = Path(__file__).parent.parent.parent / "distributions" class ImageType(Enum): @@ -19,3 +40,91 @@ def print_subcommand_description(parser, subparsers): description = subcommand.description description_text += f" {name:<21} {description}\n" parser.epilog = description_text + + +def generate_run_config( + build_config: BuildConfig, + build_dir: Path, + image_name: str, +) -> Path: + """ + Generate a run.yaml template file for user to edit from a build.yaml file + """ + apis = list(build_config.distribution_spec.providers.keys()) + run_config = StackRunConfig( + container_image=(image_name if build_config.image_type == LlamaStackImageType.CONTAINER.value else None), + image_name=image_name, + apis=apis, + providers={}, + external_providers_dir=build_config.external_providers_dir + if build_config.external_providers_dir + else EXTERNAL_PROVIDERS_DIR, + ) + # build providers dict + provider_registry = get_provider_registry(build_config) + for api in apis: + run_config.providers[api] = [] + providers = build_config.distribution_spec.providers[api] + + for provider in providers: + pid = provider.provider_type.split("::")[-1] + + p = provider_registry[Api(api)][provider.provider_type] + if p.deprecation_error: + raise InvalidProviderError(p.deprecation_error) + + try: + config_type = instantiate_class_type(provider_registry[Api(api)][provider.provider_type].config_class) + except (ModuleNotFoundError, ValueError) as exc: + # HACK ALERT: + # This code executes after building is done, the import cannot work since the + # package is either available in the venv or container - not available on the host. 
+ # TODO: use a "is_external" flag in ProviderSpec to check if the provider is + # external + cprint( + f"Failed to import provider {provider.provider_type} for API {api} - assuming it's external, skipping: {exc}", + color="yellow", + file=sys.stderr, + ) + # Set config_type to None to avoid UnboundLocalError + config_type = None + + if config_type is not None and hasattr(config_type, "sample_run_config"): + config = config_type.sample_run_config(__distro_dir__=f"~/.llama/distributions/{image_name}") + else: + config = {} + + p_spec = Provider( + provider_id=pid, + provider_type=provider.provider_type, + config=config, + module=provider.module, + ) + run_config.providers[api].append(p_spec) + + run_config_file = build_dir / f"{image_name}-run.yaml" + + with open(run_config_file, "w") as f: + to_write = json.loads(run_config.model_dump_json()) + f.write(yaml.dump(to_write, sort_keys=False)) + + # Only print this message for non-container builds since it will be displayed before the + # container is built + # For non-container builds, the run.yaml is generated at the very end of the build process so it + # makes sense to display this message + if build_config.image_type != LlamaStackImageType.CONTAINER.value: + cprint(f"You can now run your stack with `llama stack run {run_config_file}`", color="green", file=sys.stderr) + return run_config_file + + +@lru_cache +def available_templates_specs() -> dict[str, BuildConfig]: + import yaml + + template_specs = {} + for p in TEMPLATES_PATH.rglob("*build.yaml"): + template_name = p.parent.name + with open(p) as f: + build_config = BuildConfig(**yaml.safe_load(f)) + template_specs[template_name] = build_config + return template_specs diff --git a/llama_stack/core/build.py b/llama_stack/core/build.py index 4b20588fd..5586b1dd8 100644 --- a/llama_stack/core/build.py +++ b/llama_stack/core/build.py @@ -7,6 +7,8 @@ import importlib.resources import logging import sys +import tomllib +from pathlib import Path from pydantic import BaseModel from termcolor import cprint @@ -72,8 +74,13 @@ def get_provider_dependencies( external_provider_deps.append(provider_spec.module) else: external_provider_deps.extend(provider_spec.module) - if hasattr(provider_spec, "pip_packages"): - deps.extend(provider_spec.pip_packages) + + pyproject = Path(provider_spec.module.replace(".", "/")) / "pyproject.toml" + with open(pyproject, "rb") as f: + data = tomllib.load(f) + + dependencies = data.get("project", {}).get("dependencies", []) + deps.extend(dependencies) if hasattr(provider_spec, "container_image") and provider_spec.container_image: raise ValueError("A stack's dependencies cannot have a container image") diff --git a/tests/unit/distribution/test_build_path.py b/tests/unit/distribution/test_build_path.py index 52a71286b..b4094618e 100644 --- a/tests/unit/distribution/test_build_path.py +++ b/tests/unit/distribution/test_build_path.py @@ -6,7 +6,7 @@ from pathlib import Path -from llama_stack.cli.stack._build import ( +from llama_stack.cli.stack._sync import ( _run_stack_build_command_from_build_config, ) from llama_stack.core.datatypes import BuildConfig, DistributionSpec From 7521a27a50a3e2a3e5377003a467471d43d2807d Mon Sep 17 00:00:00 2001 From: Charlie Doern Date: Wed, 30 Jul 2025 19:45:01 -0400 Subject: [PATCH 3/4] chore: remove `llama stack build` remove in favor of `llama stack sync` Signed-off-by: Charlie Doern --- llama_stack/cli/stack/_build.py | 471 -------------------------------- llama_stack/cli/stack/build.py | 100 ------- 2 files changed, 571 deletions(-) 
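The `get_provider_dependencies` change above replaces the removed `pip_packages` lists by resolving each provider's `module` to a directory and reading `[project].dependencies` from its `pyproject.toml`. A standalone sketch of that lookup, assuming it runs from a checkout root where the example module path exists (the helper name is illustrative, not the one used in `llama_stack/core/build.py`):

import tomllib
from pathlib import Path

def provider_dependencies(module: str) -> list[str]:
    # "llama_stack.providers.remote.inference.ollama" maps to
    # llama_stack/providers/remote/inference/ollama/pyproject.toml
    pyproject = Path(module.replace(".", "/")) / "pyproject.toml"
    with open(pyproject, "rb") as f:
        data = tomllib.load(f)
    return data.get("project", {}).get("dependencies", [])

if __name__ == "__main__":
    # example provider module added in this patch series; adjust if the layout differs
    print(provider_dependencies("llama_stack.providers.remote.inference.ollama"))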
delete mode 100644 llama_stack/cli/stack/_build.py delete mode 100644 llama_stack/cli/stack/build.py diff --git a/llama_stack/cli/stack/_build.py b/llama_stack/cli/stack/_build.py deleted file mode 100644 index c6e204773..000000000 --- a/llama_stack/cli/stack/_build.py +++ /dev/null @@ -1,471 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import argparse -import importlib.resources -import json -import os -import shutil -import sys -import textwrap -from functools import lru_cache -from importlib.abc import Traversable -from pathlib import Path - -import yaml -from prompt_toolkit import prompt -from prompt_toolkit.completion import WordCompleter -from prompt_toolkit.validation import Validator -from termcolor import colored, cprint - -from llama_stack.cli.stack.utils import ImageType -from llama_stack.cli.table import print_table -from llama_stack.core.build import ( - SERVER_DEPENDENCIES, - build_image, - get_provider_dependencies, -) -from llama_stack.core.configure import parse_and_maybe_upgrade_config -from llama_stack.core.datatypes import ( - BuildConfig, - BuildProvider, - DistributionSpec, - Provider, - StackRunConfig, -) -from llama_stack.core.distribution import get_provider_registry -from llama_stack.core.external import load_external_apis -from llama_stack.core.resolver import InvalidProviderError -from llama_stack.core.stack import replace_env_vars -from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR, EXTERNAL_PROVIDERS_DIR -from llama_stack.core.utils.dynamic import instantiate_class_type -from llama_stack.core.utils.exec import formulate_run_args, run_command -from llama_stack.core.utils.image_types import LlamaStackImageType -from llama_stack.providers.datatypes import Api - -DISTRIBS_PATH = Path(__file__).parent.parent.parent / "distributions" - - -@lru_cache -def available_distros_specs() -> dict[str, BuildConfig]: - import yaml - - distro_specs = {} - for p in DISTRIBS_PATH.rglob("*build.yaml"): - distro_name = p.parent.name - with open(p) as f: - build_config = BuildConfig(**yaml.safe_load(f)) - distro_specs[distro_name] = build_config - return distro_specs - - -def run_stack_build_command(args: argparse.Namespace) -> None: - if args.list_distros: - return _run_distro_list_cmd() - - if args.image_type == ImageType.VENV.value: - current_venv = os.environ.get("VIRTUAL_ENV") - image_name = args.image_name or current_venv - else: - image_name = args.image_name - - if args.template: - cprint( - "The --template argument is deprecated. Please use --distro instead.", - color="red", - file=sys.stderr, - ) - distro_name = args.template - else: - distro_name = args.distribution - - if distro_name: - available_distros = available_distros_specs() - if distro_name not in available_distros: - cprint( - f"Could not find distribution {distro_name}. 
Please run `llama stack build --list-distros` to check out the available distributions", - color="red", - file=sys.stderr, - ) - sys.exit(1) - build_config = available_distros[distro_name] - if args.image_type: - build_config.image_type = args.image_type - else: - cprint( - f"Please specify a image-type ({' | '.join(e.value for e in ImageType)}) for {distro_name}", - color="red", - file=sys.stderr, - ) - sys.exit(1) - elif args.providers: - provider_list: dict[str, list[BuildProvider]] = dict() - for api_provider in args.providers.split(","): - if "=" not in api_provider: - cprint( - "Could not parse `--providers`. Please ensure the list is in the format api1=provider1,api2=provider2", - color="red", - file=sys.stderr, - ) - sys.exit(1) - api, provider_type = api_provider.split("=") - providers_for_api = get_provider_registry().get(Api(api), None) - if providers_for_api is None: - cprint( - f"{api} is not a valid API.", - color="red", - file=sys.stderr, - ) - sys.exit(1) - if provider_type in providers_for_api: - provider = BuildProvider( - provider_type=provider_type, - module=None, - ) - provider_list.setdefault(api, []).append(provider) - else: - cprint( - f"{provider} is not a valid provider for the {api} API.", - color="red", - file=sys.stderr, - ) - sys.exit(1) - distribution_spec = DistributionSpec( - providers=provider_list, - description=",".join(args.providers), - ) - if not args.image_type: - cprint( - f"Please specify a image-type (container | venv) for {args.template}", - color="red", - file=sys.stderr, - ) - sys.exit(1) - - build_config = BuildConfig(image_type=args.image_type, distribution_spec=distribution_spec) - elif not args.config and not distro_name: - name = prompt( - "> Enter a name for your Llama Stack (e.g. my-local-stack): ", - validator=Validator.from_callable( - lambda x: len(x) > 0, - error_message="Name cannot be empty, please enter a name", - ), - ) - - image_type = prompt( - "> Enter the image type you want your Llama Stack to be built as (use to see options): ", - completer=WordCompleter([e.value for e in ImageType]), - complete_while_typing=True, - validator=Validator.from_callable( - lambda x: x in [e.value for e in ImageType], - error_message="Invalid image type. Use to see options", - ), - ) - - image_name = f"llamastack-{name}" - - cprint( - textwrap.dedent( - """ - Llama Stack is composed of several APIs working together. Let's select - the provider types (implementations) you want to use for these APIs. 
- """, - ), - color="green", - file=sys.stderr, - ) - - cprint("Tip: use to see options for the providers.\n", color="green", file=sys.stderr) - - providers: dict[str, list[BuildProvider]] = dict() - for api, providers_for_api in get_provider_registry().items(): - available_providers = [x for x in providers_for_api.keys() if x not in ("remote", "remote::sample")] - if not available_providers: - continue - api_provider = prompt( - f"> Enter provider for API {api.value}: ", - completer=WordCompleter(available_providers), - complete_while_typing=True, - validator=Validator.from_callable( - lambda x: x in available_providers, # noqa: B023 - see https://github.com/astral-sh/ruff/issues/7847 - error_message="Invalid provider, use to see options", - ), - ) - - string_providers = api_provider.split(" ") - - for provider in string_providers: - providers.setdefault(api.value, []).append(BuildProvider(provider_type=provider)) - - description = prompt( - "\n > (Optional) Enter a short description for your Llama Stack: ", - default="", - ) - - distribution_spec = DistributionSpec( - providers=providers, - description=description, - ) - - build_config = BuildConfig(image_type=image_type, distribution_spec=distribution_spec) - else: - with open(args.config) as f: - try: - contents = yaml.safe_load(f) - contents = replace_env_vars(contents) - build_config = BuildConfig(**contents) - if args.image_type: - build_config.image_type = args.image_type - except Exception as e: - cprint( - f"Could not parse config file {args.config}: {e}", - color="red", - file=sys.stderr, - ) - sys.exit(1) - - if args.print_deps_only: - print(f"# Dependencies for {distro_name or args.config or image_name}") - normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(build_config) - normal_deps += SERVER_DEPENDENCIES - print(f"uv pip install {' '.join(normal_deps)}") - for special_dep in special_deps: - print(f"uv pip install {special_dep}") - for external_dep in external_provider_dependencies: - print(f"uv pip install {external_dep}") - return - - try: - run_config = _run_stack_build_command_from_build_config( - build_config, - image_name=image_name, - config_path=args.config, - distro_name=distro_name, - ) - - except (Exception, RuntimeError) as exc: - import traceback - - cprint( - f"Error building stack: {exc}", - color="red", - file=sys.stderr, - ) - cprint("Stack trace:", color="red", file=sys.stderr) - traceback.print_exc() - sys.exit(1) - - if run_config is None: - cprint( - "Run config path is empty", - color="red", - file=sys.stderr, - ) - sys.exit(1) - - if args.run: - config_dict = yaml.safe_load(run_config.read_text()) - config = parse_and_maybe_upgrade_config(config_dict) - if config.external_providers_dir and not config.external_providers_dir.exists(): - config.external_providers_dir.mkdir(exist_ok=True) - run_args = formulate_run_args(args.image_type, image_name or config.image_name) - run_args.extend([str(os.getenv("LLAMA_STACK_PORT", 8321)), "--config", str(run_config)]) - run_command(run_args) - - -def _generate_run_config( - build_config: BuildConfig, - build_dir: Path, - image_name: str, -) -> Path: - """ - Generate a run.yaml template file for user to edit from a build.yaml file - """ - apis = list(build_config.distribution_spec.providers.keys()) - run_config = StackRunConfig( - container_image=(image_name if build_config.image_type == LlamaStackImageType.CONTAINER.value else None), - image_name=image_name, - apis=apis, - providers={}, - 
external_providers_dir=build_config.external_providers_dir - if build_config.external_providers_dir - else EXTERNAL_PROVIDERS_DIR, - ) - # build providers dict - provider_registry = get_provider_registry(build_config) - for api in apis: - run_config.providers[api] = [] - providers = build_config.distribution_spec.providers[api] - - for provider in providers: - pid = provider.provider_type.split("::")[-1] - - p = provider_registry[Api(api)][provider.provider_type] - if p.deprecation_error: - raise InvalidProviderError(p.deprecation_error) - - try: - config_type = instantiate_class_type(provider_registry[Api(api)][provider.provider_type].config_class) - except (ModuleNotFoundError, ValueError) as exc: - # HACK ALERT: - # This code executes after building is done, the import cannot work since the - # package is either available in the venv or container - not available on the host. - # TODO: use a "is_external" flag in ProviderSpec to check if the provider is - # external - cprint( - f"Failed to import provider {provider.provider_type} for API {api} - assuming it's external, skipping: {exc}", - color="yellow", - file=sys.stderr, - ) - # Set config_type to None to avoid UnboundLocalError - config_type = None - - if config_type is not None and hasattr(config_type, "sample_run_config"): - config = config_type.sample_run_config(__distro_dir__=f"~/.llama/distributions/{image_name}") - else: - config = {} - - p_spec = Provider( - provider_id=pid, - provider_type=provider.provider_type, - config=config, - module=provider.module, - ) - run_config.providers[api].append(p_spec) - - run_config_file = build_dir / f"{image_name}-run.yaml" - - with open(run_config_file, "w") as f: - to_write = json.loads(run_config.model_dump_json()) - f.write(yaml.dump(to_write, sort_keys=False)) - - # Only print this message for non-container builds since it will be displayed before the - # container is built - # For non-container builds, the run.yaml is generated at the very end of the build process so it - # makes sense to display this message - if build_config.image_type != LlamaStackImageType.CONTAINER.value: - cprint(f"You can now run your stack with `llama stack run {run_config_file}`", color="green", file=sys.stderr) - return run_config_file - - -def _run_stack_build_command_from_build_config( - build_config: BuildConfig, - image_name: str | None = None, - distro_name: str | None = None, - config_path: str | None = None, -) -> Path | Traversable: - image_name = image_name or build_config.image_name - if build_config.image_type == LlamaStackImageType.CONTAINER.value: - if distro_name: - image_name = f"distribution-{distro_name}" - else: - if not image_name: - raise ValueError("Please specify an image name when building a container image without a template") - else: - if not image_name and os.environ.get("UV_SYSTEM_PYTHON"): - image_name = "__system__" - if not image_name: - raise ValueError("Please specify an image name when building a venv image") - - # At this point, image_name should be guaranteed to be a string - if image_name is None: - raise ValueError("image_name should not be None after validation") - - if distro_name: - build_dir = DISTRIBS_BASE_DIR / distro_name - build_file_path = build_dir / f"{distro_name}-build.yaml" - else: - if image_name is None: - raise ValueError("image_name cannot be None") - build_dir = DISTRIBS_BASE_DIR / image_name - build_file_path = build_dir / f"{image_name}-build.yaml" - - os.makedirs(build_dir, exist_ok=True) - run_config_file = None - # Generate the run.yaml so it can be 
included in the container image with the proper entrypoint - # Only do this if we're building a container image and we're not using a template - if build_config.image_type == LlamaStackImageType.CONTAINER.value and not distro_name and config_path: - cprint("Generating run.yaml file", color="yellow", file=sys.stderr) - run_config_file = _generate_run_config(build_config, build_dir, image_name) - - with open(build_file_path, "w") as f: - to_write = json.loads(build_config.model_dump_json(exclude_none=True)) - f.write(yaml.dump(to_write, sort_keys=False)) - - # We first install the external APIs so that the build process can use them and discover the - # providers dependencies - if build_config.external_apis_dir: - cprint("Installing external APIs", color="yellow", file=sys.stderr) - external_apis = load_external_apis(build_config) - if external_apis: - # install the external APIs - packages = [] - for _, api_spec in external_apis.items(): - if api_spec.pip_packages: - packages.extend(api_spec.pip_packages) - cprint( - f"Installing {api_spec.name} with pip packages {api_spec.pip_packages}", - color="yellow", - file=sys.stderr, - ) - return_code = run_command(["uv", "pip", "install", *packages]) - if return_code != 0: - packages_str = ", ".join(packages) - raise RuntimeError( - f"Failed to install external APIs packages: {packages_str} (return code: {return_code})" - ) - - return_code = build_image( - build_config, - image_name, - distro_or_config=distro_name or config_path or str(build_file_path), - run_config=run_config_file.as_posix() if run_config_file else None, - ) - if return_code != 0: - raise RuntimeError(f"Failed to build image {image_name}") - - if distro_name: - # copy run.yaml from distribution to build_dir instead of generating it again - distro_path = importlib.resources.files("llama_stack") / f"distributions/{distro_name}/run.yaml" - run_config_file = build_dir / f"{distro_name}-run.yaml" - - with importlib.resources.as_file(distro_path) as path: - shutil.copy(path, run_config_file) - - cprint("Build Successful!", color="green", file=sys.stderr) - cprint(f"You can find the newly-built distribution here: {run_config_file}", color="blue", file=sys.stderr) - cprint( - "You can run the new Llama Stack distro via: " - + colored(f"llama stack run {run_config_file} --image-type {build_config.image_type}", "blue"), - color="green", - file=sys.stderr, - ) - return distro_path - else: - return _generate_run_config(build_config, build_dir, image_name) - - -def _run_distro_list_cmd() -> None: - headers = [ - "Distribution Name", - # "Providers", - "Description", - ] - - rows = [] - for distro_name, spec in available_distros_specs().items(): - rows.append( - [ - distro_name, - # json.dumps(spec.distribution_spec.providers, indent=2), - spec.distribution_spec.description, - ] - ) - print_table( - rows, - headers, - separate_rows=True, - ) diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py deleted file mode 100644 index 80cf6fb38..000000000 --- a/llama_stack/cli/stack/build.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
-import argparse -import textwrap - -from llama_stack.cli.stack.utils import ImageType -from llama_stack.cli.subcommand import Subcommand - - -class StackBuild(Subcommand): - def __init__(self, subparsers: argparse._SubParsersAction): - super().__init__() - self.parser = subparsers.add_parser( - "build", - prog="llama stack build", - description="Build a Llama stack container", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - self._add_arguments() - self.parser.set_defaults(func=self._run_stack_build_command) - - def _add_arguments(self): - self.parser.add_argument( - "--config", - type=str, - default=None, - help="Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will be prompted to enter information interactively", - ) - - self.parser.add_argument( - "--template", - type=str, - default=None, - help="""(deprecated) Name of the example template config to use for build. You may use `llama stack build --list-distros` to check out the available distributions""", - ) - self.parser.add_argument( - "--distro", - "--distribution", - dest="distribution", - type=str, - default=None, - help="""Name of the distribution to use for build. You may use `llama stack build --list-distros` to check out the available distributions""", - ) - - self.parser.add_argument( - "--list-distros", - "--list-distributions", - action="store_true", - dest="list_distros", - default=False, - help="Show the available distributions for building a Llama Stack distribution", - ) - - self.parser.add_argument( - "--image-type", - type=str, - help="Image Type to use for the build. If not specified, will use the image type from the template config.", - choices=[e.value for e in ImageType], - default=None, # no default so we can detect if a user specified --image-type and override image_type in the config - ) - - self.parser.add_argument( - "--image-name", - type=str, - help=textwrap.dedent( - f"""[for image-type={"|".join(e.value for e in ImageType)}] Name of the virtual environment to use for -the build. If not specified, currently active environment will be used if found. - """ - ), - default=None, - ) - self.parser.add_argument( - "--print-deps-only", - default=False, - action="store_true", - help="Print the dependencies for the stack only, without building the stack", - ) - - self.parser.add_argument( - "--run", - action="store_true", - default=False, - help="Run the stack after building using the same image type, name, and other applicable arguments", - ) - self.parser.add_argument( - "--providers", - type=str, - default=None, - help="Build a config for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. 
Where there can be multiple providers per API.", - ) - - def _run_stack_build_command(self, args: argparse.Namespace) -> None: - # always keep implementation completely silo-ed away from CLI so CLI - # can be fast to load and reduces dependencies - from ._build import run_stack_build_command - - return run_stack_build_command(args) From 60f71514c8c3007cf223838aa5d867b1aad84cdf Mon Sep 17 00:00:00 2001 From: Charlie Doern Date: Thu, 31 Jul 2025 20:14:37 -0400 Subject: [PATCH 4/4] test: convert tests to use sync Signed-off-by: Charlie Doern --- .../actions/setup-test-environment/action.yml | 2 +- .github/workflows/README.md | 2 +- .../workflows/integration-vector-io-tests.yml | 2 +- .github/workflows/providers-build.yml | 154 ------------------ .github/workflows/providers-show.yml | 105 ++++++++++++ llama_stack/cli/stack/_show.py | 10 +- .../remote/inference/fireworks/pyproject.toml | 2 +- tests/unit/distribution/test_build_path.py | 40 ----- tests/unit/distribution/test_stack_show.py | 21 +++ uv.lock | 38 ++--- 10 files changed, 151 insertions(+), 225 deletions(-) delete mode 100644 .github/workflows/providers-build.yml create mode 100644 .github/workflows/providers-show.yml delete mode 100644 tests/unit/distribution/test_build_path.py create mode 100644 tests/unit/distribution/test_stack_show.py diff --git a/.github/actions/setup-test-environment/action.yml b/.github/actions/setup-test-environment/action.yml index 30b9b0130..3d99dc590 100644 --- a/.github/actions/setup-test-environment/action.yml +++ b/.github/actions/setup-test-environment/action.yml @@ -42,7 +42,7 @@ runs: - name: Build Llama Stack shell: bash run: | - uv run llama stack build --template ci-tests --image-type venv + uv run llama stack show --template ci-tests | sh - name: Configure git for commits shell: bash diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 3c3d93dc2..7e0c1df34 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -11,7 +11,7 @@ Llama Stack uses GitHub Actions for Continuous Integration (CI). 
Below is a tabl | Integration Tests (Replay) | [integration-tests.yml](integration-tests.yml) | Run the integration test suite from tests/integration in replay mode | | Vector IO Integration Tests | [integration-vector-io-tests.yml](integration-vector-io-tests.yml) | Run the integration test suite with various VectorIO providers | | Pre-commit | [pre-commit.yml](pre-commit.yml) | Run pre-commit checks | -| Test Llama Stack Build | [providers-build.yml](providers-build.yml) | Test llama stack build | +| Test Llama Stack Show | [providers-show.yml](providers-show.yml) | Test llama stack Show | | Python Package Build Test | [python-build-test.yml](python-build-test.yml) | Test building the llama-stack PyPI project | | Integration Tests (Record) | [record-integration-tests.yml](record-integration-tests.yml) | Run the integration test suite from tests/integration | | Check semantic PR titles | [semantic-pr.yml](semantic-pr.yml) | Ensure that PR titles follow the conventional commit spec | diff --git a/.github/workflows/integration-vector-io-tests.yml b/.github/workflows/integration-vector-io-tests.yml index f4d28e407..912925fff 100644 --- a/.github/workflows/integration-vector-io-tests.yml +++ b/.github/workflows/integration-vector-io-tests.yml @@ -141,7 +141,7 @@ jobs: - name: Build Llama Stack run: | - uv run llama stack build --template ci-tests --image-type venv + uv run llama stack sync --template ci-tests | sh - name: Check Storage and Memory Available Before Tests if: ${{ always() }} diff --git a/.github/workflows/providers-build.yml b/.github/workflows/providers-build.yml deleted file mode 100644 index 929d76760..000000000 --- a/.github/workflows/providers-build.yml +++ /dev/null @@ -1,154 +0,0 @@ -name: Test Llama Stack Build - -run-name: Test llama stack build - -on: - push: - branches: - - main - paths: - - 'llama_stack/cli/stack/build.py' - - 'llama_stack/cli/stack/_build.py' - - 'llama_stack/core/build.*' - - 'llama_stack/core/*.sh' - - '.github/workflows/providers-build.yml' - - 'llama_stack/distributions/**' - - 'pyproject.toml' - - pull_request: - paths: - - 'llama_stack/cli/stack/build.py' - - 'llama_stack/cli/stack/_build.py' - - 'llama_stack/core/build.*' - - 'llama_stack/core/*.sh' - - '.github/workflows/providers-build.yml' - - 'llama_stack/distributions/**' - - 'pyproject.toml' - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - generate-matrix: - runs-on: ubuntu-latest - outputs: - distros: ${{ steps.set-matrix.outputs.distros }} - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Generate Distribution List - id: set-matrix - run: | - distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]') - echo "distros=$distros" >> "$GITHUB_OUTPUT" - - build: - needs: generate-matrix - runs-on: ubuntu-latest - strategy: - matrix: - distro: ${{ fromJson(needs.generate-matrix.outputs.distros) }} - image-type: [venv, container] - fail-fast: false # We want to run all jobs even if some fail - - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Print build dependencies - run: | - uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only - - - name: Run Llama Stack Build - run: | - # 
USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead - # LLAMA_STACK_DIR is set to the current directory so we are building from the source - USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test - - - name: Print dependencies in the image - if: matrix.image-type == 'venv' - run: | - uv pip list - - build-single-provider: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Build a single provider - run: | - USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --image-type venv --image-name test --providers inference=remote::ollama - - build-custom-container-distribution: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Build a single provider - run: | - yq -i '.image_type = "container"' llama_stack/distributions/ci-tests/build.yaml - yq -i '.image_name = "test"' llama_stack/distributions/ci-tests/build.yaml - USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml - - - name: Inspect the container image entrypoint - run: | - IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1) - entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID) - echo "Entrypoint: $entrypoint" - if [ "$entrypoint" != "[python -m llama_stack.core.server.server /app/run.yaml]" ]; then - echo "Entrypoint is not correct" - exit 1 - fi - - build-ubi9-container-distribution: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Pin distribution to UBI9 base - run: | - yq -i ' - .image_type = "container" | - .image_name = "ubi9-test" | - .distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest" - ' llama_stack/distributions/ci-tests/build.yaml - - - name: Build dev container (UBI9) - env: - USE_COPY_NOT_MOUNT: "true" - LLAMA_STACK_DIR: "." 
- run: | - uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml - - - name: Inspect UBI9 image - run: | - IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1) - entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID) - echo "Entrypoint: $entrypoint" - if [ "$entrypoint" != "[python -m llama_stack.core.server.server /app/run.yaml]" ]; then - echo "Entrypoint is not correct" - exit 1 - fi - - echo "Checking /etc/os-release in $IMAGE_ID" - docker run --rm --entrypoint sh "$IMAGE_ID" -c \ - 'source /etc/os-release && echo "$ID"' \ - | grep -qE '^(rhel|ubi)$' \ - || { echo "Base image is not UBI 9!"; exit 1; } diff --git a/.github/workflows/providers-show.yml b/.github/workflows/providers-show.yml new file mode 100644 index 000000000..4218fa614 --- /dev/null +++ b/.github/workflows/providers-show.yml @@ -0,0 +1,105 @@ +name: Test Llama Stack Show + +run-name: Test llama stack show + +on: + push: + branches: + - main + paths: + - 'llama_stack/cli/stack/show.py' + - 'llama_stack/cli/stack/_show.py' + - 'llama_stack/core/build.*' + - 'llama_stack/core/*.sh' + - '.github/workflows/providers-show.yml' + - 'llama_stack/templates/**' + - 'pyproject.toml' + + pull_request: + paths: + - 'llama_stack/cli/stack/show.py' + - 'llama_stack/cli/stack/_show.py' + - 'llama_stack/core/build.*' + - 'llama_stack/core/*.sh' + - '.github/workflows/providers-show.yml' + - 'llama_stack/templates/**' + - 'pyproject.toml' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + generate-matrix: + runs-on: ubuntu-latest + outputs: + distros: ${{ steps.set-matrix.outputs.distros }} + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Generate Distribution List + id: set-matrix + run: | + distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]') + echo "distros=$distros" >> "$GITHUB_OUTPUT" + + show: + needs: generate-matrix + runs-on: ubuntu-latest + strategy: + matrix: + distro: ${{ fromJson(needs.generate-matrix.outputs.distros) }} + image-type: [venv, container] + fail-fast: false # We want to run all jobs even if some fail + + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install dependencies + uses: ./.github/actions/setup-runner + + - name: Print dependencies + run: | + uv run llama stack show --distro ${{ matrix.distro }} + + - name: Install Distro using Llama Stack Show + run: | + # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead + # LLAMA_STACK_DIR is set to the current directory so we are installing from the source + USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack show --distro ${{ matrix.distro }} | sh + + - name: Print dependencies in the image + if: matrix.image-type == 'venv' + run: | + uv pip list + + show-single-provider: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install dependencies + uses: ./.github/actions/setup-runner + + - name: Show a single provider + run: | + USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. 
uv run llama stack show --env-name test --providers inference=remote::ollama + + show-from-config: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install dependencies + uses: ./.github/actions/setup-runner + + - name: Show from Config + env: + USE_COPY_NOT_MOUNT: "true" + LLAMA_STACK_DIR: "." + run: | + uv run llama stack show --config llama_stack/templates/ci-tests/build.yaml diff --git a/llama_stack/cli/stack/_show.py b/llama_stack/cli/stack/_show.py index fb7160b7d..9c3ae1c8d 100644 --- a/llama_stack/cli/stack/_show.py +++ b/llama_stack/cli/stack/_show.py @@ -5,10 +5,7 @@ # the root directory of this source tree. import argparse -import importlib.resources -import json import os -import shutil import sys import textwrap from pathlib import Path @@ -17,9 +14,9 @@ import yaml from prompt_toolkit import prompt from prompt_toolkit.completion import WordCompleter from prompt_toolkit.validation import Validator -from termcolor import colored, cprint +from termcolor import cprint -from llama_stack.cli.stack.utils import ImageType, available_templates_specs, generate_run_config +from llama_stack.cli.stack.utils import ImageType, available_templates_specs from llama_stack.core.build import get_provider_dependencies from llama_stack.core.datatypes import ( BuildConfig, @@ -27,10 +24,7 @@ from llama_stack.core.datatypes import ( DistributionSpec, ) from llama_stack.core.distribution import get_provider_registry -from llama_stack.core.external import load_external_apis from llama_stack.core.stack import replace_env_vars -from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR -from llama_stack.core.utils.exec import run_command from llama_stack.log import get_logger from llama_stack.providers.datatypes import Api diff --git a/llama_stack/providers/remote/inference/fireworks/pyproject.toml b/llama_stack/providers/remote/inference/fireworks/pyproject.toml index 6a75e17cc..b9d6e14fc 100644 --- a/llama_stack/providers/remote/inference/fireworks/pyproject.toml +++ b/llama_stack/providers/remote/inference/fireworks/pyproject.toml @@ -11,7 +11,7 @@ requires-python = ">=3.12" license = { "text" = "MIT" } dependencies = [ "fireworks-ai", - + "grpcio>=1.67.1,<1.71.0", # Pin grpcio version for compatibility ] diff --git a/tests/unit/distribution/test_build_path.py b/tests/unit/distribution/test_build_path.py deleted file mode 100644 index b4094618e..000000000 --- a/tests/unit/distribution/test_build_path.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -from pathlib import Path - -from llama_stack.cli.stack._sync import ( - _run_stack_build_command_from_build_config, -) -from llama_stack.core.datatypes import BuildConfig, DistributionSpec -from llama_stack.core.utils.image_types import LlamaStackImageType - - -def test_container_build_passes_path(monkeypatch, tmp_path): - called_with = {} - - def spy_build_image(build_config, image_name, distro_or_config, run_config=None): - called_with["path"] = distro_or_config - called_with["run_config"] = run_config - return 0 - - monkeypatch.setattr( - "llama_stack.cli.stack._build.build_image", - spy_build_image, - raising=True, - ) - - cfg = BuildConfig( - image_type=LlamaStackImageType.CONTAINER.value, - distribution_spec=DistributionSpec(providers={}, description=""), - ) - - _run_stack_build_command_from_build_config(cfg, image_name="dummy") - - assert "path" in called_with - assert isinstance(called_with["path"], str) - assert Path(called_with["path"]).exists() - assert called_with["run_config"] is None diff --git a/tests/unit/distribution/test_stack_show.py b/tests/unit/distribution/test_stack_show.py new file mode 100644 index 000000000..ec4c7e205 --- /dev/null +++ b/tests/unit/distribution/test_stack_show.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + + +from llama_stack.cli.stack._show import ( + run_stack_show_command, +) +from llama_stack.core.datatypes import BuildConfig, DistributionSpec +from llama_stack.core.utils.image_types import LlamaStackImageType + + +def test_stack_show_basic(): + cfg = BuildConfig( + image_type=LlamaStackImageType.CONTAINER.value, + distribution_spec=DistributionSpec(providers={}, description=""), + ) + + run_stack_show_command(cfg) diff --git a/uv.lock b/uv.lock index 9f4ba4adb..543ca00b7 100644 --- a/uv.lock +++ b/uv.lock @@ -3663,27 +3663,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.12.5" +version = "0.9.10" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/30/cd/01015eb5034605fd98d829c5839ec2c6b4582b479707f7c1c2af861e8258/ruff-0.12.5.tar.gz", hash = "sha256:b209db6102b66f13625940b7f8c7d0f18e20039bb7f6101fbdac935c9612057e", size = 5170722, upload-time = "2025-07-24T13:26:37.456Z" } +sdist = { url = "https://files.pythonhosted.org/packages/20/8e/fafaa6f15c332e73425d9c44ada85360501045d5ab0b81400076aff27cf6/ruff-0.9.10.tar.gz", hash = "sha256:9bacb735d7bada9cfb0f2c227d3658fc443d90a727b47f206fb33f52f3c0eac7", size = 3759776, upload-time = "2025-03-07T15:27:44.363Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d4/de/ad2f68f0798ff15dd8c0bcc2889558970d9a685b3249565a937cd820ad34/ruff-0.12.5-py3-none-linux_armv6l.whl", hash = "sha256:1de2c887e9dec6cb31fcb9948299de5b2db38144e66403b9660c9548a67abd92", size = 11819133, upload-time = "2025-07-24T13:25:56.369Z" }, - { url = "https://files.pythonhosted.org/packages/f8/fc/c6b65cd0e7fbe60f17e7ad619dca796aa49fbca34bb9bea5f8faf1ec2643/ruff-0.12.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d1ab65e7d8152f519e7dea4de892317c9da7a108da1c56b6a3c1d5e7cf4c5e9a", size = 12501114, upload-time = "2025-07-24T13:25:59.471Z" }, - { url = "https://files.pythonhosted.org/packages/c5/de/c6bec1dce5ead9f9e6a946ea15e8d698c35f19edc508289d70a577921b30/ruff-0.12.5-py3-none-macosx_11_0_arm64.whl", hash = 
"sha256:962775ed5b27c7aa3fdc0d8f4d4433deae7659ef99ea20f783d666e77338b8cf", size = 11716873, upload-time = "2025-07-24T13:26:01.496Z" }, - { url = "https://files.pythonhosted.org/packages/a1/16/cf372d2ebe91e4eb5b82a2275c3acfa879e0566a7ac94d331ea37b765ac8/ruff-0.12.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73b4cae449597e7195a49eb1cdca89fd9fbb16140c7579899e87f4c85bf82f73", size = 11958829, upload-time = "2025-07-24T13:26:03.721Z" }, - { url = "https://files.pythonhosted.org/packages/25/bf/cd07e8f6a3a6ec746c62556b4c4b79eeb9b0328b362bb8431b7b8afd3856/ruff-0.12.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b13489c3dc50de5e2d40110c0cce371e00186b880842e245186ca862bf9a1ac", size = 11626619, upload-time = "2025-07-24T13:26:06.118Z" }, - { url = "https://files.pythonhosted.org/packages/d8/c9/c2ccb3b8cbb5661ffda6925f81a13edbb786e623876141b04919d1128370/ruff-0.12.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1504fea81461cf4841778b3ef0a078757602a3b3ea4b008feb1308cb3f23e08", size = 13221894, upload-time = "2025-07-24T13:26:08.292Z" }, - { url = "https://files.pythonhosted.org/packages/6b/58/68a5be2c8e5590ecdad922b2bcd5583af19ba648f7648f95c51c3c1eca81/ruff-0.12.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c7da4129016ae26c32dfcbd5b671fe652b5ab7fc40095d80dcff78175e7eddd4", size = 14163909, upload-time = "2025-07-24T13:26:10.474Z" }, - { url = "https://files.pythonhosted.org/packages/bd/d1/ef6b19622009ba8386fdb792c0743f709cf917b0b2f1400589cbe4739a33/ruff-0.12.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ca972c80f7ebcfd8af75a0f18b17c42d9f1ef203d163669150453f50ca98ab7b", size = 13583652, upload-time = "2025-07-24T13:26:13.381Z" }, - { url = "https://files.pythonhosted.org/packages/62/e3/1c98c566fe6809a0c83751d825a03727f242cdbe0d142c9e292725585521/ruff-0.12.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dbbf9f25dfb501f4237ae7501d6364b76a01341c6f1b2cd6764fe449124bb2a", size = 12700451, upload-time = "2025-07-24T13:26:15.488Z" }, - { url = "https://files.pythonhosted.org/packages/24/ff/96058f6506aac0fbc0d0fc0d60b0d0bd746240a0594657a2d94ad28033ba/ruff-0.12.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c47dea6ae39421851685141ba9734767f960113d51e83fd7bb9958d5be8763a", size = 12937465, upload-time = "2025-07-24T13:26:17.808Z" }, - { url = "https://files.pythonhosted.org/packages/eb/d3/68bc5e7ab96c94b3589d1789f2dd6dd4b27b263310019529ac9be1e8f31b/ruff-0.12.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5076aa0e61e30f848846f0265c873c249d4b558105b221be1828f9f79903dc5", size = 11771136, upload-time = "2025-07-24T13:26:20.422Z" }, - { url = "https://files.pythonhosted.org/packages/52/75/7356af30a14584981cabfefcf6106dea98cec9a7af4acb5daaf4b114845f/ruff-0.12.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a5a4c7830dadd3d8c39b1cc85386e2c1e62344f20766be6f173c22fb5f72f293", size = 11601644, upload-time = "2025-07-24T13:26:22.928Z" }, - { url = "https://files.pythonhosted.org/packages/c2/67/91c71d27205871737cae11025ee2b098f512104e26ffd8656fd93d0ada0a/ruff-0.12.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:46699f73c2b5b137b9dc0fc1a190b43e35b008b398c6066ea1350cce6326adcb", size = 12478068, upload-time = "2025-07-24T13:26:26.134Z" }, - { url = "https://files.pythonhosted.org/packages/34/04/b6b00383cf2f48e8e78e14eb258942fdf2a9bf0287fbf5cdd398b749193a/ruff-0.12.5-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:5a655a0a0d396f0f072faafc18ebd59adde8ca85fb848dc1b0d9f024b9c4d3bb", size = 12991537, upload-time = "2025-07-24T13:26:28.533Z" }, - { url = "https://files.pythonhosted.org/packages/3e/b9/053d6445dc7544fb6594785056d8ece61daae7214859ada4a152ad56b6e0/ruff-0.12.5-py3-none-win32.whl", hash = "sha256:dfeb2627c459b0b78ca2bbdc38dd11cc9a0a88bf91db982058b26ce41714ffa9", size = 11751575, upload-time = "2025-07-24T13:26:30.835Z" }, - { url = "https://files.pythonhosted.org/packages/bc/0f/ab16e8259493137598b9149734fec2e06fdeda9837e6f634f5c4e35916da/ruff-0.12.5-py3-none-win_amd64.whl", hash = "sha256:ae0d90cf5f49466c954991b9d8b953bd093c32c27608e409ae3564c63c5306a5", size = 12882273, upload-time = "2025-07-24T13:26:32.929Z" }, - { url = "https://files.pythonhosted.org/packages/00/db/c376b0661c24cf770cb8815268190668ec1330eba8374a126ceef8c72d55/ruff-0.12.5-py3-none-win_arm64.whl", hash = "sha256:48cdbfc633de2c5c37d9f090ba3b352d1576b0015bfc3bc98eaf230275b7e805", size = 11951564, upload-time = "2025-07-24T13:26:34.994Z" }, + { url = "https://files.pythonhosted.org/packages/73/b2/af7c2cc9e438cbc19fafeec4f20bfcd72165460fe75b2b6e9a0958c8c62b/ruff-0.9.10-py3-none-linux_armv6l.whl", hash = "sha256:eb4d25532cfd9fe461acc83498361ec2e2252795b4f40b17e80692814329e42d", size = 10049494, upload-time = "2025-03-07T15:26:51.268Z" }, + { url = "https://files.pythonhosted.org/packages/6d/12/03f6dfa1b95ddd47e6969f0225d60d9d7437c91938a310835feb27927ca0/ruff-0.9.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:188a6638dab1aa9bb6228a7302387b2c9954e455fb25d6b4470cb0641d16759d", size = 10853584, upload-time = "2025-03-07T15:26:56.104Z" }, + { url = "https://files.pythonhosted.org/packages/02/49/1c79e0906b6ff551fb0894168763f705bf980864739572b2815ecd3c9df0/ruff-0.9.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5284dcac6b9dbc2fcb71fdfc26a217b2ca4ede6ccd57476f52a587451ebe450d", size = 10155692, upload-time = "2025-03-07T15:27:01.385Z" }, + { url = "https://files.pythonhosted.org/packages/5b/01/85e8082e41585e0e1ceb11e41c054e9e36fed45f4b210991052d8a75089f/ruff-0.9.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47678f39fa2a3da62724851107f438c8229a3470f533894b5568a39b40029c0c", size = 10369760, upload-time = "2025-03-07T15:27:04.023Z" }, + { url = "https://files.pythonhosted.org/packages/a1/90/0bc60bd4e5db051f12445046d0c85cc2c617095c0904f1aa81067dc64aea/ruff-0.9.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99713a6e2766b7a17147b309e8c915b32b07a25c9efd12ada79f217c9c778b3e", size = 9912196, upload-time = "2025-03-07T15:27:06.93Z" }, + { url = "https://files.pythonhosted.org/packages/66/ea/0b7e8c42b1ec608033c4d5a02939c82097ddcb0b3e393e4238584b7054ab/ruff-0.9.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524ee184d92f7c7304aa568e2db20f50c32d1d0caa235d8ddf10497566ea1a12", size = 11434985, upload-time = "2025-03-07T15:27:10.082Z" }, + { url = "https://files.pythonhosted.org/packages/d5/86/3171d1eff893db4f91755175a6e1163c5887be1f1e2f4f6c0c59527c2bfd/ruff-0.9.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df92aeac30af821f9acf819fc01b4afc3dfb829d2782884f8739fb52a8119a16", size = 12155842, upload-time = "2025-03-07T15:27:12.727Z" }, + { url = "https://files.pythonhosted.org/packages/89/9e/700ca289f172a38eb0bca752056d0a42637fa17b81649b9331786cb791d7/ruff-0.9.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de42e4edc296f520bb84954eb992a07a0ec5a02fecb834498415908469854a52", size = 11613804, 
upload-time = "2025-03-07T15:27:15.944Z" }, + { url = "https://files.pythonhosted.org/packages/f2/92/648020b3b5db180f41a931a68b1c8575cca3e63cec86fd26807422a0dbad/ruff-0.9.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d257f95b65806104b6b1ffca0ea53f4ef98454036df65b1eda3693534813ecd1", size = 13823776, upload-time = "2025-03-07T15:27:18.996Z" }, + { url = "https://files.pythonhosted.org/packages/5e/a6/cc472161cd04d30a09d5c90698696b70c169eeba2c41030344194242db45/ruff-0.9.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60dec7201c0b10d6d11be00e8f2dbb6f40ef1828ee75ed739923799513db24c", size = 11302673, upload-time = "2025-03-07T15:27:21.655Z" }, + { url = "https://files.pythonhosted.org/packages/6c/db/d31c361c4025b1b9102b4d032c70a69adb9ee6fde093f6c3bf29f831c85c/ruff-0.9.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d838b60007da7a39c046fcdd317293d10b845001f38bcb55ba766c3875b01e43", size = 10235358, upload-time = "2025-03-07T15:27:24.72Z" }, + { url = "https://files.pythonhosted.org/packages/d1/86/d6374e24a14d4d93ebe120f45edd82ad7dcf3ef999ffc92b197d81cdc2a5/ruff-0.9.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ccaf903108b899beb8e09a63ffae5869057ab649c1e9231c05ae354ebc62066c", size = 9886177, upload-time = "2025-03-07T15:27:27.282Z" }, + { url = "https://files.pythonhosted.org/packages/00/62/a61691f6eaaac1e945a1f3f59f1eea9a218513139d5b6c2b8f88b43b5b8f/ruff-0.9.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9567d135265d46e59d62dc60c0bfad10e9a6822e231f5b24032dba5a55be6b5", size = 10864747, upload-time = "2025-03-07T15:27:30.637Z" }, + { url = "https://files.pythonhosted.org/packages/ee/94/2c7065e1d92a8a8a46d46d9c3cf07b0aa7e0a1e0153d74baa5e6620b4102/ruff-0.9.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5f202f0d93738c28a89f8ed9eaba01b7be339e5d8d642c994347eaa81c6d75b8", size = 11360441, upload-time = "2025-03-07T15:27:33.356Z" }, + { url = "https://files.pythonhosted.org/packages/a7/8f/1f545ea6f9fcd7bf4368551fb91d2064d8f0577b3079bb3f0ae5779fb773/ruff-0.9.10-py3-none-win32.whl", hash = "sha256:bfb834e87c916521ce46b1788fbb8484966e5113c02df216680102e9eb960029", size = 10247401, upload-time = "2025-03-07T15:27:35.994Z" }, + { url = "https://files.pythonhosted.org/packages/4f/18/fb703603ab108e5c165f52f5b86ee2aa9be43bb781703ec87c66a5f5d604/ruff-0.9.10-py3-none-win_amd64.whl", hash = "sha256:f2160eeef3031bf4b17df74e307d4c5fb689a6f3a26a2de3f7ef4044e3c484f1", size = 11366360, upload-time = "2025-03-07T15:27:38.66Z" }, + { url = "https://files.pythonhosted.org/packages/35/85/338e603dc68e7d9994d5d84f24adbf69bae760ba5efd3e20f5ff2cec18da/ruff-0.9.10-py3-none-win_arm64.whl", hash = "sha256:5fd804c0327a5e5ea26615550e706942f348b197d5475ff34c19733aee4b2e69", size = 10436892, upload-time = "2025-03-07T15:27:41.687Z" }, ] [[package]]