diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9e884494a..ff51a4795 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,6 +8,8 @@ repos:
   rev: v5.0.0  # Latest stable version
   hooks:
   - id: check-merge-conflict
+  - id: trailing-whitespace
+    exclude: '\.py$'  # Exclude Python files as Ruff already handles them
   - id: check-added-large-files
     args: ['--maxkb=1000']
   - id: end-of-file-fixer
@@ -83,11 +85,8 @@ repos:
   - id: distro-codegen
     name: Distribution Template Codegen
     additional_dependencies:
-    - rich
-    - pydantic
-    - jinja2
     - uv==0.6.0
-    entry: uv run python -m llama_stack.scripts.distro_codegen
+    entry: uv run --extra codegen python -m llama_stack.scripts.distro_codegen
     language: python
     pass_filenames: false
     require_serial: true
diff --git a/README.md b/README.md
index 3946deea6..b24e69514 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ Llama Stack standardizes the core building blocks that simplify AI application d
 By reducing friction and complexity, Llama Stack empowers developers to focus on what they do best: building transformative generative AI applications.
 
 ### API Providers
-Here is a list of the various API providers and available distributions that can help developers get started easily with Llama Stack. 
+Here is a list of the various API providers and available distributions that can help developers get started easily with Llama Stack.
 
 | **API Provider Builder** | **Environments** | **Agents** | **Inference** | **Memory** | **Safety** | **Telemetry** |
 |:------------------------:|:----------------------:|:----------:|:-------------:|:----------:|:----------:|:-------------:|
diff --git a/docs/source/providers/index.md b/docs/source/providers/index.md
index e039e90b0..55db9aa13 100644
--- a/docs/source/providers/index.md
+++ b/docs/source/providers/index.md
@@ -36,7 +36,7 @@ Evaluates the outputs of the system.
 Collects telemetry data from the system.
 
 ## Tool Runtime
-Is associated with the ToolGroup resouces. 
+Is associated with the ToolGroup resouces.
 
 ## Vector IO
 
diff --git a/docs/source/providers/vector_io/chromadb.md b/docs/source/providers/vector_io/chromadb.md
index 4a7caf2e1..3f0c56f61 100644
--- a/docs/source/providers/vector_io/chromadb.md
+++ b/docs/source/providers/vector_io/chromadb.md
@@ -1,10 +1,10 @@
 ---
 orphan: true
 ---
-# Chroma 
+# Chroma
 
-[Chroma](https://www.trychroma.com/) is an inline and remote vector 
-database provider for Llama Stack. It allows you to store and query vectors directly within a Chroma database. 
+[Chroma](https://www.trychroma.com/) is an inline and remote vector
+database provider for Llama Stack. It allows you to store and query vectors directly within a Chroma database.
 That means you're not limited to storing vectors in memory or in a separate service.
 
 ## Features
diff --git a/docs/source/providers/vector_io/faiss.md b/docs/source/providers/vector_io/faiss.md
index f894190eb..c8a2efbe4 100644
--- a/docs/source/providers/vector_io/faiss.md
+++ b/docs/source/providers/vector_io/faiss.md
@@ -3,7 +3,7 @@ orphan: true
 ---
 # Faiss
 
-[Faiss](https://github.com/facebookresearch/faiss) is an inline vector database provider for Llama Stack. It 
+[Faiss](https://github.com/facebookresearch/faiss) is an inline vector database provider for Llama Stack. It
 allows you to store and query vectors directly in memory.
 That means you'll get fast and efficient vector retrieval.
 
@@ -29,5 +29,5 @@ You can install Faiss using pip:
 pip install faiss-cpu
 ```
 ## Documentation
-See [Faiss' documentation](https://faiss.ai/) or the [Faiss Wiki](https://github.com/facebookresearch/faiss/wiki) for 
+See [Faiss' documentation](https://faiss.ai/) or the [Faiss Wiki](https://github.com/facebookresearch/faiss/wiki) for
 more details about Faiss in general.
diff --git a/docs/source/providers/vector_io/pgvector.md b/docs/source/providers/vector_io/pgvector.md
index 919eb88d8..070e2c16d 100644
--- a/docs/source/providers/vector_io/pgvector.md
+++ b/docs/source/providers/vector_io/pgvector.md
@@ -3,7 +3,7 @@ orphan: true
 ---
 # Postgres PGVector
 
-[PGVector](https://github.com/pgvector/pgvector) is a remote vector database provider for Llama Stack. It 
+[PGVector](https://github.com/pgvector/pgvector) is a remote vector database provider for Llama Stack. It
 allows you to store and query vectors directly in memory.
 That means you'll get fast and efficient vector retrieval.
 
diff --git a/docs/source/providers/vector_io/qdrant.md b/docs/source/providers/vector_io/qdrant.md
index c374ade98..a0de0be98 100644
--- a/docs/source/providers/vector_io/qdrant.md
+++ b/docs/source/providers/vector_io/qdrant.md
@@ -3,7 +3,7 @@ orphan: true
 ---
 # Qdrant
 
-[Qdrant](https://qdrant.tech/documentation/) is a remote vector database provider for Llama Stack. It 
+[Qdrant](https://qdrant.tech/documentation/) is a remote vector database provider for Llama Stack. It
 allows you to store and query vectors directly in memory.
 That means you'll get fast and efficient vector retrieval.
 
diff --git a/docs/source/providers/vector_io/sqlite-vec.md b/docs/source/providers/vector_io/sqlite-vec.md
index f5ce4c003..9bbc4170d 100644
--- a/docs/source/providers/vector_io/sqlite-vec.md
+++ b/docs/source/providers/vector_io/sqlite-vec.md
@@ -3,8 +3,8 @@ orphan: true
 ---
 # SQLite-Vec
 
-[SQLite-Vec](https://github.com/asg017/sqlite-vec) is an inline vector database provider for Llama Stack. It 
-allows you to store and query vectors directly within an SQLite database. 
+[SQLite-Vec](https://github.com/asg017/sqlite-vec) is an inline vector database provider for Llama Stack. It
+allows you to store and query vectors directly within an SQLite database.
 That means you're not limited to storing vectors in memory or in a separate service.
 
 ## Features
diff --git a/docs/source/providers/vector_io/weaviate.md b/docs/source/providers/vector_io/weaviate.md
index 47321781c..78c0ddb5b 100644
--- a/docs/source/providers/vector_io/weaviate.md
+++ b/docs/source/providers/vector_io/weaviate.md
@@ -1,10 +1,10 @@
 ---
 orphan: true
 ---
-# Weaviate 
+# Weaviate
 
-[Weaviate](https://weaviate.io/) is a vector database provider for Llama Stack. 
-It allows you to store and query vectors directly within a Weaviate database. 
+[Weaviate](https://weaviate.io/) is a vector database provider for Llama Stack.
+It allows you to store and query vectors directly within a Weaviate database.
 That means you're not limited to storing vectors in memory or in a separate service.
 
 ## Features
@@ -27,7 +27,7 @@ To use Weaviate in your Llama Stack project, follow these steps:
 
 ## Installation
 
-To install Weaviate see the [Weaviate quickstart documentation](https://weaviate.io/developers/weaviate/quickstart). 
+To install Weaviate see the [Weaviate quickstart documentation](https://weaviate.io/developers/weaviate/quickstart).
 
 ## Documentation
 See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more details about Weaviate in general.
diff --git a/llama_stack/providers/inline/tool_runtime/code_interpreter/__init__.py b/llama_stack/providers/inline/tool_runtime/code_interpreter/__init__.py
index 663b9655b..995358d46 100644
--- a/llama_stack/providers/inline/tool_runtime/code_interpreter/__init__.py
+++ b/llama_stack/providers/inline/tool_runtime/code_interpreter/__init__.py
@@ -4,13 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from .code_interpreter import CodeInterpreterToolRuntimeImpl
 from .config import CodeInterpreterToolConfig
 
 __all__ = ["CodeInterpreterToolConfig", "CodeInterpreterToolRuntimeImpl"]
 
 
 async def get_provider_impl(config: CodeInterpreterToolConfig, _deps):
+    from .code_interpreter import CodeInterpreterToolRuntimeImpl
+
     impl = CodeInterpreterToolRuntimeImpl(config)
     await impl.initialize()
     return impl
diff --git a/pyproject.toml b/pyproject.toml
index 17157f2ac..937b7c174 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -75,6 +75,11 @@ docs = [
     "sphinxcontrib.mermaid",
     "tomli",
 ]
+codegen = [
+    "rich",
+    "pydantic",
+    "jinja2",
+]
 
 [project.urls]
 Homepage = "https://github.com/meta-llama/llama-stack"
diff --git a/uv.lock b/uv.lock
index 64012c4d7..c839d38a2 100644
--- a/uv.lock
+++ b/uv.lock
@@ -871,6 +871,11 @@ dependencies = [
 ]
 
 [package.optional-dependencies]
+codegen = [
+    { name = "jinja2" },
+    { name = "pydantic" },
+    { name = "rich" },
+]
 dev = [
     { name = "black" },
     { name = "fastapi" },
@@ -923,6 +928,7 @@ requires-dist = [
     { name = "groq", marker = "extra == 'test'" },
     { name = "httpx" },
     { name = "huggingface-hub" },
+    { name = "jinja2", marker = "extra == 'codegen'" },
     { name = "jsonschema" },
     { name = "llama-stack-client", specifier = ">=0.1.4" },
     { name = "lm-format-enforcer", marker = "extra == 'test'", specifier = ">=0.10.9" },
@@ -935,12 +941,14 @@ requires-dist = [
     { name = "pre-commit", marker = "extra == 'dev'" },
     { name = "prompt-toolkit" },
     { name = "pydantic", specifier = ">=2" },
+    { name = "pydantic", marker = "extra == 'codegen'" },
     { name = "pytest", marker = "extra == 'dev'" },
     { name = "pytest-asyncio", marker = "extra == 'dev'" },
     { name = "pytest-html", marker = "extra == 'dev'" },
     { name = "python-dotenv" },
     { name = "requests" },
     { name = "rich" },
+    { name = "rich", marker = "extra == 'codegen'" },
     { name = "ruamel-yaml", marker = "extra == 'dev'" },
     { name = "ruff", marker = "extra == 'dev'" },
     { name = "setuptools" },
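
The `__init__.py` hunk above moves the import of `CodeInterpreterToolRuntimeImpl` inside `get_provider_impl`, so importing the provider package no longer loads the implementation module and its runtime dependencies; they are loaded only when the provider is actually constructed (for example, the distro codegen run only needs the lightweight `codegen` extra). Below is a minimal, self-contained sketch of that deferred-import factory pattern; `ExampleToolConfig` is a hypothetical stand-in for the config class, and the stdlib `statistics` module stands in for the heavier implementation module — neither is part of this change.

```python
import asyncio
import sys


class ExampleToolConfig:
    """Hypothetical stand-in for CodeInterpreterToolConfig."""


async def get_provider_impl(config: ExampleToolConfig, _deps):
    # Deferred import: the "heavy" module is loaded only when the provider is
    # built, mirroring the code_interpreter __init__.py change above.
    # `statistics` stands in for
    # `from .code_interpreter import CodeInterpreterToolRuntimeImpl`.
    import statistics

    # Stand-in for constructing and initializing the implementation.
    return statistics.mean([1, 2, 3])


if __name__ == "__main__":
    print("impl module loaded before factory call:", "statistics" in sys.modules)  # False
    asyncio.run(get_provider_impl(ExampleToolConfig(), None))
    print("impl module loaded after factory call:", "statistics" in sys.modules)   # True
```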