refactor: convert providers to be installed via package
Currently, providers declare a `pip_packages` list. Rather than maintain our own form of Python dependency management, we should use a `pyproject.toml` file in each provider, declaring its dependencies in a more trackable manner. Each provider can then be installed using the already-in-place `module` field in the ProviderSpec: since it points to the directory the provider lives in, we can simply `uv pip install` that directory instead of installing the dependencies one by one.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
parent a1301911e4 · commit 41431d8bdd
76 changed files with 1294 additions and 134 deletions
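For illustration, a minimal sketch of the intended flow (helper names are hypothetical, not part of this commit): resolve a provider's `module` to its source directory, then hand that directory to `uv` in one shot rather than iterating over `pip_packages`.

```python
# Hypothetical sketch of the install flow described above; not the actual resolver code.
import importlib.util
import os
import subprocess


def provider_source_dir(module: str) -> str:
    """Resolve a provider's `module` field to the directory holding its pyproject.toml."""
    spec = importlib.util.find_spec(module)
    if spec is None or spec.origin is None:
        raise ModuleNotFoundError(module)
    return os.path.dirname(spec.origin)  # directory containing the module's __init__.py


def install_provider(module: str) -> None:
    # One `uv pip install <dir>` replaces installing each pip_packages entry separately.
    subprocess.run(["uv", "pip", "install", provider_source_dir(module)], check=True)


# e.g. install_provider("llama_stack.providers.inline.post_training.torchtune")
```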
.github/workflows/README.md (vendored, 4 changes)
@@ -13,6 +13,7 @@ Llama Stack uses GitHub Actions for Continuous Integration (CI). Below is a table
 | Vector IO Integration Tests | [integration-vector-io-tests.yml](integration-vector-io-tests.yml) | Run the integration test suite with various VectorIO providers |
 | Pre-commit | [pre-commit.yml](pre-commit.yml) | Run pre-commit checks |
 | Test Llama Stack Build | [providers-build.yml](providers-build.yml) | Test llama stack build |
+| Test Llama Stack Show | [providers-show.yml](providers-show.yml) | Test llama stack show |
 | Python Package Build Test | [python-build-test.yml](python-build-test.yml) | Test building the llama-stack PyPI project |
 | Integration Tests (Record) | [record-integration-tests.yml](record-integration-tests.yml) | Run the integration test suite from tests/integration |
 | Check semantic PR titles | [semantic-pr.yml](semantic-pr.yml) | Ensure that PR titles follow the conventional commit spec |
@@ -2,7 +2,7 @@
 ## Description
 
-TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework.
+TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework (CPU).
 
 ## Configuration
@@ -2,7 +2,7 @@
 ## Description
 
-TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework.
+TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework (GPU).
 
 ## Configuration
@@ -141,7 +141,10 @@ class ProviderSpec(BaseModel):
     )
 
     is_external: bool = Field(default=False, description="Notes whether this provider is an external provider.")
+    package_extras: list[str] = Field(
+        default_factory=list,
+        description="Optional package extras to install when using pyproject.toml files (e.g., ['cpu', 'gpu'])",
+    )
     # used internally by the resolver; this is a hack for now
     deps__: list[str] = Field(default_factory=list)
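A small sketch (assumed helper, not from this diff) of how `package_extras` could be folded into the install target once the provider directory is known:

```python
# Hypothetical: combine a provider directory with its declared extras.
def with_extras(provider_dir: str, package_extras: list[str]) -> str:
    """Build a pip/uv requirement string; extras resolve against the directory's pyproject.toml."""
    if not package_extras:
        return provider_dir
    return f"{provider_dir}[{','.join(package_extras)}]"  # e.g. "<dir>[cpu]"
```

With `package_extras=["cpu"]`, the resulting target would be installed as `uv pip install "<provider_dir>[cpu]"`.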
@@ -0,0 +1,28 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-agents-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "matplotlib",
    "pillow",
    "pandas",
    "scikit-learn",
    "mcp>=1.8.1",
    "aiosqlite",
    "psycopg2-binary",
    "redis",
    "pymongo"
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-batches-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of batches API with KVStore persistence."
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "openai"
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-datasetio-localfs"
version = "0.1.0"
description = "Local filesystem-based dataset I/O provider for reading and writing datasets to local storage"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "pandas",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,24 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-eval-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "tree_sitter",
    "pythainlp",
    "langdetect",
    "emoji",
    "nltk",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/inline/files/localfs/pyproject.toml (new file, 18 lines)
@@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-files-localfs"
version = "0.1.0"
description = "Local filesystem-based file storage provider for managing files and documents locally"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,29 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of inference with support for various model formats and optimization techniques"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "accelerate",
    "fairscale",
    "torch",
    "torchvision",
    "transformers",
    "zmq",
    "lm-format-enforcer",
    "sentence-transformers",
    "torchao==0.8.0",
    "fbgemm-gpu-genai==1.1.2",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,22 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-sentence-transformers"
version = "0.1.0"
description = "Sentence Transformers inference provider for text embeddings and similarity search"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "torch",
    "torchvision",
    "sentence-transformers",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,24 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-post-training-huggingface"
version = "0.1.0"
description = "HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "trl",
    "transformers",
    "peft",
    "datasets>=4.0.0",
    "torch",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,30 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-post-training-torchtune"
version = "0.1.0"
description = "TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "torchtune>=0.5.0",
    "torchao>=0.12.0",
    "numpy",
]

[project.optional-dependencies]
cpu = [
    "torch --extra-index-url https://download.pytorch.org/whl/cpu",
]
gpu = [
    "torch",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
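Given the `[project.optional-dependencies]` table above, a distribution can pick the torch flavor at install time, e.g. `uv pip install "<torchtune provider dir>[cpu]"` or `[gpu]`; the exact command shape is assumed here rather than taken from this commit.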
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-code-scanner"
version = "0.1.0"
description = "Code Scanner safety provider for detecting security vulnerabilities and unsafe code patterns"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "codeshield",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-llama-guard"
version = "0.1.0"
description = "Llama Guard safety provider for content moderation and safety filtering using Meta's Llama Guard model"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-prompt-guard"
version = "0.1.0"
description = "Prompt Guard safety provider for detecting and filtering unsafe prompts and content"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "transformers[accelerate]",
    "torch",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/inline/scoring/basic/pyproject.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-scoring-basic"
version = "0.1.0"
description = "Basic scoring provider for simple evaluation metrics and scoring functions"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-scoring-braintrust"
version = "0.1.0"
description = "Braintrust scoring provider for evaluation and scoring using the Braintrust platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "autoevals",
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-scoring-llm-as-judge"
version = "0.1.0"
description = "LLM-as-judge scoring provider that uses language models to evaluate and score responses"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-telemetry-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of telemetry and observability using OpenTelemetry"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "opentelemetry-sdk",
    "opentelemetry-exporter-otlp-proto-http",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/inline/tool_runtime/rag/pyproject.toml (new file, 28 lines)
@@ -0,0 +1,28 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-tool-runtime-rag"
version = "0.1.0"
description = "RAG (Retrieval-Augmented Generation) tool runtime for document ingestion, chunking, and semantic search"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "chardet",
    "pypdf",
    "tqdm",
    "numpy",
    "scikit-learn",
    "scipy",
    "nltk",
    "sentencepiece",
    "transformers",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/inline/vector_io/chroma/pyproject.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-chroma"
version = "0.1.0"
description = "Chroma inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "chromadb",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/inline/vector_io/faiss/pyproject.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-faiss"
version = "0.1.0"
description = "Faiss inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "faiss-cpu",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/inline/vector_io/milvus/pyproject.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-milvus"
version = "0.1.0"
description = "Milvus inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "pymilvus[milvus-lite]>=2.4.10",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/inline/vector_io/qdrant/pyproject.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-qdrant"
version = "0.1.0"
description = "Qdrant inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "qdrant-client",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-sqlite-vec"
version = "0.1.0"
description = "SQLite-Vec inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "sqlite-vec",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -10,7 +10,6 @@ from llama_stack.providers.datatypes import (
     InlineProviderSpec,
     ProviderSpec,
 )
-from llama_stack.providers.utils.kvstore import kvstore_dependencies
 
 
 def available_providers() -> list[ProviderSpec]:
@@ -18,14 +17,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.agents,
             provider_type="inline::meta-reference",
-            pip_packages=[
-                "matplotlib",
-                "pillow",
-                "pandas",
-                "scikit-learn",
-                "mcp>=1.8.1",
-            ]
-            + kvstore_dependencies(),  # TODO make this dynamic based on the kvstore config
             module="llama_stack.providers.inline.agents.meta_reference",
             config_class="llama_stack.providers.inline.agents.meta_reference.MetaReferenceAgentsImplConfig",
             api_dependencies=[
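Note that the kvstore backends previously pulled in via `kvstore_dependencies()` (`aiosqlite`, `psycopg2-binary`, `redis`, `pymongo`) now appear directly in the agents provider's pyproject.toml above.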
@@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.batches,
             provider_type="inline::reference",
-            pip_packages=[],
             module="llama_stack.providers.inline.batches.reference",
             config_class="llama_stack.providers.inline.batches.reference.config.ReferenceBatchesImplConfig",
             api_dependencies=[
@@ -18,7 +18,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.datasetio,
             provider_type="inline::localfs",
-            pip_packages=["pandas"],
             module="llama_stack.providers.inline.datasetio.localfs",
             config_class="llama_stack.providers.inline.datasetio.localfs.LocalFSDatasetIOConfig",
             api_dependencies=[],
@@ -28,9 +27,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.datasetio,
             adapter_type="huggingface",
             provider_type="remote::huggingface",
-            pip_packages=[
-                "datasets>=4.0.0",
-            ],
             module="llama_stack.providers.remote.datasetio.huggingface",
             config_class="llama_stack.providers.remote.datasetio.huggingface.HuggingfaceDatasetIOConfig",
             description="HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub.",
@@ -41,9 +37,6 @@ def available_providers() -> list[ProviderSpec]:
             provider_type="remote::nvidia",
             module="llama_stack.providers.remote.datasetio.nvidia",
             config_class="llama_stack.providers.remote.datasetio.nvidia.NvidiaDatasetIOConfig",
-            pip_packages=[
-                "datasets>=4.0.0",
-            ],
             description="NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform.",
         ),
     ]
@@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.eval,
             provider_type="inline::meta-reference",
-            pip_packages=["tree_sitter", "pythainlp", "langdetect", "emoji", "nltk"],
             module="llama_stack.providers.inline.eval.meta_reference",
             config_class="llama_stack.providers.inline.eval.meta_reference.MetaReferenceEvalConfig",
             api_dependencies=[
@@ -28,9 +27,6 @@ def available_providers() -> list[ProviderSpec]:
         RemoteProviderSpec(
             api=Api.eval,
             adapter_type="nvidia",
-            pip_packages=[
-                "requests",
-            ],
             provider_type="remote::nvidia",
             module="llama_stack.providers.remote.eval.nvidia",
             config_class="llama_stack.providers.remote.eval.nvidia.NVIDIAEvalConfig",
@@ -14,7 +14,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.files,
             provider_type="inline::localfs",
             # TODO: make this dynamic according to the sql store type
-            pip_packages=sql_store_pip_packages,
             module="llama_stack.providers.inline.files.localfs",
             config_class="llama_stack.providers.inline.files.localfs.config.LocalfsFilesImplConfig",
             description="Local filesystem-based file storage provider for managing files and documents locally.",
@@ -31,7 +31,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.inference,
             provider_type="inline::meta-reference",
-            pip_packages=META_REFERENCE_DEPS,
             module="llama_stack.providers.inline.inference.meta_reference",
             config_class="llama_stack.providers.inline.inference.meta_reference.MetaReferenceInferenceConfig",
             description="Meta's reference implementation of inference with support for various model formats and optimization techniques.",
@@ -39,11 +38,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.inference,
             provider_type="inline::sentence-transformers",
-            # CrossEncoder depends on torchao.quantization
-            pip_packages=[
-                "torch torchvision torchao>=0.12.0 --extra-index-url https://download.pytorch.org/whl/cpu",
-                "sentence-transformers --no-deps",
-            ],
             module="llama_stack.providers.inline.inference.sentence_transformers",
             config_class="llama_stack.providers.inline.inference.sentence_transformers.config.SentenceTransformersInferenceConfig",
             description="Sentence Transformers inference provider for text embeddings and similarity search.",
@@ -52,9 +46,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="cerebras",
             provider_type="remote::cerebras",
-            pip_packages=[
-                "cerebras_cloud_sdk",
-            ],
             module="llama_stack.providers.remote.inference.cerebras",
             config_class="llama_stack.providers.remote.inference.cerebras.CerebrasImplConfig",
             description="Cerebras inference provider for running models on Cerebras Cloud platform.",
@@ -63,7 +54,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="ollama",
             provider_type="remote::ollama",
-            pip_packages=["ollama", "aiohttp", "h11>=0.16.0"],
             config_class="llama_stack.providers.remote.inference.ollama.OllamaImplConfig",
             module="llama_stack.providers.remote.inference.ollama",
             description="Ollama inference provider for running local models through the Ollama runtime.",
@@ -72,7 +62,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="vllm",
             provider_type="remote::vllm",
-            pip_packages=[],
             module="llama_stack.providers.remote.inference.vllm",
             config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig",
             provider_data_validator="llama_stack.providers.remote.inference.vllm.VLLMProviderDataValidator",
@@ -82,7 +71,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="tgi",
             provider_type="remote::tgi",
-            pip_packages=["huggingface_hub", "aiohttp"],
             module="llama_stack.providers.remote.inference.tgi",
             config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig",
             description="Text Generation Inference (TGI) provider for HuggingFace model serving.",
@@ -91,7 +79,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="hf::serverless",
             provider_type="remote::hf::serverless",
-            pip_packages=["huggingface_hub", "aiohttp"],
             module="llama_stack.providers.remote.inference.tgi",
             config_class="llama_stack.providers.remote.inference.tgi.InferenceAPIImplConfig",
             description="HuggingFace Inference API serverless provider for on-demand model inference.",
@@ -100,7 +87,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             provider_type="remote::hf::endpoint",
             adapter_type="hf::endpoint",
-            pip_packages=["huggingface_hub", "aiohttp"],
             module="llama_stack.providers.remote.inference.tgi",
             config_class="llama_stack.providers.remote.inference.tgi.InferenceEndpointImplConfig",
             description="HuggingFace Inference Endpoints provider for dedicated model serving.",
@@ -109,9 +95,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="fireworks",
             provider_type="remote::fireworks",
-            pip_packages=[
-                "fireworks-ai<=0.17.16",
-            ],
             module="llama_stack.providers.remote.inference.fireworks",
             config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig",
             provider_data_validator="llama_stack.providers.remote.inference.fireworks.FireworksProviderDataValidator",
@@ -121,9 +104,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="together",
             provider_type="remote::together",
-            pip_packages=[
-                "together",
-            ],
             module="llama_stack.providers.remote.inference.together",
             config_class="llama_stack.providers.remote.inference.together.TogetherImplConfig",
             provider_data_validator="llama_stack.providers.remote.inference.together.TogetherProviderDataValidator",
@@ -133,7 +113,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="bedrock",
             provider_type="remote::bedrock",
-            pip_packages=["boto3"],
             module="llama_stack.providers.remote.inference.bedrock",
             config_class="llama_stack.providers.remote.inference.bedrock.BedrockConfig",
             description="AWS Bedrock inference provider for accessing various AI models through AWS's managed service.",
@@ -142,7 +121,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="databricks",
             provider_type="remote::databricks",
-            pip_packages=[],
             module="llama_stack.providers.remote.inference.databricks",
             config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig",
             description="Databricks inference provider for running models on Databricks' unified analytics platform.",
@@ -151,7 +129,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="nvidia",
             provider_type="remote::nvidia",
-            pip_packages=[],
             module="llama_stack.providers.remote.inference.nvidia",
             config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig",
             description="NVIDIA inference provider for accessing NVIDIA NIM models and AI services.",
@@ -160,7 +137,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="runpod",
             provider_type="remote::runpod",
-            pip_packages=[],
             module="llama_stack.providers.remote.inference.runpod",
             config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig",
             description="RunPod inference provider for running models on RunPod's cloud GPU platform.",
@@ -169,7 +145,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="openai",
             provider_type="remote::openai",
-            pip_packages=["litellm"],
             module="llama_stack.providers.remote.inference.openai",
             config_class="llama_stack.providers.remote.inference.openai.OpenAIConfig",
             provider_data_validator="llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator",
@@ -179,7 +154,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="anthropic",
             provider_type="remote::anthropic",
-            pip_packages=["litellm"],
             module="llama_stack.providers.remote.inference.anthropic",
             config_class="llama_stack.providers.remote.inference.anthropic.AnthropicConfig",
             provider_data_validator="llama_stack.providers.remote.inference.anthropic.config.AnthropicProviderDataValidator",
@@ -189,9 +163,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="gemini",
             provider_type="remote::gemini",
-            pip_packages=[
-                "litellm",
-            ],
             module="llama_stack.providers.remote.inference.gemini",
             config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig",
             provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator",
@@ -201,10 +172,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter_type="vertexai",
             provider_type="remote::vertexai",
-            pip_packages=[
-                "litellm",
-                "google-cloud-aiplatform",
-            ],
             module="llama_stack.providers.remote.inference.vertexai",
             config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig",
             provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator",
@@ -233,9 +200,6 @@ Available Models:
             api=Api.inference,
             adapter_type="groq",
             provider_type="remote::groq",
-            pip_packages=[
-                "litellm",
-            ],
             module="llama_stack.providers.remote.inference.groq",
             config_class="llama_stack.providers.remote.inference.groq.GroqConfig",
             provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator",
@@ -245,7 +209,6 @@ Available Models:
             api=Api.inference,
             adapter_type="llama-openai-compat",
             provider_type="remote::llama-openai-compat",
-            pip_packages=["litellm"],
             module="llama_stack.providers.remote.inference.llama_openai_compat",
             config_class="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaCompatConfig",
             provider_data_validator="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator",
@@ -255,9 +218,6 @@ Available Models:
             api=Api.inference,
             adapter_type="sambanova",
             provider_type="remote::sambanova",
-            pip_packages=[
-                "litellm",
-            ],
             module="llama_stack.providers.remote.inference.sambanova",
             config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig",
             provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator",
@@ -267,7 +227,6 @@ Available Models:
             api=Api.inference,
             adapter_type="passthrough",
             provider_type="remote::passthrough",
-            pip_packages=[],
             module="llama_stack.providers.remote.inference.passthrough",
             config_class="llama_stack.providers.remote.inference.passthrough.PassthroughImplConfig",
             provider_data_validator="llama_stack.providers.remote.inference.passthrough.PassthroughProviderDataValidator",
@@ -277,7 +236,6 @@ Available Models:
             api=Api.inference,
             adapter_type="watsonx",
             provider_type="remote::watsonx",
-            pip_packages=["ibm_watsonx_ai"],
             module="llama_stack.providers.remote.inference.watsonx",
             config_class="llama_stack.providers.remote.inference.watsonx.WatsonXConfig",
             provider_data_validator="llama_stack.providers.remote.inference.watsonx.WatsonXProviderDataValidator",
@@ -287,7 +245,6 @@ Available Models:
             api=Api.inference,
             provider_type="remote::azure",
             adapter_type="azure",
-            pip_packages=["litellm"],
             module="llama_stack.providers.remote.inference.azure",
             config_class="llama_stack.providers.remote.inference.azure.AzureConfig",
             provider_data_validator="llama_stack.providers.remote.inference.azure.config.AzureProviderDataValidator",
@@ -5,15 +5,12 @@
 # the root directory of this source tree.
 
 
-from typing import cast
-
 from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec
 
 # We provide two versions of these providers so that distributions can package the appropriate version of torch.
 # The CPU version is used for distributions that don't have GPU support -- they result in smaller container images.
 torchtune_def = dict(
     api=Api.post_training,
-    pip_packages=["numpy"],
     module="llama_stack.providers.inline.post_training.torchtune",
     config_class="llama_stack.providers.inline.post_training.torchtune.TorchtunePostTrainingConfig",
     api_dependencies=[
@@ -27,28 +24,32 @@ torchtune_def = dict(
 def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
-            **{  # type: ignore
-                **torchtune_def,
-                "provider_type": "inline::torchtune-cpu",
-                "pip_packages": (
-                    cast(list[str], torchtune_def["pip_packages"])
-                    + ["torch torchtune>=0.5.0 torchao>=0.12.0 --extra-index-url https://download.pytorch.org/whl/cpu"]
-                ),
-            },
+            api=Api.post_training,
+            provider_type="inline::torchtune-cpu",
+            module="llama_stack.providers.inline.post_training.torchtune",
+            config_class="llama_stack.providers.inline.post_training.torchtune.TorchtunePostTrainingConfig",
+            api_dependencies=[
+                Api.datasetio,
+                Api.datasets,
+            ],
+            description="TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework (CPU).",
+            package_extras=["cpu"],
         ),
         InlineProviderSpec(
-            **{  # type: ignore
-                **torchtune_def,
-                "provider_type": "inline::torchtune-gpu",
-                "pip_packages": (
-                    cast(list[str], torchtune_def["pip_packages"]) + ["torch torchtune>=0.5.0 torchao>=0.12.0"]
-                ),
-            },
+            api=Api.post_training,
+            provider_type="inline::torchtune-gpu",
+            module="llama_stack.providers.inline.post_training.torchtune",
+            config_class="llama_stack.providers.inline.post_training.torchtune.TorchtunePostTrainingConfig",
+            api_dependencies=[
+                Api.datasetio,
+                Api.datasets,
+            ],
+            description="TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework (GPU).",
+            package_extras=["gpu"],
         ),
         InlineProviderSpec(
             api=Api.post_training,
             provider_type="inline::huggingface-gpu",
-            pip_packages=["trl", "transformers", "peft", "datasets>=4.0.0", "torch"],
             module="llama_stack.providers.inline.post_training.huggingface",
             config_class="llama_stack.providers.inline.post_training.huggingface.HuggingFacePostTrainingConfig",
             api_dependencies=[
@@ -61,7 +62,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.post_training,
             adapter_type="nvidia",
             provider_type="remote::nvidia",
-            pip_packages=["requests", "aiohttp"],
             module="llama_stack.providers.remote.post_training.nvidia",
             config_class="llama_stack.providers.remote.post_training.nvidia.NvidiaPostTrainingConfig",
             description="NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform.",
@@ -18,10 +18,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.safety,
             provider_type="inline::prompt-guard",
-            pip_packages=[
-                "transformers[accelerate]",
-                "torch --index-url https://download.pytorch.org/whl/cpu",
-            ],
             module="llama_stack.providers.inline.safety.prompt_guard",
             config_class="llama_stack.providers.inline.safety.prompt_guard.PromptGuardConfig",
             description="Prompt Guard safety provider for detecting and filtering unsafe prompts and content.",
@@ -29,7 +25,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.safety,
             provider_type="inline::llama-guard",
-            pip_packages=[],
             module="llama_stack.providers.inline.safety.llama_guard",
             config_class="llama_stack.providers.inline.safety.llama_guard.LlamaGuardConfig",
             api_dependencies=[
@@ -40,9 +35,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.safety,
             provider_type="inline::code-scanner",
-            pip_packages=[
-                "codeshield",
-            ],
             module="llama_stack.providers.inline.safety.code_scanner",
             config_class="llama_stack.providers.inline.safety.code_scanner.CodeScannerConfig",
             description="Code Scanner safety provider for detecting security vulnerabilities and unsafe code patterns.",
@@ -51,7 +43,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.safety,
             adapter_type="bedrock",
             provider_type="remote::bedrock",
-            pip_packages=["boto3"],
             module="llama_stack.providers.remote.safety.bedrock",
             config_class="llama_stack.providers.remote.safety.bedrock.BedrockSafetyConfig",
             description="AWS Bedrock safety provider for content moderation using AWS's safety services.",
@@ -60,7 +51,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.safety,
             adapter_type="nvidia",
             provider_type="remote::nvidia",
-            pip_packages=["requests"],
             module="llama_stack.providers.remote.safety.nvidia",
             config_class="llama_stack.providers.remote.safety.nvidia.NVIDIASafetyConfig",
             description="NVIDIA's safety provider for content moderation and safety filtering.",
@@ -69,7 +59,6 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.safety,
             adapter_type="sambanova",
             provider_type="remote::sambanova",
-            pip_packages=["litellm", "requests"],
             module="llama_stack.providers.remote.safety.sambanova",
             config_class="llama_stack.providers.remote.safety.sambanova.SambaNovaSafetyConfig",
             provider_data_validator="llama_stack.providers.remote.safety.sambanova.config.SambaNovaProviderDataValidator",
@@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.scoring,
             provider_type="inline::basic",
-            pip_packages=["requests"],
             module="llama_stack.providers.inline.scoring.basic",
             config_class="llama_stack.providers.inline.scoring.basic.BasicScoringConfig",
             api_dependencies=[
@@ -25,7 +24,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.scoring,
             provider_type="inline::llm-as-judge",
-            pip_packages=[],
             module="llama_stack.providers.inline.scoring.llm_as_judge",
             config_class="llama_stack.providers.inline.scoring.llm_as_judge.LlmAsJudgeScoringConfig",
             api_dependencies=[
@@ -38,7 +36,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.scoring,
             provider_type="inline::braintrust",
-            pip_packages=["autoevals"],
             module="llama_stack.providers.inline.scoring.braintrust",
             config_class="llama_stack.providers.inline.scoring.braintrust.BraintrustScoringConfig",
             api_dependencies=[
@@ -17,10 +17,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.telemetry,
             provider_type="inline::meta-reference",
-            pip_packages=[
-                "opentelemetry-sdk",
-                "opentelemetry-exporter-otlp-proto-http",
-            ],
             optional_api_dependencies=[Api.datasetio],
             module="llama_stack.providers.inline.telemetry.meta_reference",
             config_class="llama_stack.providers.inline.telemetry.meta_reference.config.TelemetryConfig",
@@ -18,17 +18,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.tool_runtime,
             provider_type="inline::rag-runtime",
-            pip_packages=[
-                "chardet",
-                "pypdf",
-                "tqdm",
-                "numpy",
-                "scikit-learn",
-                "scipy",
-                "nltk",
-                "sentencepiece",
-                "transformers",
-            ],
             module="llama_stack.providers.inline.tool_runtime.rag",
             config_class="llama_stack.providers.inline.tool_runtime.rag.config.RagToolRuntimeConfig",
             api_dependencies=[Api.vector_io, Api.inference, Api.files],
@@ -40,7 +29,6 @@ def available_providers() -> list[ProviderSpec]:
             provider_type="remote::brave-search",
             module="llama_stack.providers.remote.tool_runtime.brave_search",
             config_class="llama_stack.providers.remote.tool_runtime.brave_search.config.BraveSearchToolConfig",
-            pip_packages=["requests"],
             provider_data_validator="llama_stack.providers.remote.tool_runtime.brave_search.BraveSearchToolProviderDataValidator",
             description="Brave Search tool for web search capabilities with privacy-focused results.",
         ),
@@ -50,7 +38,6 @@ def available_providers() -> list[ProviderSpec]:
             provider_type="remote::bing-search",
             module="llama_stack.providers.remote.tool_runtime.bing_search",
             config_class="llama_stack.providers.remote.tool_runtime.bing_search.config.BingSearchToolConfig",
-            pip_packages=["requests"],
             provider_data_validator="llama_stack.providers.remote.tool_runtime.bing_search.BingSearchToolProviderDataValidator",
             description="Bing Search tool for web search capabilities using Microsoft's search engine.",
         ),
@@ -60,7 +47,6 @@ def available_providers() -> list[ProviderSpec]:
             provider_type="remote::tavily-search",
             module="llama_stack.providers.remote.tool_runtime.tavily_search",
             config_class="llama_stack.providers.remote.tool_runtime.tavily_search.config.TavilySearchToolConfig",
-            pip_packages=["requests"],
             provider_data_validator="llama_stack.providers.remote.tool_runtime.tavily_search.TavilySearchToolProviderDataValidator",
             description="Tavily Search tool for AI-optimized web search with structured results.",
         ),
@@ -70,7 +56,6 @@ def available_providers() -> list[ProviderSpec]:
             provider_type="remote::wolfram-alpha",
             module="llama_stack.providers.remote.tool_runtime.wolfram_alpha",
             config_class="llama_stack.providers.remote.tool_runtime.wolfram_alpha.config.WolframAlphaToolConfig",
-            pip_packages=["requests"],
             provider_data_validator="llama_stack.providers.remote.tool_runtime.wolfram_alpha.WolframAlphaToolProviderDataValidator",
             description="Wolfram Alpha tool for computational knowledge and mathematical calculations.",
         ),
@@ -80,7 +65,6 @@ def available_providers() -> list[ProviderSpec]:
             provider_type="remote::model-context-protocol",
             module="llama_stack.providers.remote.tool_runtime.model_context_protocol",
             config_class="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderConfig",
-            pip_packages=["mcp>=1.8.1"],
             provider_data_validator="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderDataValidator",
             description="Model Context Protocol (MCP) tool for standardized tool calling and context management.",
         ),
@@ -18,7 +18,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.vector_io,
             provider_type="inline::meta-reference",
-            pip_packages=["faiss-cpu"],
             module="llama_stack.providers.inline.vector_io.faiss",
             config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
             deprecation_warning="Please use the `inline::faiss` provider instead.",
@@ -29,7 +28,6 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.vector_io,
             provider_type="inline::faiss",
-            pip_packages=["faiss-cpu"],
             module="llama_stack.providers.inline.vector_io.faiss",
             config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
             api_dependencies=[Api.inference],
@@ -82,7 +80,6 @@ more details about Faiss in general.
         InlineProviderSpec(
             api=Api.vector_io,
             provider_type="inline::sqlite-vec",
-            pip_packages=["sqlite-vec"],
             module="llama_stack.providers.inline.vector_io.sqlite_vec",
             config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig",
             api_dependencies=[Api.inference],
@@ -289,7 +286,6 @@ See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) f
         InlineProviderSpec(
             api=Api.vector_io,
             provider_type="inline::sqlite_vec",
-            pip_packages=["sqlite-vec"],
             module="llama_stack.providers.inline.vector_io.sqlite_vec",
             config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig",
             deprecation_warning="Please use the `inline::sqlite-vec` provider (notice the hyphen instead of underscore) instead.",
@@ -303,7 +299,6 @@ Please refer to the sqlite-vec provider documentation.
             api=Api.vector_io,
             adapter_type="chromadb",
             provider_type="remote::chromadb",
-            pip_packages=["chromadb-client"],
             module="llama_stack.providers.remote.vector_io.chroma",
             config_class="llama_stack.providers.remote.vector_io.chroma.ChromaVectorIOConfig",
             api_dependencies=[Api.inference],
@@ -345,7 +340,6 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
         InlineProviderSpec(
             api=Api.vector_io,
             provider_type="inline::chromadb",
-            pip_packages=["chromadb"],
             module="llama_stack.providers.inline.vector_io.chroma",
             config_class="llama_stack.providers.inline.vector_io.chroma.ChromaVectorIOConfig",
             api_dependencies=[Api.inference],
@@ -389,7 +383,6 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
             api=Api.vector_io,
             adapter_type="pgvector",
             provider_type="remote::pgvector",
-            pip_packages=["psycopg2-binary"],
             module="llama_stack.providers.remote.vector_io.pgvector",
             config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorVectorIOConfig",
             api_dependencies=[Api.inference],
@@ -500,7 +493,6 @@ See [PGVector's documentation](https://github.com/pgvector/pgvector) for more de
             api=Api.vector_io,
             adapter_type="weaviate",
             provider_type="remote::weaviate",
-            pip_packages=["weaviate-client"],
             module="llama_stack.providers.remote.vector_io.weaviate",
             config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateVectorIOConfig",
             provider_data_validator="llama_stack.providers.remote.vector_io.weaviate.WeaviateRequestProviderData",
@@ -541,7 +533,6 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more
         InlineProviderSpec(
             api=Api.vector_io,
             provider_type="inline::qdrant",
-            pip_packages=["qdrant-client"],
             module="llama_stack.providers.inline.vector_io.qdrant",
             config_class="llama_stack.providers.inline.vector_io.qdrant.QdrantVectorIOConfig",
             api_dependencies=[Api.inference],
@@ -594,7 +585,6 @@ See the [Qdrant documentation](https://qdrant.tech/documentation/) for more deta
             api=Api.vector_io,
             adapter_type="qdrant",
             provider_type="remote::qdrant",
-            pip_packages=["qdrant-client"],
             module="llama_stack.providers.remote.vector_io.qdrant",
             config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantVectorIOConfig",
             api_dependencies=[Api.inference],
@@ -607,7 +597,6 @@ Please refer to the inline provider documentation.
             api=Api.vector_io,
             adapter_type="milvus",
             provider_type="remote::milvus",
-            pip_packages=["pymilvus>=2.4.10"],
             module="llama_stack.providers.remote.vector_io.milvus",
             config_class="llama_stack.providers.remote.vector_io.milvus.MilvusVectorIOConfig",
             api_dependencies=[Api.inference],
@@ -813,7 +802,6 @@ For more details on TLS configuration, refer to the [TLS setup guide](https://mi
         InlineProviderSpec(
             api=Api.vector_io,
             provider_type="inline::milvus",
-            pip_packages=["pymilvus[milvus-lite]>=2.4.10"],
             module="llama_stack.providers.inline.vector_io.milvus",
             config_class="llama_stack.providers.inline.vector_io.milvus.MilvusVectorIOConfig",
             api_dependencies=[Api.inference],
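Every registry hunk above makes the same change: the dependency list comes out of the spec, and only the wiring fields stay behind. For reference, a sketch of the post-change shape of one entry, with field values copied from the inline::faiss hunk above; the import path is assumed for illustration, as is the assumption that the remaining fields validate on their own:

# Sketch of a registry entry after this change: no pip_packages, only wiring.
# Field values are copied from the inline::faiss hunk above; the import path
# below is an assumption, not something this diff shows.
from llama_stack.providers.datatypes import Api, InlineProviderSpec

spec = InlineProviderSpec(
    api=Api.vector_io,
    provider_type="inline::faiss",
    module="llama_stack.providers.inline.vector_io.faiss",
    config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
    api_dependencies=[Api.inference],
)
print(spec.module)  # dotted module path; its directory is what gets installed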
llama_stack/providers/remote/datasetio/huggingface/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-datasetio-huggingface"
+version = "0.1.0"
+description = "HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "datasets",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
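The same template repeats for each new provider package below; only name, description, and dependencies vary, so a provider's requirements are now discoverable with stock tooling instead of a bespoke registry field. A small sketch, assuming a source checkout so the relative path exists:

# Sketch: read a provider's dependencies straight from its new pyproject.toml.
# The relative path assumes a source checkout of llama-stack.
import tomllib
from pathlib import Path

pyproject = Path("llama_stack/providers/remote/datasetio/huggingface/pyproject.toml")
with pyproject.open("rb") as f:
    meta = tomllib.load(f)

print(meta["project"]["name"])          # llama-stack-provider-datasetio-huggingface
print(meta["project"]["dependencies"])  # ['datasets']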
llama_stack/providers/remote/datasetio/nvidia/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-datasetio-nvidia"
+version = "0.1.0"
+description = "NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "datasets",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/eval/nvidia/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-eval-nvidia"
+version = "0.1.0"
+description = "NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/files/s3/pyproject.toml (new file)

@@ -0,0 +1,23 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-files-s3"
+version = "0.1.0"
+description = "AWS S3-based file storage provider for managing files and documents in S3 buckets"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "boto3",
+    "sqlalchemy[asyncio]",
+    "aiosqlite",
+    "asyncpg",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/anthropic/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-anthropic"
+version = "0.1.0"
+description = "Anthropic inference provider for accessing Claude models and Anthropic's AI services"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/azure/pyproject.toml (new file)

@@ -0,0 +1,18 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-azure"
+version = "0.1.0"
+description = "Azure OpenAI inference provider for accessing GPT models and other Azure services. Provider documentation https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+]
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/bedrock/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-bedrock"
+version = "0.1.0"
+description = "AWS Bedrock inference provider for accessing various AI models through AWS's managed service"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "boto3",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/inference/cerebras/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-cerebras"
+version = "0.1.0"
+description = "Cerebras inference provider for running models on Cerebras Cloud platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "cerebras_cloud_sdk",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/inference/databricks/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-databricks"
+version = "0.1.0"
+description = "Databricks inference provider for running models on Databricks' unified analytics platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "openai",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/fireworks/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-fireworks"
+version = "0.1.0"
+description = "Fireworks AI inference provider for Llama models and other AI models on the Fireworks platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "fireworks-ai",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/gemini/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-gemini"
+version = "0.1.0"
+description = "Google Gemini inference provider for accessing Gemini models and Google's AI services"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/inference/groq/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-groq"
+version = "0.1.0"
+description = "Groq inference provider for ultra-fast inference using Groq's LPU technology"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/llama_openai_compat/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-llama-openai-compat"
+version = "0.1.0"
+description = "Llama OpenAI-compatible provider for using Llama models with OpenAI API format"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/nvidia/pyproject.toml
Normal file
21
llama_stack/providers/remote/inference/nvidia/pyproject.toml
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-inference-nvidia"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "NVIDIA inference provider for accessing NVIDIA NIM models and AI services"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"openai",
|
||||||
|
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
23
llama_stack/providers/remote/inference/ollama/pyproject.toml
Normal file
23
llama_stack/providers/remote/inference/ollama/pyproject.toml
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-inference-ollama"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "Ollama inference provider for running local models through the Ollama runtime"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"ollama",
|
||||||
|
"aiohttp",
|
||||||
|
"h11>=0.16.0",
|
||||||
|
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
21
llama_stack/providers/remote/inference/openai/pyproject.toml
Normal file
21
llama_stack/providers/remote/inference/openai/pyproject.toml
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-inference-openai"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "OpenAI inference provider for accessing GPT models and other OpenAI services"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"litellm",
|
||||||
|
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
|
llama_stack/providers/remote/inference/passthrough/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-passthrough"
+version = "0.1.0"
+description = "Passthrough inference provider for connecting to any external inference service not directly supported"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/runpod/pyproject.toml
Normal file
21
llama_stack/providers/remote/inference/runpod/pyproject.toml
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-inference-runpod"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "RunPod inference provider for running models on RunPod's cloud GPU platform"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"openai",
|
||||||
|
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
|
llama_stack/providers/remote/inference/sambanova/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-sambanova"
+version = "0.1.0"
+description = "SambaNova inference provider for running models on SambaNova's dataflow architecture"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/tgi/pyproject.toml
Normal file
22
llama_stack/providers/remote/inference/tgi/pyproject.toml
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-inference-tgi"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "Text Generation Inference (TGI) provider for HuggingFace model serving"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"huggingface_hub",
|
||||||
|
"aiohttp",
|
||||||
|
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
|
llama_stack/providers/remote/inference/together/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-together"
+version = "0.1.0"
+description = "Together AI inference provider for open-source models and collaborative AI development"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "together",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/inference/vertexai/pyproject.toml (new file)

@@ -0,0 +1,19 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-vertexai"
+version = "0.1.0"
+description = "Google VertexAI Remote Inference Provider"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+    "google-cloud-aiplatform"
+]
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/inference/vllm/pyproject.toml
Normal file
21
llama_stack/providers/remote/inference/vllm/pyproject.toml
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-inference-vllm"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "Remote vLLM inference provider for connecting to vLLM servers"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"openai",
|
||||||
|
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
|
llama_stack/providers/remote/inference/watsonx/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-watsonx"
+version = "0.1.0"
+description = "IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "ibm_watson_machine_learning",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/post_training/nvidia/pyproject.toml (new file)

@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-post-training-nvidia"
+version = "0.1.0"
+description = "NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+    "aiohttp",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/safety/bedrock/pyproject.toml
Normal file
20
llama_stack/providers/remote/safety/bedrock/pyproject.toml
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-safety-bedrock"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "AWS Bedrock safety provider for content moderation using AWS's safety services"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"boto3",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
20
llama_stack/providers/remote/safety/nvidia/pyproject.toml
Normal file
20
llama_stack/providers/remote/safety/nvidia/pyproject.toml
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-safety-nvidia"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "NVIDIA's safety provider for content moderation and safety filtering"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"requests",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
21
llama_stack/providers/remote/safety/sambanova/pyproject.toml
Normal file
21
llama_stack/providers/remote/safety/sambanova/pyproject.toml
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-safety-sambanova"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "SambaNova's safety provider for content moderation and safety filtering"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"litellm",
|
||||||
|
"requests",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
|
llama_stack/providers/remote/tool_runtime/bing_search/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-bing-search"
+version = "0.1.0"
+description = "Bing Search tool for web search capabilities using Microsoft's search engine"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/tool_runtime/brave_search/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-brave-search"
+version = "0.1.0"
+description = "Brave Search tool for web search capabilities with privacy-focused results"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/tool_runtime/model_context_protocol/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-model-context-protocol"
+version = "0.1.0"
+description = "Model Context Protocol (MCP) tool for standardized tool calling and context management"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "mcp>=1.8.1",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/tool_runtime/tavily_search/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-tavily-search"
+version = "0.1.0"
+description = "Tavily Search tool for AI-optimized web search with structured results"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]

llama_stack/providers/remote/tool_runtime/wolfram_alpha/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-wolfram-alpha"
+version = "0.1.0"
+description = "Wolfram Alpha tool for computational knowledge and mathematical calculations"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/vector_io/chroma/pyproject.toml
Normal file
20
llama_stack/providers/remote/vector_io/chroma/pyproject.toml
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-vector-io-chroma-remote"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "Chroma remote vector database provider for Llama Stack"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"chromadb-client",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
20
llama_stack/providers/remote/vector_io/milvus/pyproject.toml
Normal file
20
llama_stack/providers/remote/vector_io/milvus/pyproject.toml
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-vector-io-milvus-remote"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "Milvus remote vector database provider for Llama Stack"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"pymilvus>=2.4.10",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
|
llama_stack/providers/remote/vector_io/pgvector/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-vector-io-pgvector"
+version = "0.1.0"
+description = "PGVector remote vector database provider for Llama Stack"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "psycopg2-binary",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
llama_stack/providers/remote/vector_io/qdrant/pyproject.toml
Normal file
20
llama_stack/providers/remote/vector_io/qdrant/pyproject.toml
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "llama-stack-provider-vector-io-qdrant-remote"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "Qdrant remote vector database provider for Llama Stack"
|
||||||
|
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
license = { "text" = "MIT" }
|
||||||
|
dependencies = [
|
||||||
|
"qdrant-client",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["."]
|
||||||
|
include = ["llama_stack*"]
|
|
llama_stack/providers/remote/vector_io/weaviate/pyproject.toml (new file)

@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-vector-io-weaviate"
+version = "0.1.0"
+description = "Weaviate remote vector database provider for Llama Stack"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "weaviate-client",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]