Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-08 21:04:39 +00:00)
refactor: convert providers to be installed via package
Currently, providers declare their dependencies through a `pip_packages` list. Rather than maintain our own form of Python dependency management, we should use a `pyproject.toml` file in each provider, declaring its dependencies in a more trackable manner. Each provider can then be installed through the `module` field already present in the ProviderSpec: with that field pointing to the directory the provider lives in, we can simply `uv pip install` the directory instead of installing the dependencies one by one.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
parent a1301911e4
commit 41431d8bdd
76 changed files with 1294 additions and 134 deletions
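For illustration, a minimal sketch of the installation flow the commit message describes, assuming each provider directory ships one of the pyproject.toml files added below; the helper name is an assumption, not the actual llama-stack code:

# Hypothetical sketch: resolve a ProviderSpec `module` to its source directory and
# install that directory (with its pyproject.toml dependencies) via `uv pip install`,
# instead of looping over a pip_packages list.
import importlib.util
import subprocess
from pathlib import Path

def install_provider(module: str) -> None:
    spec = importlib.util.find_spec(module)
    if spec is None or spec.origin is None:
        raise ModuleNotFoundError(f"cannot locate provider module {module!r}")
    provider_dir = Path(spec.origin).parent  # directory holding the provider's pyproject.toml
    subprocess.run(["uv", "pip", "install", str(provider_dir)], check=True)

# e.g. install_provider("llama_stack.providers.remote.inference.ollama")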
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-datasetio-huggingface"
version = "0.1.0"
description = "HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "datasets",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/datasetio/nvidia/pyproject.toml (new file)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-datasetio-nvidia"
version = "0.1.0"
description = "NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "datasets",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
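Because each provider now carries a standard pyproject.toml like the one above, its dependency list can be read back with the stdlib TOML parser rather than a hand-maintained `pip_packages` field. A small sketch (the function name is an assumption; the path is the file added above):

# Sketch: read a provider's declared dependencies from its pyproject.toml (Python 3.11+).
import tomllib
from pathlib import Path

def provider_dependencies(provider_dir: str) -> list[str]:
    with open(Path(provider_dir) / "pyproject.toml", "rb") as f:
        project = tomllib.load(f)
    return project["project"].get("dependencies", [])

# e.g. provider_dependencies("llama_stack/providers/remote/datasetio/nvidia")  # -> ["datasets"]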
llama_stack/providers/remote/eval/nvidia/pyproject.toml (new file)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-eval-nvidia"
version = "0.1.0"
description = "NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/files/s3/pyproject.toml (new file)
@@ -0,0 +1,23 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-files-s3"
version = "0.1.0"
description = "AWS S3-based file storage provider for managing files and documents in S3 buckets"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "boto3",
    "sqlalchemy[asyncio]",
    "aiosqlite",
    "asyncpg",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-anthropic"
version = "0.1.0"
description = "Anthropic inference provider for accessing Claude models and Anthropic's AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/azure/pyproject.toml (new file)
@@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-azure"
version = "0.1.0"
description = "Azure OpenAI inference provider for accessing GPT models and other Azure services. Provider documentation https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-bedrock"
version = "0.1.0"
description = "AWS Bedrock inference provider for accessing various AI models through AWS's managed service"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "boto3",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-cerebras"
version = "0.1.0"
description = "Cerebras inference provider for running models on Cerebras Cloud platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "cerebras_cloud_sdk",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-databricks"
version = "0.1.0"
description = "Databricks inference provider for running models on Databricks' unified analytics platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-fireworks"
version = "0.1.0"
description = "Fireworks AI inference provider for Llama models and other AI models on the Fireworks platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "fireworks-ai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/gemini/pyproject.toml (new file)
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-gemini"
version = "0.1.0"
description = "Google Gemini inference provider for accessing Gemini models and Google's AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/groq/pyproject.toml (new file)
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-groq"
version = "0.1.0"
description = "Groq inference provider for ultra-fast inference using Groq's LPU technology"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-llama-openai-compat"
version = "0.1.0"
description = "Llama OpenAI-compatible provider for using Llama models with OpenAI API format"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/nvidia/pyproject.toml (new file)
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-nvidia"
version = "0.1.0"
description = "NVIDIA inference provider for accessing NVIDIA NIM models and AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/ollama/pyproject.toml (new file)
@@ -0,0 +1,23 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-ollama"
version = "0.1.0"
description = "Ollama inference provider for running local models through the Ollama runtime"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "ollama",
    "aiohttp",
    "h11>=0.16.0",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/openai/pyproject.toml (new file)
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-openai"
version = "0.1.0"
description = "OpenAI inference provider for accessing GPT models and other OpenAI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-passthrough"
version = "0.1.0"
description = "Passthrough inference provider for connecting to any external inference service not directly supported"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/runpod/pyproject.toml (new file)
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-runpod"
version = "0.1.0"
description = "RunPod inference provider for running models on RunPod's cloud GPU platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-sambanova"
version = "0.1.0"
description = "SambaNova inference provider for running models on SambaNova's dataflow architecture"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/tgi/pyproject.toml (new file)
@@ -0,0 +1,22 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-tgi"
version = "0.1.0"
description = "Text Generation Inference (TGI) provider for HuggingFace model serving"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "huggingface_hub",
    "aiohttp",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-together"
version = "0.1.0"
description = "Together AI inference provider for open-source models and collaborative AI development"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "together",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,19 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-vertexai"
version = "0.1.0"
description = "Google VertexAI Remote Inference Provider"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
    "google-cloud-aiplatform",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/inference/vllm/pyproject.toml (new file)
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-vllm"
version = "0.1.0"
description = "Remote vLLM inference provider for connecting to vLLM servers"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-watsonx"
version = "0.1.0"
description = "IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "ibm_watson_machine_learning",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-post-training-nvidia"
version = "0.1.0"
description = "NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
    "aiohttp",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/safety/bedrock/pyproject.toml (new file)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-bedrock"
version = "0.1.0"
description = "AWS Bedrock safety provider for content moderation using AWS's safety services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "boto3",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/safety/nvidia/pyproject.toml (new file)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-nvidia"
version = "0.1.0"
description = "NVIDIA's safety provider for content moderation and safety filtering"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/safety/sambanova/pyproject.toml (new file)
@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-sambanova"
version = "0.1.0"
description = "SambaNova's safety provider for content moderation and safety filtering"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-tool-runtime-bing-search"
version = "0.1.0"
description = "Bing Search tool for web search capabilities using Microsoft's search engine"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-tool-runtime-brave-search"
version = "0.1.0"
description = "Brave Search tool for web search capabilities with privacy-focused results"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-tool-runtime-model-context-protocol"
version = "0.1.0"
description = "Model Context Protocol (MCP) tool for standardized tool calling and context management"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "mcp>=1.8.1",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-tool-runtime-tavily-search"
version = "0.1.0"
description = "Tavily Search tool for AI-optimized web search with structured results"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-tool-runtime-wolfram-alpha"
version = "0.1.0"
description = "Wolfram Alpha tool for computational knowledge and mathematical calculations"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/vector_io/chroma/pyproject.toml (new file)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-chroma-remote"
version = "0.1.0"
description = "Chroma remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "chromadb-client",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/vector_io/milvus/pyproject.toml (new file)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-milvus-remote"
version = "0.1.0"
description = "Milvus remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "pymilvus>=2.4.10",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-pgvector"
version = "0.1.0"
description = "PGVector remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "psycopg2-binary",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
llama_stack/providers/remote/vector_io/qdrant/pyproject.toml (new file)
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-qdrant-remote"
version = "0.1.0"
description = "Qdrant remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "qdrant-client",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-weaviate"
version = "0.1.0"
description = "Weaviate remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "weaviate-client",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]