This commit is contained in:
Charlie Doern 2025-09-23 08:55:26 +02:00 committed by GitHub
commit fa6c5a0ea6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
95 changed files with 1854 additions and 465 deletions

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-anthropic"
version = "0.1.0"
description = "Anthropic inference provider for accessing Claude models and Anthropic's AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-azure"
version = "0.1.0"
description = "Azure OpenAI inference provider for accessing GPT models and other Azure services. Provider documentation https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-bedrock"
version = "0.1.0"
description = "AWS Bedrock inference provider for accessing various AI models through AWS's managed service"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "boto3",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-cerebras"
version = "0.1.0"
description = "Cerebras inference provider for running models on Cerebras Cloud platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "cerebras_cloud_sdk",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-databricks"
version = "0.1.0"
description = "Databricks inference provider for running models on Databricks' unified analytics platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-fireworks"
version = "0.1.0"
description = "Fireworks AI inference provider for Llama models and other AI models on the Fireworks platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "fireworks-ai",
    "grpcio>=1.67.1,<1.71.0", # Pin grpcio version for compatibility
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-gemini"
version = "0.1.0"
description = "Google Gemini inference provider for accessing Gemini models and Google's AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-groq"
version = "0.1.0"
description = "Groq inference provider for ultra-fast inference using Groq's LPU technology"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-llama-openai-compat"
version = "0.1.0"
description = "Llama OpenAI-compatible provider for using Llama models with OpenAI API format"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-nvidia"
version = "0.1.0"
description = "NVIDIA inference provider for accessing NVIDIA NIM models and AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,23 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-ollama"
version = "0.1.0"
description = "Ollama inference provider for running local models through the Ollama runtime"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "ollama",
    "aiohttp",
    "h11>=0.16.0",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-openai"
version = "0.1.0"
description = "OpenAI inference provider for accessing GPT models and other OpenAI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-passthrough"
version = "0.1.0"
description = "Passthrough inference provider for connecting to any external inference service not directly supported"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = []

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-runpod"
version = "0.1.0"
description = "RunPod inference provider for running models on RunPod's cloud GPU platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-sambanova"
version = "0.1.0"
description = "SambaNova inference provider for running models on SambaNova's dataflow architecture"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,22 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-tgi"
version = "0.1.0"
description = "Text Generation Inference (TGI) provider for HuggingFace model serving"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "huggingface_hub",
    "aiohttp",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-together"
version = "0.1.0"
description = "Together AI inference provider for open-source models and collaborative AI development"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "together",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,19 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-vertexai"
version = "0.1.0"
description = "Google VertexAI Remote Inference Provider"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "litellm",
    "google-cloud-aiplatform",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-vllm"
version = "0.1.0"
description = "Remote vLLM inference provider for connecting to vLLM servers"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-watsonx"
version = "0.1.0"
description = "IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "ibm_watson_machine_learning",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]