refactor: rename dev distro as starter (#2181)

We want this to be a "flagship" distribution we can advertise to a
segment of users who want to get started quickly. This distro packages a
bunch of remote providers and some cheap inline providers so users get a
solid "AI Platform in a box" setup instantly.
Ashwin Bharambe 2025-05-15 12:52:34 -07:00, committed by GitHub
parent 87e284f1a0
commit 1a6d4af5e9
6 changed files with 54 additions and 54 deletions


@@ -152,46 +152,6 @@
     "sentence-transformers --no-deps",
     "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
   ],
-  "dev": [
-    "aiosqlite",
-    "autoevals",
-    "blobfile",
-    "chardet",
-    "chromadb-client",
-    "datasets",
-    "emoji",
-    "fastapi",
-    "fire",
-    "fireworks-ai",
-    "httpx",
-    "langdetect",
-    "litellm",
-    "matplotlib",
-    "mcp",
-    "nltk",
-    "numpy",
-    "openai",
-    "opentelemetry-exporter-otlp-proto-http",
-    "opentelemetry-sdk",
-    "pandas",
-    "pillow",
-    "psycopg2-binary",
-    "pymongo",
-    "pypdf",
-    "pythainlp",
-    "redis",
-    "requests",
-    "scikit-learn",
-    "scipy",
-    "sentencepiece",
-    "sqlite-vec",
-    "tqdm",
-    "transformers",
-    "tree_sitter",
-    "uvicorn",
-    "sentence-transformers --no-deps",
-    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
-  ],
   "fireworks": [
     "aiosqlite",
     "autoevals",
@@ -642,6 +602,46 @@
     "sentence-transformers --no-deps",
     "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
   ],
+  "starter": [
+    "aiosqlite",
+    "autoevals",
+    "blobfile",
+    "chardet",
+    "chromadb-client",
+    "datasets",
+    "emoji",
+    "fastapi",
+    "fire",
+    "fireworks-ai",
+    "httpx",
+    "langdetect",
+    "litellm",
+    "matplotlib",
+    "mcp",
+    "nltk",
+    "numpy",
+    "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
+    "pandas",
+    "pillow",
+    "psycopg2-binary",
+    "pymongo",
+    "pypdf",
+    "pythainlp",
+    "redis",
+    "requests",
+    "scikit-learn",
+    "scipy",
+    "sentencepiece",
+    "sqlite-vec",
+    "tqdm",
+    "transformers",
+    "tree_sitter",
+    "uvicorn",
+    "sentence-transformers --no-deps",
+    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
+  ],
   "tgi": [
     "aiohttp",
     "aiosqlite",


@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from .dev import get_distribution_template  # noqa: F401
+from .starter import get_distribution_template  # noqa: F401


@@ -1,6 +1,6 @@
 version: '2'
 distribution_spec:
-  description: Distribution for running e2e tests in CI
+  description: Quick start template for running Llama Stack with several popular providers
   providers:
     inference:
     - remote::openai


@@ -1,5 +1,5 @@
 version: '2'
-image_name: dev
+image_name: starter
 apis:
 - agents
 - datasetio
@@ -46,7 +46,7 @@ providers:
   - provider_id: sqlite-vec
     provider_type: inline::sqlite-vec
     config:
-      db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/sqlite_vec.db
+      db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/sqlite_vec.db
   - provider_id: ${env.ENABLE_CHROMADB+chromadb}
     provider_type: remote::chromadb
     config:
@@ -71,14 +71,14 @@ providers:
       persistence_store:
         type: sqlite
         namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/agents_store.db
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/agents_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
       service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/trace_store.db
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -86,7 +86,7 @@ providers:
       kvstore:
         type: sqlite
         namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/meta_reference_eval.db
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/meta_reference_eval.db
   datasetio:
   - provider_id: huggingface
     provider_type: remote::huggingface
@@ -94,14 +94,14 @@ providers:
       kvstore:
         type: sqlite
         namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/huggingface_datasetio.db
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/huggingface_datasetio.db
   - provider_id: localfs
     provider_type: inline::localfs
     config:
       kvstore:
         type: sqlite
         namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/localfs_datasetio.db
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/localfs_datasetio.db
   scoring:
   - provider_id: basic
     provider_type: inline::basic
@@ -132,7 +132,7 @@ providers:
     config: {}
 metadata_store:
   type: sqlite
-  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/registry.db
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/registry.db
 models:
 - metadata: {}
   model_id: openai/gpt-4o
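
The run.yaml entries above rely on two placeholder forms: ${env.VAR:default} substitutes an environment variable and falls back to a default, while ${env.ENABLE_CHROMADB+chromadb} mirrors the shell's ${VAR+word} form, presumably yielding "chromadb" only when ENABLE_CHROMADB is set. A minimal sketch of how the default form could be resolved, assuming a simple regex-based resolver rather than llama_stack's actual implementation:

import os
import re

# Hypothetical resolver for the `${env.VAR:default}` placeholders seen in
# run.yaml above; llama_stack's real substitution logic may differ.
_DEFAULT_FORM = re.compile(r"\$\{env\.(\w+):([^}]*)\}")

def resolve_placeholders(value: str) -> str:
    """Replace each `${env.VAR:default}` with os.environ[VAR] or the default."""
    return _DEFAULT_FORM.sub(lambda m: os.environ.get(m.group(1), m.group(2)), value)

# With SQLITE_STORE_DIR unset, the registry path falls back to the
# per-distro default directory renamed in this commit:
print(resolve_placeholders("${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/registry.db"))
# -> ~/.llama/distributions/starter/registry.db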


@@ -46,6 +46,7 @@ from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOConfig
 from llama_stack.providers.remote.vector_io.pgvector.config import (
     PGVectorVectorIOConfig,
 )
+from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
 from llama_stack.templates.template import (
     DistributionTemplate,
     RunConfigSettings,
@@ -53,7 +54,7 @@ from llama_stack.templates.template import (
 )
 
 
-def get_inference_providers() -> tuple[list[Provider], list[ModelInput]]:
+def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]:
     # in this template, we allow each API key to be optional
     providers = [
         (
@@ -119,7 +120,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::model-context-protocol",
         ],
     }
-    name = "dev"
+    name = "starter"
 
     vector_io_providers = [
         Provider(
@@ -171,7 +172,7 @@ def get_distribution_template() -> DistributionTemplate:
     return DistributionTemplate(
         name=name,
         distro_type="self_hosted",
-        description="Distribution for running e2e tests in CI",
+        description="Quick start template for running Llama Stack with several popular providers",
         container_image=None,
         template_path=None,
         providers=providers,
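
The signature change above moves get_inference_providers() from returning a flat list of ModelInput objects to model entries grouped by provider. A minimal sketch of the new return shape, assuming an import path for Provider and an illustrative ProviderModelEntry field (only the ProviderModelEntry import is confirmed by the diff):

from llama_stack.distribution.datatypes import Provider  # assumed path
from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry


def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]:
    # Per the template's comment, each remote provider's API key is optional;
    # a provider only becomes useful when its key is present in the environment.
    providers = [
        Provider(provider_id="openai", provider_type="remote::openai", config={}),
    ]
    # Models are keyed by provider id rather than returned as one flat list,
    # so downstream code can register models per enabled provider.
    available_models = {
        "openai": [ProviderModelEntry(provider_model_id="gpt-4o")],  # field name assumed
    }
    return providers, available_models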


@@ -304,7 +304,6 @@ exclude = [
     "^llama_stack/strong_typing/inspection\\.py$",
     "^llama_stack/strong_typing/schema\\.py$",
     "^llama_stack/strong_typing/serializer\\.py$",
-    "^llama_stack/templates/dev/dev\\.py$",
     "^llama_stack/templates/groq/groq\\.py$",
     "^llama_stack/templates/llama_api/llama_api\\.py$",
     "^llama_stack/templates/sambanova/sambanova\\.py$",