From d080b42a9b177efea45a8c52a6f8273a25ef231f Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Thu, 15 May 2025 12:16:38 -0700 Subject: [PATCH] refactor: rename dev distro as starter --- llama_stack/templates/dependencies.json | 80 +++++++++---------- .../templates/{dev => starter}/__init__.py | 2 +- .../templates/{dev => starter}/build.yaml | 0 .../templates/{dev => starter}/run.yaml | 0 .../{dev/dev.py => starter/starter.py} | 3 +- pyproject.toml | 1 - 6 files changed, 43 insertions(+), 43 deletions(-) rename llama_stack/templates/{dev => starter}/__init__.py (76%) rename llama_stack/templates/{dev => starter}/build.yaml (100%) rename llama_stack/templates/{dev => starter}/run.yaml (100%) rename llama_stack/templates/{dev/dev.py => starter/starter.py} (97%) diff --git a/llama_stack/templates/dependencies.json b/llama_stack/templates/dependencies.json index 35cbc8878..015eaa228 100644 --- a/llama_stack/templates/dependencies.json +++ b/llama_stack/templates/dependencies.json @@ -152,46 +152,6 @@ "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" ], - "dev": [ - "aiosqlite", - "autoevals", - "blobfile", - "chardet", - "chromadb-client", - "datasets", - "emoji", - "fastapi", - "fire", - "fireworks-ai", - "httpx", - "langdetect", - "litellm", - "matplotlib", - "mcp", - "nltk", - "numpy", - "openai", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-sdk", - "pandas", - "pillow", - "psycopg2-binary", - "pymongo", - "pypdf", - "pythainlp", - "redis", - "requests", - "scikit-learn", - "scipy", - "sentencepiece", - "sqlite-vec", - "tqdm", - "transformers", - "tree_sitter", - "uvicorn", - "sentence-transformers --no-deps", - "torch torchvision --index-url https://download.pytorch.org/whl/cpu" - ], "fireworks": [ "aiosqlite", "autoevals", "blobfile", "chardet", "chromadb-client", "datasets", "emoji", "fastapi", "fire", "fireworks-ai", "httpx", "langdetect", "litellm", "matplotlib", "mcp", "nltk", "numpy", "openai", "opentelemetry-exporter-otlp-proto-http", "opentelemetry-sdk", "pandas", "pillow", "psycopg2-binary", "pymongo", "pypdf", "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", "sqlite-vec", "tqdm", "transformers", "tree_sitter", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" ], @@ -642,6 +602,46 @@ "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" ], + "starter": [ + "aiosqlite", + "autoevals", + 
"blobfile", + "chardet", + "chromadb-client", + "datasets", + "emoji", + "fastapi", + "fire", + "fireworks-ai", + "httpx", + "langdetect", + "litellm", + "matplotlib", + "mcp", + "nltk", + "numpy", + "openai", + "opentelemetry-exporter-otlp-proto-http", + "opentelemetry-sdk", + "pandas", + "pillow", + "psycopg2-binary", + "pymongo", + "pypdf", + "pythainlp", + "redis", + "requests", + "scikit-learn", + "scipy", + "sentencepiece", + "sqlite-vec", + "tqdm", + "transformers", + "tree_sitter", + "uvicorn", + "sentence-transformers --no-deps", + "torch torchvision --index-url https://download.pytorch.org/whl/cpu" + ], "tgi": [ "aiohttp", "aiosqlite", diff --git a/llama_stack/templates/dev/__init__.py b/llama_stack/templates/starter/__init__.py similarity index 76% rename from llama_stack/templates/dev/__init__.py rename to llama_stack/templates/starter/__init__.py index cf966c2a6..9c0d937ce 100644 --- a/llama_stack/templates/dev/__init__.py +++ b/llama_stack/templates/starter/__init__.py @@ -4,4 +4,4 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
-from .dev import get_distribution_template # noqa: F401 +from .starter import get_distribution_template # noqa: F401 diff --git a/llama_stack/templates/dev/build.yaml b/llama_stack/templates/starter/build.yaml similarity index 100% rename from llama_stack/templates/dev/build.yaml rename to llama_stack/templates/starter/build.yaml diff --git a/llama_stack/templates/dev/run.yaml b/llama_stack/templates/starter/run.yaml similarity index 100% rename from llama_stack/templates/dev/run.yaml rename to llama_stack/templates/starter/run.yaml diff --git a/llama_stack/templates/dev/dev.py b/llama_stack/templates/starter/starter.py similarity index 97% rename from llama_stack/templates/dev/dev.py rename to llama_stack/templates/starter/starter.py index 76d5a1fb3..2d76eeb71 100644 --- a/llama_stack/templates/dev/dev.py +++ b/llama_stack/templates/starter/starter.py @@ -46,6 +46,7 @@ from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOC from llama_stack.providers.remote.vector_io.pgvector.config import ( PGVectorVectorIOConfig, ) +from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry from llama_stack.templates.template import ( DistributionTemplate, RunConfigSettings, @@ -53,7 +54,7 @@ from llama_stack.templates.template import ( ) -def get_inference_providers() -> tuple[list[Provider], list[ModelInput]]: +def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]: # in this template, we allow each API key to be optional providers = [ ( diff --git a/pyproject.toml b/pyproject.toml index f1bf7384f..88c331b78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -304,7 +304,6 @@ exclude = [ "^llama_stack/strong_typing/inspection\\.py$", "^llama_stack/strong_typing/schema\\.py$", "^llama_stack/strong_typing/serializer\\.py$", - "^llama_stack/templates/dev/dev\\.py$", "^llama_stack/templates/groq/groq\\.py$", "^llama_stack/templates/llama_api/llama_api\\.py$", 
"^llama_stack/templates/sambanova/sambanova\\.py$",