mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-02 00:34:44 +00:00)

refactor: rename dev distro as starter

parent 10b1056dea
commit d080b42a9b

6 changed files with 43 additions and 43 deletions
@@ -152,46 +152,6 @@
     "sentence-transformers --no-deps",
     "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
   ],
-  "dev": [
-    "aiosqlite",
-    "autoevals",
-    "blobfile",
-    "chardet",
-    "chromadb-client",
-    "datasets",
-    "emoji",
-    "fastapi",
-    "fire",
-    "fireworks-ai",
-    "httpx",
-    "langdetect",
-    "litellm",
-    "matplotlib",
-    "mcp",
-    "nltk",
-    "numpy",
-    "openai",
-    "opentelemetry-exporter-otlp-proto-http",
-    "opentelemetry-sdk",
-    "pandas",
-    "pillow",
-    "psycopg2-binary",
-    "pymongo",
-    "pypdf",
-    "pythainlp",
-    "redis",
-    "requests",
-    "scikit-learn",
-    "scipy",
-    "sentencepiece",
-    "sqlite-vec",
-    "tqdm",
-    "transformers",
-    "tree_sitter",
-    "uvicorn",
-    "sentence-transformers --no-deps",
-    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
-  ],
   "fireworks": [
     "aiosqlite",
     "autoevals",
@@ -642,6 +602,46 @@
     "sentence-transformers --no-deps",
     "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
   ],
+  "dev": [
+    "aiosqlite",
+    "autoevals",
+    "blobfile",
+    "chardet",
+    "chromadb-client",
+    "datasets",
+    "emoji",
+    "fastapi",
+    "fire",
+    "fireworks-ai",
+    "httpx",
+    "langdetect",
+    "litellm",
+    "matplotlib",
+    "mcp",
+    "nltk",
+    "numpy",
+    "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
+    "pandas",
+    "pillow",
+    "psycopg2-binary",
+    "pymongo",
+    "pypdf",
+    "pythainlp",
+    "redis",
+    "requests",
+    "scikit-learn",
+    "scipy",
+    "sentencepiece",
+    "sqlite-vec",
+    "tqdm",
+    "transformers",
+    "tree_sitter",
+    "uvicorn",
+    "sentence-transformers --no-deps",
+    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
+  ],
   "tgi": [
     "aiohttp",
     "aiosqlite",
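The two hunks above relocate the same 40-line dependency block inside the generated dependency manifest; the 38 package entries themselves are unchanged. As a quick sanity check after regenerating the manifest, the distro keys and their package counts can be listed with a few lines of Python. This is a minimal sketch, not part of the commit, and it assumes the JSON shown in these hunks lives at distributions/dependencies.json in the repository root.

# Minimal sketch (not from the commit): list the distros and package counts in
# the generated dependency manifest. The path below is an assumption about
# where the JSON shown in the hunks above is stored.
import json
from pathlib import Path

manifest = json.loads(Path("distributions/dependencies.json").read_text())

# Each top-level key names a distro; each value is its pip dependency list.
for distro, deps in manifest.items():
    print(f"{distro}: {len(deps)} packages")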
@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from .dev import get_distribution_template  # noqa: F401
+from .starter import get_distribution_template  # noqa: F401
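The hunk above only swaps the relative re-export in the package __init__ so downstream code can keep importing get_distribution_template from the renamed template package. A hedged usage sketch follows; the fully qualified import path and the zero-argument call are assumptions inferred from the rename, not shown in the diff.

# Hypothetical consumer of the re-exported symbol; the import path and the
# no-argument call are assumptions inferred from the rename above.
from llama_stack.templates.starter import get_distribution_template

template = get_distribution_template()
print(type(template).__name__)  # expected to be a DistributionTemplate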
@@ -46,6 +46,7 @@ from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOConfig
 from llama_stack.providers.remote.vector_io.pgvector.config import (
     PGVectorVectorIOConfig,
 )
+from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
 from llama_stack.templates.template import (
     DistributionTemplate,
     RunConfigSettings,
@@ -53,7 +54,7 @@ from llama_stack.templates.template import (
 )


-def get_inference_providers() -> tuple[list[Provider], list[ModelInput]]:
+def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]:
     # in this template, we allow each API key to be optional
     providers = [
         (
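The second hunk above changes the return annotation of get_inference_providers from a flat list[ModelInput] to a dict keyed by provider, i.e. the available models are now grouped per provider rather than returned as one list. The sketch below illustrates that shape only; Provider and ProviderModelEntry are stand-in dataclasses defined locally, and their field names and example values are assumptions, not the real llama_stack classes.

# Self-contained sketch of the new return shape. Provider and
# ProviderModelEntry are local stand-ins, not the real llama_stack classes;
# their field names and the example values are illustrative assumptions.
from dataclasses import dataclass, field


@dataclass
class Provider:
    provider_id: str
    provider_type: str
    config: dict = field(default_factory=dict)


@dataclass
class ProviderModelEntry:
    provider_model_id: str


def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]:
    # Models are grouped per provider id instead of being returned as one flat list.
    providers = [
        Provider(provider_id="fireworks", provider_type="remote::fireworks"),
    ]
    models_by_provider = {
        "fireworks": [ProviderModelEntry(provider_model_id="llama-v3p1-8b-instruct")],
    }
    return providers, models_by_provider


providers, models_by_provider = get_inference_providers()
print(len(providers), sum(len(entries) for entries in models_by_provider.values()))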
@@ -304,7 +304,6 @@ exclude = [
     "^llama_stack/strong_typing/inspection\\.py$",
     "^llama_stack/strong_typing/schema\\.py$",
     "^llama_stack/strong_typing/serializer\\.py$",
-    "^llama_stack/templates/dev/dev\\.py$",
     "^llama_stack/templates/groq/groq\\.py$",
     "^llama_stack/templates/llama_api/llama_api\\.py$",
     "^llama_stack/templates/sambanova/sambanova\\.py$",