feat(distro): add more providers to starter distro, prefix conflicting models (#2362)

The name changes to the verifications file are unfortunate, but maybe we
don't need that file at all, @ehhuang?

Edit: the verifications template has now been deleted.
This commit is contained in:
Ashwin Bharambe 2025-06-03 12:10:46 -07:00 committed by GitHub
parent b380cb463f
commit cba55808ab
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 400 additions and 1037 deletions

View file

@ -34,6 +34,10 @@ from llama_stack.providers.remote.inference.groq.config import GroqConfig
from llama_stack.providers.remote.inference.groq.models import (
MODEL_ENTRIES as GROQ_MODEL_ENTRIES,
)
from llama_stack.providers.remote.inference.ollama.config import OllamaImplConfig
from llama_stack.providers.remote.inference.ollama.models import (
MODEL_ENTRIES as OLLAMA_MODEL_ENTRIES,
)
from llama_stack.providers.remote.inference.openai.config import OpenAIConfig
from llama_stack.providers.remote.inference.openai.models import (
MODEL_ENTRIES as OPENAI_MODEL_ENTRIES,
@ -42,6 +46,10 @@ from llama_stack.providers.remote.inference.sambanova.config import SambaNovaImp
from llama_stack.providers.remote.inference.sambanova.models import (
MODEL_ENTRIES as SAMBANOVA_MODEL_ENTRIES,
)
from llama_stack.providers.remote.inference.together.config import TogetherImplConfig
from llama_stack.providers.remote.inference.together.models import (
MODEL_ENTRIES as TOGETHER_MODEL_ENTRIES,
)
from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOConfig
from llama_stack.providers.remote.vector_io.pgvector.config import (
@ -69,6 +77,16 @@ def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderMo
FIREWORKS_MODEL_ENTRIES,
FireworksImplConfig.sample_run_config(api_key="${env.FIREWORKS_API_KEY:}"),
),
(
"together",
TOGETHER_MODEL_ENTRIES,
TogetherImplConfig.sample_run_config(api_key="${env.TOGETHER_API_KEY:}"),
),
(
"ollama",
OLLAMA_MODEL_ENTRIES,
OllamaImplConfig.sample_run_config(),
),
(
"anthropic",
ANTHROPIC_MODEL_ENTRIES,