# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pathlib import Path

from llama_stack.apis.models import ModelType
from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
from llama_stack.providers.inline.inference.sentence_transformers import (
    SentenceTransformersInferenceConfig,
)
from llama_stack.providers.remote.inference.groq import GroqConfig
from llama_stack.providers.remote.inference.groq.models import MODEL_ENTRIES
from llama_stack.templates.template import (
    DistributionTemplate,
    RunConfigSettings,
    get_model_registry,
)


def get_distribution_template() -> DistributionTemplate:
    # Provider types that make up this distribution; Groq serves remote
    # LLM inference, everything else runs inline.
    providers = {
        "inference": ["remote::groq"],
        "vector_io": ["inline::faiss"],
        "safety": ["inline::llama-guard"],
        "agents": ["inline::meta-reference"],
        "telemetry": ["inline::meta-reference"],
        "eval": ["inline::meta-reference"],
        "datasetio": ["remote::huggingface", "inline::localfs"],
        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
        "tool_runtime": [
            "remote::brave-search",
            "remote::tavily-search",
            "inline::rag-runtime",
        ],
    }

    name = "groq"
    inference_provider = Provider(
        provider_id=name,
        provider_type=f"remote::{name}",
        config=GroqConfig.sample_run_config(),
    )
    # Embeddings are served locally via sentence-transformers rather than Groq.
    embedding_provider = Provider(
        provider_id="sentence-transformers",
        provider_type="inline::sentence-transformers",
        config=SentenceTransformersInferenceConfig.sample_run_config(),
    )

    # Default embedding model registered against the local embedding provider.
    embedding_model = ModelInput(
        model_id="all-MiniLM-L6-v2",
        provider_id="sentence-transformers",
        model_type=ModelType.embedding,
        metadata={
            "embedding_dimension": 384,
        },
    )

    available_models = {
        "groq": MODEL_ENTRIES,
    }
    default_models = get_model_registry(available_models)

    default_tool_groups = [
        ToolGroupInput(
            toolgroup_id="builtin::websearch",
            provider_id="tavily-search",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::rag",
            provider_id="rag-runtime",
        ),
    ]

    return DistributionTemplate(
        name=name,
        distro_type="self_hosted",
        description="Use Groq for running LLM inference",
        docker_image=None,
        template_path=Path(__file__).parent / "doc_template.md",
        providers=providers,
        available_models_by_provider=available_models,
        run_configs={
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider, embedding_provider],
                },
                default_models=default_models + [embedding_model],
                default_tool_groups=default_tool_groups,
            ),
        },
        run_config_env_vars={
            "LLAMASTACK_PORT": (
                "8321",
                "Port for the Llama Stack distribution server",
            ),
            "GROQ_API_KEY": (
                "",
                "Groq API Key",
            ),
        },
    )
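

# A minimal usage sketch (an illustrative assumption, not part of the upstream
# template tooling, which normally imports this function from its own
# generator scripts): the template object can be built and inspected directly.
if __name__ == "__main__":
    template = get_distribution_template()
    # `name` and `description` are fields passed to the constructor above.
    print(template.name, "-", template.description)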