diff --git a/distributions/dependencies.json b/distributions/dependencies.json
index 18a2484f2..28f057bdc 100644
--- a/distributions/dependencies.json
+++ b/distributions/dependencies.json
@@ -171,6 +171,39 @@
     "sentence-transformers --no-deps",
     "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
   ],
+  "groq": [
+    "aiosqlite",
+    "autoevals",
+    "blobfile",
+    "chardet",
+    "datasets",
+    "faiss-cpu",
+    "fastapi",
+    "fire",
+    "groq",
+    "httpx",
+    "matplotlib",
+    "nltk",
+    "numpy",
+    "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
+    "pandas",
+    "pillow",
+    "psycopg2-binary",
+    "pymongo",
+    "pypdf",
+    "redis",
+    "requests",
+    "scikit-learn",
+    "scipy",
+    "sentencepiece",
+    "tqdm",
+    "transformers",
+    "uvicorn",
+    "sentence-transformers --no-deps",
+    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
+  ],
   "hf-endpoint": [
     "aiohttp",
     "aiosqlite",
diff --git a/docs/source/distributions/self_hosted_distro/groq.md b/docs/source/distributions/self_hosted_distro/groq.md
new file mode 100644
index 000000000..296a5f49b
--- /dev/null
+++ b/docs/source/distributions/self_hosted_distro/groq.md
@@ -0,0 +1,77 @@
+---
+orphan: true
+---
+
+# Groq Distribution
+
+```{toctree}
+:maxdepth: 2
+:hidden:
+
+self
+```
+
+The `llamastack/distribution-groq` distribution consists of the following provider configurations.
+
+| API | Provider(s) |
+|-----|-------------|
+| agents | `inline::meta-reference` |
+| datasetio | `remote::huggingface`, `inline::localfs` |
+| eval | `inline::meta-reference` |
+| inference | `remote::groq` |
+| safety | `inline::llama-guard` |
+| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
+| telemetry | `inline::meta-reference` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` |
+| vector_io | `inline::faiss` |
+
+
+### Environment Variables
+
+The following environment variables can be configured:
+
+- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `GROQ_API_KEY`: Groq API Key (default: ``)
+
+### Models
+
+The following models are available by default:
+
+- `meta-llama/Llama-3.1-8B-Instruct (llama3-8b-8192)`
+- `meta-llama/Llama-3.1-8B-Instruct (llama-3.1-8b-instant)`
+- `meta-llama/Llama-3-70B-Instruct (llama3-70b-8192)`
+- `meta-llama/Llama-3.3-70B-Instruct (llama-3.3-70b-versatile)`
+- `meta-llama/Llama-3.2-3B-Instruct (llama-3.2-3b-preview)`
+
+
+### Prerequisite: API Keys
+
+Make sure you have access to a Groq API key. You can get one by visiting [Groq](https://api.groq.com/).
+
+
+## Running Llama Stack with Groq
+
+You can run Llama Stack with Groq either via Docker, which uses a pre-built image, or via Conda, which builds the distribution code locally.
+
+### Via Docker
+
+This method allows you to get started quickly without having to build the distribution code.
+
+```bash
+LLAMA_STACK_PORT=5001
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  llamastack/distribution-groq \
+  --port $LLAMA_STACK_PORT \
+  --env GROQ_API_KEY=$GROQ_API_KEY
+```
+
+### Via Conda
+
+```bash
+llama stack build --template groq --image-type conda
+llama stack run ./run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env GROQ_API_KEY=$GROQ_API_KEY
+```
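+
+### Testing the Server
+
+Once the server is up, you can try a quick chat completion. This is a minimal
+sketch assuming the `llama-stack-client` Python package and the default port;
+swap in any model ID from the list above.
+
+```python
+from llama_stack_client import LlamaStackClient
+
+# Point the client at the locally running distribution.
+client = LlamaStackClient(base_url="http://localhost:5001")
+
+response = client.inference.chat_completion(
+    model_id="meta-llama/Llama-3.1-8B-Instruct",
+    messages=[{"role": "user", "content": "Hello!"}],
+)
+print(response.completion_message.content)
+```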
diff --git a/llama_stack/providers/remote/inference/groq/config.py b/llama_stack/providers/remote/inference/groq/config.py
index cb2619437..6b221478c 100644
--- a/llama_stack/providers/remote/inference/groq/config.py
+++ b/llama_stack/providers/remote/inference/groq/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Optional
+from typing import Any, Dict, Optional
 
 from pydantic import BaseModel, Field
 
@@ -18,3 +18,15 @@ class GroqConfig(BaseModel):
         default=None,
         description="The Groq API key",
     )
+
+    url: str = Field(
+        default="https://api.groq.com",
+        description="The URL of the Groq API server",
+    )
+
+    @classmethod
+    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+        return {
+            "url": "https://api.groq.com",
+            "api_key": "${env.GROQ_API_KEY}",
+        }
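+
+    # Note: "${env.GROQ_API_KEY}" is Llama Stack's env-substitution syntax; it
+    # is resolved from the environment when the generated run.yaml is loaded
+    # at server startup, not by this class.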
diff --git a/llama_stack/providers/remote/inference/groq/groq.py b/llama_stack/providers/remote/inference/groq/groq.py
index 45c15a467..2c9fab614 100644
--- a/llama_stack/providers/remote/inference/groq/groq.py
+++ b/llama_stack/providers/remote/inference/groq/groq.py
@@ -29,17 +29,10 @@ from llama_stack.apis.inference import (
     ToolConfig,
 )
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
-from llama_stack.models.llama.datatypes import (
-    SamplingParams,
-    ToolDefinition,
-    ToolPromptFormat,
-)
-from llama_stack.models.llama.sku_list import CoreModelId
+from llama_stack.models.llama.datatypes import SamplingParams, ToolDefinition, ToolPromptFormat
 from llama_stack.providers.remote.inference.groq.config import GroqConfig
 from llama_stack.providers.utils.inference.model_registry import (
     ModelRegistryHelper,
-    build_hf_repo_model_entry,
-    build_model_entry,
 )
 
 from .groq_utils import (
@@ -47,33 +40,7 @@ from .groq_utils import (
     convert_chat_completion_response,
     convert_chat_completion_response_stream,
 )
-
-_MODEL_ENTRIES = [
-    build_hf_repo_model_entry(
-        "llama3-8b-8192",
-        CoreModelId.llama3_1_8b_instruct.value,
-    ),
-    build_model_entry(
-        "llama-3.1-8b-instant",
-        CoreModelId.llama3_1_8b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "llama3-70b-8192",
-        CoreModelId.llama3_70b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "llama-3.3-70b-versatile",
-        CoreModelId.llama3_3_70b_instruct.value,
-    ),
-    # Groq only contains a preview version for llama-3.2-3b
-    # Preview models aren't recommended for production use, but we include this one
-    # to pass the test fixture
-    # TODO(aidand): Replace this with a stable model once Groq supports it
-    build_hf_repo_model_entry(
-        "llama-3.2-3b-preview",
-        CoreModelId.llama3_2_3b_instruct.value,
-    ),
-]
+from .models import _MODEL_ENTRIES
 
 
 class GroqInferenceAdapter(Inference, ModelRegistryHelper, NeedsRequestProviderData):
diff --git a/llama_stack/providers/remote/inference/groq/models.py b/llama_stack/providers/remote/inference/groq/models.py
new file mode 100644
index 000000000..54ca2e839
--- /dev/null
+++ b/llama_stack/providers/remote/inference/groq/models.py
@@ -0,0 +1,35 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack.models.llama.sku_list import CoreModelId
+from llama_stack.providers.utils.inference.model_registry import build_model_entry
+
+_MODEL_ENTRIES = [
+    build_model_entry(
+        "llama3-8b-8192",
+        CoreModelId.llama3_1_8b_instruct.value,
+    ),
+    build_model_entry(
+        "llama-3.1-8b-instant",
+        CoreModelId.llama3_1_8b_instruct.value,
+    ),
+    build_model_entry(
+        "llama3-70b-8192",
+        CoreModelId.llama3_70b_instruct.value,
+    ),
+    build_model_entry(
+        "llama-3.3-70b-versatile",
+        CoreModelId.llama3_3_70b_instruct.value,
+    ),
+    # Groq only offers a preview version of llama-3.2-3b.
+    # Preview models aren't recommended for production use, but we include this one
+    # to pass the test fixture.
+    # TODO(aidand): Replace this with a stable model once Groq supports it
+    build_model_entry(
+        "llama-3.2-3b-preview",
+        CoreModelId.llama3_2_3b_instruct.value,
+    ),
+]
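+
+
+# Illustrative only: each entry maps a Groq model alias to a core Llama model
+# descriptor. The groq template reads `provider_model_id` and `llama_model`
+# off these entries; running this module directly prints the mapping.
+if __name__ == "__main__":
+    for entry in _MODEL_ENTRIES:
+        print(f"{entry.provider_model_id} -> {entry.llama_model}")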
diff --git a/llama_stack/templates/groq/__init__.py b/llama_stack/templates/groq/__init__.py
new file mode 100644
index 000000000..02a39601d
--- /dev/null
+++ b/llama_stack/templates/groq/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .groq import get_distribution_template  # noqa: F401
diff --git a/llama_stack/templates/groq/build.yaml b/llama_stack/templates/groq/build.yaml
new file mode 100644
index 000000000..3263ce83b
--- /dev/null
+++ b/llama_stack/templates/groq/build.yaml
@@ -0,0 +1,29 @@
+version: '2'
+distribution_spec:
+  description: Use Groq for running LLM inference
+  providers:
+    inference:
+    - remote::groq
+    vector_io:
+    - inline::faiss
+    safety:
+    - inline::llama-guard
+    agents:
+    - inline::meta-reference
+    telemetry:
+    - inline::meta-reference
+    eval:
+    - inline::meta-reference
+    datasetio:
+    - remote::huggingface
+    - inline::localfs
+    scoring:
+    - inline::basic
+    - inline::llm-as-judge
+    - inline::braintrust
+    tool_runtime:
+    - remote::brave-search
+    - remote::tavily-search
+    - inline::code-interpreter
+    - inline::rag-runtime
+image_type: conda
diff --git a/llama_stack/templates/groq/doc_template.md b/llama_stack/templates/groq/doc_template.md
new file mode 100644
index 000000000..3f9ccbd16
--- /dev/null
+++ b/llama_stack/templates/groq/doc_template.md
@@ -0,0 +1,68 @@
+---
+orphan: true
+---
+# Groq Distribution
+
+```{toctree}
+:maxdepth: 2
+:hidden:
+
+self
+```
+
+The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations.
+
+{{ providers_table }}
+
+{% if run_config_env_vars %}
+### Environment Variables
+
+The following environment variables can be configured:
+
+{% for var, (default_value, description) in run_config_env_vars.items() %}
+- `{{ var }}`: {{ description }} (default: `{{ default_value }}`)
+{% endfor %}
+{% endif %}
+
+{% if default_models %}
+### Models
+
+The following models are available by default:
+
+{% for model in default_models %}
+- `{{ model.model_id }} ({{ model.provider_model_id }})`
+{% endfor %}
+{% endif %}
+
+
+### Prerequisite: API Keys
+
+Make sure you have access to a Groq API key. You can get one by visiting [Groq](https://api.groq.com/).
+
+
+## Running Llama Stack with Groq
+
+You can run Llama Stack with Groq either via Docker, which uses a pre-built image, or via Conda, which builds the distribution code locally.
+
+### Via Docker
+
+This method allows you to get started quickly without having to build the distribution code.
+
+```bash
+LLAMA_STACK_PORT=5001
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  llamastack/distribution-{{ name }} \
+  --port $LLAMA_STACK_PORT \
+  --env GROQ_API_KEY=$GROQ_API_KEY
+```
+
+### Via Conda
+
+```bash
+llama stack build --template groq --image-type conda
+llama stack run ./run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env GROQ_API_KEY=$GROQ_API_KEY
+```
diff --git a/llama_stack/templates/groq/groq.py b/llama_stack/templates/groq/groq.py
new file mode 100644
index 000000000..9e25f02cb
--- /dev/null
+++ b/llama_stack/templates/groq/groq.py
@@ -0,0 +1,121 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from pathlib import Path
+
+from llama_stack.apis.models.models import ModelType
+from llama_stack.distribution.datatypes import (
+    ModelInput,
+    Provider,
+    ToolGroupInput,
+)
+from llama_stack.models.llama.sku_list import all_registered_models
+from llama_stack.providers.inline.inference.sentence_transformers import (
+    SentenceTransformersInferenceConfig,
+)
+from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
+from llama_stack.providers.remote.inference.groq import GroqConfig
+from llama_stack.providers.remote.inference.groq.models import _MODEL_ENTRIES
+from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
+
+
+def get_distribution_template() -> DistributionTemplate:
+    providers = {
+        "inference": ["remote::groq"],
+        "vector_io": ["inline::faiss"],
+        "safety": ["inline::llama-guard"],
+        "agents": ["inline::meta-reference"],
+        "telemetry": ["inline::meta-reference"],
+        "eval": ["inline::meta-reference"],
+        "datasetio": ["remote::huggingface", "inline::localfs"],
+        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
+        "tool_runtime": [
+            "remote::brave-search",
+            "remote::tavily-search",
+            "inline::code-interpreter",
+            "inline::rag-runtime",
+        ],
+    }
+    name = "groq"
+
+    inference_provider = Provider(
+        provider_id=name,
+        provider_type=f"remote::{name}",
+        config=GroqConfig.sample_run_config(),
+    )
+
+    embedding_provider = Provider(
+        provider_id="sentence-transformers",
+        provider_type="inline::sentence-transformers",
+        config=SentenceTransformersInferenceConfig.sample_run_config(),
+    )
+    vector_io_provider = Provider(
+        provider_id="faiss",
+        provider_type="inline::faiss",
+        config=FaissVectorIOConfig.sample_run_config(f"distributions/{name}"),
+    )
+    embedding_model = ModelInput(
+        model_id="all-MiniLM-L6-v2",
+        provider_id="sentence-transformers",
+        model_type=ModelType.embedding,
+        metadata={
+            "embedding_dimension": 384,
+        },
+    )
+
+    core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()}
+    default_models = [
+        ModelInput(
+            model_id=core_model_to_hf_repo[m.llama_model],
+            provider_model_id=m.provider_model_id,
+            provider_id=name,
+        )
+        for m in _MODEL_ENTRIES
+    ]
+
+    default_tool_groups = [
+        ToolGroupInput(
+            toolgroup_id="builtin::websearch",
+            provider_id="tavily-search",
+        ),
+        ToolGroupInput(
+            toolgroup_id="builtin::rag",
+            provider_id="rag-runtime",
+        ),
+        ToolGroupInput(
+            toolgroup_id="builtin::code_interpreter",
+            provider_id="code-interpreter",
+        ),
+    ]
+
+    return DistributionTemplate(
+        name=name,
+        distro_type="self_hosted",
+        description="Use Groq for running LLM inference",
+        docker_image=None,
+        template_path=Path(__file__).parent / "doc_template.md",
+        providers=providers,
+        default_models=default_models,
+        run_configs={
+            "run.yaml": RunConfigSettings(
+                provider_overrides={
+                    "inference": [inference_provider, embedding_provider],
+                },
+                default_models=default_models + [embedding_model],
+                default_tool_groups=default_tool_groups,
+            ),
+        },
+        run_config_env_vars={
+            "LLAMASTACK_PORT": (
+                "5001",
+                "Port for the Llama Stack distribution server",
+            ),
+            "GROQ_API_KEY": (
+                "",
+                "Groq API Key",
+            ),
+        },
+    )
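+
+
+if __name__ == "__main__":
+    # Illustrative only (assuming DistributionTemplate keeps its constructor
+    # arguments as attributes): the real entry point is
+    # `llama stack build --template groq`, which renders build.yaml and
+    # run.yaml from this definition.
+    template = get_distribution_template()
+    print(template.name, sorted(template.providers))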
diff --git a/llama_stack/templates/groq/run.yaml b/llama_stack/templates/groq/run.yaml
new file mode 100644
index 000000000..218514cf6
--- /dev/null
+++ b/llama_stack/templates/groq/run.yaml
@@ -0,0 +1,136 @@
+version: '2'
+image_name: groq
+apis:
+- agents
+- datasetio
+- eval
+- inference
+- safety
+- scoring
+- telemetry
+- tool_runtime
+- vector_io
+providers:
+  inference:
+  - provider_id: groq
+    provider_type: remote::groq
+    config:
+      url: https://api.groq.com
+      api_key: ${env.GROQ_API_KEY}
+  - provider_id: sentence-transformers
+    provider_type: inline::sentence-transformers
+    config: {}
+  vector_io:
+  - provider_id: faiss
+    provider_type: inline::faiss
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/faiss_store.db
+  safety:
+  - provider_id: llama-guard
+    provider_type: inline::llama-guard
+    config: {}
+  agents:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      persistence_store:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/agents_store.db
+  telemetry:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/groq/trace_store.db}
+  eval:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config: {}
+  datasetio:
+  - provider_id: huggingface
+    provider_type: remote::huggingface
+    config: {}
+  - provider_id: localfs
+    provider_type: inline::localfs
+    config: {}
+  scoring:
+  - provider_id: basic
+    provider_type: inline::basic
+    config: {}
+  - provider_id: llm-as-judge
+    provider_type: inline::llm-as-judge
+    config: {}
+  - provider_id: braintrust
+    provider_type: inline::braintrust
+    config:
+      openai_api_key: ${env.OPENAI_API_KEY:}
+  tool_runtime:
+  - provider_id: brave-search
+    provider_type: remote::brave-search
+    config:
+      api_key: ${env.BRAVE_SEARCH_API_KEY:}
+      max_results: 3
+  - provider_id: tavily-search
+    provider_type: remote::tavily-search
+    config:
+      api_key: ${env.TAVILY_SEARCH_API_KEY:}
+      max_results: 3
+  - provider_id: code-interpreter
+    provider_type: inline::code-interpreter
+    config: {}
+  - provider_id: rag-runtime
+    provider_type: inline::rag-runtime
+    config: {}
+metadata_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/registry.db
+models:
+- metadata: {}
+  model_id: meta-llama/Llama-3.1-8B-Instruct
+  provider_id: groq
+  provider_model_id: llama3-8b-8192
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.1-8B-Instruct
+  provider_id: groq
+  provider_model_id: llama-3.1-8b-instant
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3-70B-Instruct
+  provider_id: groq
+  provider_model_id: llama3-70b-8192
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.3-70B-Instruct
+  provider_id: groq
+  provider_model_id: llama-3.3-70b-versatile
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-3B-Instruct
+  provider_id: groq
+  provider_model_id: llama-3.2-3b-preview
+  model_type: llm
+- metadata:
+    embedding_dimension: 384
+  model_id: all-MiniLM-L6-v2
+  provider_id: sentence-transformers
+  model_type: embedding
+shields: []
+vector_dbs: []
+datasets: []
+scoring_fns: []
+benchmarks: []
+tool_groups:
+- toolgroup_id: builtin::websearch
+  provider_id: tavily-search
+- toolgroup_id: builtin::rag
+  provider_id: rag-runtime
+- toolgroup_id: builtin::code_interpreter
+  provider_id: code-interpreter
+server:
+  port: 8321
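+# Note: values like ${env.GROQ_API_KEY} are substituted from the environment at
+# server startup; ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq} falls back
+# to the default after the colon when the variable is unset. For example:
+#   export GROQ_API_KEY=<your-key>
+#   llama stack run ./run.yaml --port 5001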