diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3707d4671..89064b692 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -57,3 +57,17 @@ repos: # hooks: # - id: markdown-link-check # args: ['--quiet'] + +# - repo: local +# hooks: +# - id: distro-codegen +# name: Distribution Template Codegen +# additional_dependencies: +# - rich +# - pydantic +# entry: python -m llama_stack.scripts.distro_codegen +# language: python +# pass_filenames: false +# require_serial: true +# files: ^llama_stack/templates/.*$ +# stages: [manual] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7e05c683a..4713f564a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,6 +12,11 @@ We actively welcome your pull requests. 5. Make sure your code lints. 6. If you haven't already, complete the Contributor License Agreement ("CLA"). + +### Updating Provider Configurations + +If you have made changes to a provider's configuration (e.g., introduced a new config key or changed the list of models), run `python llama_stack/scripts/distro_codegen.py` to regenerate the distribution YAML files and the corresponding documentation. Do not edit the files under `docs/source/.../distributions/` manually; they are auto-generated. + ### Building the Documentation If you are making changes to the documentation at [https://llama-stack.readthedocs.io/en/latest/](https://llama-stack.readthedocs.io/en/latest/), you can use the following command to build the documentation and preview your changes. You will need [Sphinx](https://www.sphinx-doc.org/en/master/) and the readthedocs theme. @@ -26,6 +31,19 @@ make html sphinx-autobuild source build/html ``` +## Pre-commit Hooks + +We use [pre-commit](https://pre-commit.com/) to run linting and formatting checks on your code. You can install the pre-commit hooks by running: + +```bash +$ cd llama-stack +$ conda activate +$ pip install pre-commit +$ pre-commit install +``` + +After that, the pre-commit hooks will run automatically before each commit. + ## Contributor License Agreement ("CLA") In order to accept your pull request, we need you to submit a CLA. You only need to do this once to work on any of Meta's open source projects. 
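The CONTRIBUTING.md section above and the commented-out `distro-codegen` hook describe the same codegen step. For reference, here is a hedged sketch of how one might invoke it, assuming you run from the repository root and, for the second command, that the hook has been uncommented in `.pre-commit-config.yaml` (the `--all-files` flag is an assumption about how widely you want to scope the run):

```bash
# Run the codegen script directly, as described in CONTRIBUTING.md
python llama_stack/scripts/distro_codegen.py

# Or trigger the hook through pre-commit's manual stage
# (the hook declares `stages: [manual]`, so it only runs when asked)
pre-commit run distro-codegen --hook-stage manual --all-files
```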
diff --git a/MANIFEST.in b/MANIFEST.in index 0517b86a8..27cb775f7 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ include requirements.txt include llama_stack/distribution/*.sh include llama_stack/cli/scripts/*.sh -include llama_stack/templates/*/build.yaml +include llama_stack/templates/*/*.yaml diff --git a/README.md b/README.md index 593690740..bd2364f6f 100644 --- a/README.md +++ b/README.md @@ -112,7 +112,7 @@ Please checkout our [Documentations](https://llama-stack.readthedocs.io/en/lates | Python | [llama-stack-client-python](https://github.com/meta-llama/llama-stack-client-python) | [![PyPI version](https://img.shields.io/pypi/v/llama_stack_client.svg)](https://pypi.org/project/llama_stack_client/) | Swift | [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift) | [![Swift Package Index](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2Fmeta-llama%2Fllama-stack-client-swift%2Fbadge%3Ftype%3Dswift-versions)](https://swiftpackageindex.com/meta-llama/llama-stack-client-swift) | Node | [llama-stack-client-node](https://github.com/meta-llama/llama-stack-client-node) | [![NPM version](https://img.shields.io/npm/v/llama-stack-client.svg)](https://npmjs.org/package/llama-stack-client) -| Kotlin | [llama-stack-client-kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) | +| Kotlin | [llama-stack-client-kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) | [![Maven version](https://img.shields.io/maven-central/v/com.llama.llamastack/llama-stack-client-kotlin)](https://central.sonatype.com/artifact/com.llama.llamastack/llama-stack-client-kotlin) Check out our client SDKs for connecting to Llama Stack server in your preferred language, you can choose from [python](https://github.com/meta-llama/llama-stack-client-python), [node](https://github.com/meta-llama/llama-stack-client-node), [swift](https://github.com/meta-llama/llama-stack-client-swift), and [kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) programming languages to quickly build your applications. 
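Since the MANIFEST.in hunk above widens the packaged files from `templates/*/build.yaml` to every `*.yaml` under a template (picking up the new `run.yaml` files), one way to verify the change is to build a source distribution and inspect its contents. This is a hedged sketch, not part of the PR; it assumes the `build` package is installed and the default `dist/` output location:

```bash
# Build an sdist and confirm the template YAML files now ship with it
pip install build
python -m build --sdist
tar -tzf dist/*.tar.gz | grep 'templates/.*\.yaml'
```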
diff --git a/distributions/bedrock/run.yaml b/distributions/bedrock/run.yaml index 45e8aa7b5..2f7cb36ef 100644 --- a/distributions/bedrock/run.yaml +++ b/distributions/bedrock/run.yaml @@ -1,5 +1,4 @@ version: '2' -built_at: '2024-11-01T17:40:45.325529' image_name: local name: bedrock docker_image: null diff --git a/distributions/dell-tgi/run.yaml b/distributions/dell-tgi/run.yaml index 4b7b331fe..3f8a98779 100644 --- a/distributions/dell-tgi/run.yaml +++ b/distributions/dell-tgi/run.yaml @@ -1,5 +1,4 @@ version: '2' -built_at: '2024-10-08T17:40:45.325529' image_name: local docker_image: null conda_env: local diff --git a/distributions/fireworks/run.yaml b/distributions/fireworks/run.yaml deleted file mode 100644 index d2903aabb..000000000 --- a/distributions/fireworks/run.yaml +++ /dev/null @@ -1,51 +0,0 @@ -version: '2' -built_at: '2024-10-08T17:40:45.325529' -image_name: local -docker_image: null -conda_env: local -apis: -- shields -- agents -- models -- memory -- memory_banks -- inference -- safety -providers: - inference: - - provider_id: fireworks0 - provider_type: remote::fireworks - config: - url: https://api.fireworks.ai/inference - # api_key: - safety: - safety: - - provider_id: meta0 - provider_type: inline::llama-guard - config: - model: Llama-Guard-3-1B - excluded_categories: [] - - provider_id: meta1 - provider_type: inline::prompt-guard - config: - model: Prompt-Guard-86M - memory: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} - # Uncomment to use weaviate memory provider - # - provider_id: weaviate0 - # provider_type: remote::weaviate - # config: {} - agents: - - provider_id: meta0 - provider_type: inline::meta-reference - config: - persistence_store: - namespace: null - type: sqlite - db_path: ~/.llama/runtime/kvstore.db - telemetry: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} diff --git a/distributions/fireworks/run.yaml b/distributions/fireworks/run.yaml new file mode 120000 index 000000000..532e0e2a8 --- /dev/null +++ b/distributions/fireworks/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/fireworks/run.yaml \ No newline at end of file diff --git a/distributions/inline-vllm/run.yaml b/distributions/inline-vllm/run.yaml index b998727c0..f42c942a3 100644 --- a/distributions/inline-vllm/run.yaml +++ b/distributions/inline-vllm/run.yaml @@ -1,5 +1,4 @@ version: '2' -built_at: '2024-10-08T17:40:45.325529' image_name: local docker_image: null conda_env: local diff --git a/distributions/meta-reference-gpu/run-with-safety.yaml b/distributions/meta-reference-gpu/run-with-safety.yaml new file mode 120000 index 000000000..4c5483425 --- /dev/null +++ b/distributions/meta-reference-gpu/run-with-safety.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/meta-reference-gpu/run-with-safety.yaml \ No newline at end of file diff --git a/distributions/meta-reference-gpu/run.yaml b/distributions/meta-reference-gpu/run.yaml deleted file mode 100644 index 13d3787e1..000000000 --- a/distributions/meta-reference-gpu/run.yaml +++ /dev/null @@ -1,69 +0,0 @@ -version: '2' -built_at: '2024-10-08T17:40:45.325529' -image_name: local -docker_image: null -conda_env: local -apis: -- shields -- agents -- models -- memory -- memory_banks -- inference -- safety -providers: - inference: - - provider_id: inference0 - provider_type: inline::meta-reference - config: - model: Llama3.2-3B-Instruct - quantization: null - torch_seed: null - max_seq_len: 4096 - max_batch_size: 1 - - provider_id: inference1 - provider_type: inline::meta-reference - 
config: - model: Llama-Guard-3-1B - quantization: null - torch_seed: null - max_seq_len: 2048 - max_batch_size: 1 - safety: - - provider_id: meta0 - provider_type: inline::llama-guard - config: - model: Llama-Guard-3-1B - excluded_categories: [] - - provider_id: meta1 - provider_type: inline::prompt-guard - config: - model: Prompt-Guard-86M -# Uncomment to use prompt guard -# prompt_guard_shield: -# model: Prompt-Guard-86M - memory: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} - # Uncomment to use pgvector - # - provider_id: pgvector - # provider_type: remote::pgvector - # config: - # host: 127.0.0.1 - # port: 5432 - # db: postgres - # user: postgres - # password: mysecretpassword - agents: - - provider_id: meta0 - provider_type: inline::meta-reference - config: - persistence_store: - namespace: null - type: sqlite - db_path: ~/.llama/runtime/agents_store.db - telemetry: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} diff --git a/distributions/meta-reference-gpu/run.yaml b/distributions/meta-reference-gpu/run.yaml new file mode 120000 index 000000000..d680186ab --- /dev/null +++ b/distributions/meta-reference-gpu/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/meta-reference-gpu/run.yaml \ No newline at end of file diff --git a/distributions/meta-reference-quantized-gpu/run.yaml b/distributions/meta-reference-quantized-gpu/run.yaml index d5012852d..19c726b09 100644 --- a/distributions/meta-reference-quantized-gpu/run.yaml +++ b/distributions/meta-reference-quantized-gpu/run.yaml @@ -1,5 +1,4 @@ version: '2' -built_at: '2024-10-08T17:40:45.325529' image_name: local docker_image: null conda_env: local diff --git a/distributions/ollama-gpu/run.yaml b/distributions/ollama-gpu/run.yaml index c702b878e..25471c69f 100644 --- a/distributions/ollama-gpu/run.yaml +++ b/distributions/ollama-gpu/run.yaml @@ -1,5 +1,4 @@ version: '2' -built_at: '2024-10-08T17:40:45.325529' image_name: local docker_image: null conda_env: local @@ -13,20 +12,15 @@ apis: - safety providers: inference: - - provider_id: ollama0 + - provider_id: ollama provider_type: remote::ollama config: - url: http://127.0.0.1:14343 + url: ${env.OLLAMA_URL:http://127.0.0.1:11434} safety: - provider_id: meta0 provider_type: inline::llama-guard config: - model: Llama-Guard-3-1B excluded_categories: [] - - provider_id: meta1 - provider_type: inline::prompt-guard - config: - model: Prompt-Guard-86M memory: - provider_id: meta0 provider_type: inline::meta-reference @@ -43,3 +37,10 @@ providers: - provider_id: meta0 provider_type: inline::meta-reference config: {} +models: + - model_id: ${env.INFERENCE_MODEL:Llama3.2-3B-Instruct} + provider_id: ollama + - model_id: ${env.SAFETY_MODEL:Llama-Guard-3-1B} + provider_id: ollama +shields: + - shield_id: ${env.SAFETY_MODEL:Llama-Guard-3-1B} diff --git a/distributions/ollama/compose.yaml b/distributions/ollama/compose.yaml index dc51d4759..176f19d6b 100644 --- a/distributions/ollama/compose.yaml +++ b/distributions/ollama/compose.yaml @@ -1,30 +1,71 @@ services: ollama: image: ollama/ollama:latest - network_mode: "host" + network_mode: ${NETWORK_MODE:-bridge} volumes: - - ollama:/root/.ollama # this solution synchronizes with the docker volume and loads the model rocket fast + - ~/.ollama:/root/.ollama ports: - "11434:11434" + environment: + OLLAMA_DEBUG: 1 command: [] + deploy: + resources: + limits: + memory: 8G # Set maximum memory + reservations: + memory: 8G # Set minimum memory reservation + # healthcheck: + # # ugh, no CURL in 
ollama image + # test: ["CMD", "curl", "-f", "http://ollama:11434"] + # interval: 10s + # timeout: 5s + # retries: 5 + + ollama-init: + image: ollama/ollama:latest + depends_on: + - ollama + # condition: service_healthy + network_mode: ${NETWORK_MODE:-bridge} + environment: + - OLLAMA_HOST=ollama + - INFERENCE_MODEL=${INFERENCE_MODEL} + - SAFETY_MODEL=${SAFETY_MODEL:-} + volumes: + - ~/.ollama:/root/.ollama + - ./pull-models.sh:/pull-models.sh + entrypoint: ["/pull-models.sh"] + llamastack: depends_on: - - ollama - image: llamastack/distribution-ollama - network_mode: "host" + ollama: + condition: service_started + ollama-init: + condition: service_started + image: ${LLAMA_STACK_IMAGE:-llamastack/distribution-ollama} + network_mode: ${NETWORK_MODE:-bridge} volumes: - ~/.llama:/root/.llama # Link to ollama run.yaml file - - ./run.yaml:/root/my-run.yaml + - ~/local/llama-stack/:/app/llama-stack-source + - ./run${SAFETY_MODEL:+-with-safety}.yaml:/root/my-run.yaml ports: - - "5000:5000" - # Hack: wait for ollama server to start before starting docker - entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml" + - "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}" + environment: + - INFERENCE_MODEL=${INFERENCE_MODEL} + - SAFETY_MODEL=${SAFETY_MODEL:-} + - OLLAMA_URL=http://ollama:11434 + entrypoint: > + python -m llama_stack.distribution.server.server /root/my-run.yaml \ + --port ${LLAMA_STACK_PORT:-5001} deploy: restart_policy: condition: on-failure - delay: 3s - max_attempts: 5 + delay: 10s + max_attempts: 3 window: 60s volumes: ollama: + ollama-init: + llamastack: diff --git a/distributions/ollama/pull-models.sh b/distributions/ollama/pull-models.sh new file mode 100755 index 000000000..fb5bf8a4a --- /dev/null +++ b/distributions/ollama/pull-models.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +echo "Preloading (${INFERENCE_MODEL}, ${SAFETY_MODEL})..." +for model in ${INFERENCE_MODEL} ${SAFETY_MODEL}; do + echo "Preloading $model..." + if ! 
ollama run "$model"; then + echo "Failed to pull and run $model" + exit 1 + fi +done + +echo "All models pulled successfully" diff --git a/distributions/ollama/run-with-safety.yaml b/distributions/ollama/run-with-safety.yaml new file mode 120000 index 000000000..5695b49e7 --- /dev/null +++ b/distributions/ollama/run-with-safety.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/ollama/run-with-safety.yaml \ No newline at end of file diff --git a/distributions/ollama/run.yaml b/distributions/ollama/run.yaml deleted file mode 100644 index c702b878e..000000000 --- a/distributions/ollama/run.yaml +++ /dev/null @@ -1,45 +0,0 @@ -version: '2' -built_at: '2024-10-08T17:40:45.325529' -image_name: local -docker_image: null -conda_env: local -apis: -- shields -- agents -- models -- memory -- memory_banks -- inference -- safety -providers: - inference: - - provider_id: ollama0 - provider_type: remote::ollama - config: - url: http://127.0.0.1:14343 - safety: - - provider_id: meta0 - provider_type: inline::llama-guard - config: - model: Llama-Guard-3-1B - excluded_categories: [] - - provider_id: meta1 - provider_type: inline::prompt-guard - config: - model: Prompt-Guard-86M - memory: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} - agents: - - provider_id: meta0 - provider_type: inline::meta-reference - config: - persistence_store: - namespace: null - type: sqlite - db_path: ~/.llama/runtime/kvstore.db - telemetry: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} diff --git a/distributions/ollama/run.yaml b/distributions/ollama/run.yaml new file mode 120000 index 000000000..b008b1bf4 --- /dev/null +++ b/distributions/ollama/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/ollama/run.yaml \ No newline at end of file diff --git a/distributions/remote-vllm/compose.yaml b/distributions/remote-vllm/compose.yaml index 90d58a2af..09701e099 100644 --- a/distributions/remote-vllm/compose.yaml +++ b/distributions/remote-vllm/compose.yaml @@ -1,33 +1,28 @@ -# NOTES: -# -# This Docker Compose (and the associated run.yaml) assumes you will be -# running in the default "bridged" network mode. 
-# -# If you need "host" network mode, please uncomment -# - network_mode: "host" -# -# Similarly change "host.docker.internal" to "localhost" in the run.yaml file -# services: - vllm-0: + vllm-inference: image: vllm/vllm-openai:latest volumes: - $HOME/.cache/huggingface:/root/.cache/huggingface - # network_mode: "host" + network_mode: ${NETWORK_MODE:-bridge} ports: - - "5100:5100" + - "${VLLM_INFERENCE_PORT:-5100}:${VLLM_INFERENCE_PORT:-5100}" devices: - nvidia.com/gpu=all environment: - - CUDA_VISIBLE_DEVICES=0 + - CUDA_VISIBLE_DEVICES=${VLLM_INFERENCE_GPU:-0} - HUGGING_FACE_HUB_TOKEN=$HF_TOKEN command: > --gpu-memory-utilization 0.75 - --model meta-llama/Llama-3.1-8B-Instruct + --model ${VLLM_INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} --enforce-eager --max-model-len 8192 --max-num-seqs 16 - --port 5100 + --port ${VLLM_INFERENCE_PORT:-5100} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:${VLLM_INFERENCE_PORT:-5100}/health"] + interval: 30s + timeout: 10s + retries: 5 deploy: resources: reservations: @@ -35,25 +30,34 @@ services: - driver: nvidia capabilities: [gpu] runtime: nvidia - vllm-1: + + # A little trick: + # if VLLM_SAFETY_MODEL is set, we will create a service for the safety model + # otherwise, the entry will end in a hyphen which gets ignored by docker compose + vllm-${VLLM_SAFETY_MODEL:+safety}: image: vllm/vllm-openai:latest volumes: - $HOME/.cache/huggingface:/root/.cache/huggingface - # network_mode: "host" + network_mode: ${NETWORK_MODE:-bridge} ports: - - "5101:5101" + - "${VLLM_SAFETY_PORT:-5101}:${VLLM_SAFETY_PORT:-5101}" devices: - nvidia.com/gpu=all environment: - - CUDA_VISIBLE_DEVICES=1 + - CUDA_VISIBLE_DEVICES=${VLLM_SAFETY_GPU:-1} - HUGGING_FACE_HUB_TOKEN=$HF_TOKEN command: > --gpu-memory-utilization 0.75 - --model meta-llama/Llama-Guard-3-1B + --model ${VLLM_SAFETY_MODEL} --enforce-eager --max-model-len 8192 --max-num-seqs 16 - --port 5101 + --port ${VLLM_SAFETY_PORT:-5101} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:${VLLM_SAFETY_PORT:-5101}/health"] + interval: 30s + timeout: 10s + retries: 5 deploy: resources: reservations: @@ -63,23 +67,25 @@ services: runtime: nvidia llamastack: depends_on: - - vllm-0 - - vllm-1 - # image: llamastack/distribution-remote-vllm + vllm-inference: + condition: service_healthy + vllm-${VLLM_SAFETY_MODEL:+safety}: + condition: service_healthy + # image: llamastack/distribution-remote-vllm image: llamastack/distribution-remote-vllm:test-0.0.52rc3 volumes: - ~/.llama:/root/.llama - - ~/local/llama-stack/distributions/remote-vllm/run.yaml:/root/llamastack-run-remote-vllm.yaml - # network_mode: "host" + - ./run${VLLM_SAFETY_MODEL:+-with-safety}.yaml:/root/llamastack-run-remote-vllm.yaml + network_mode: ${NETWORK_MODE:-bridge} environment: - - LLAMA_INFERENCE_VLLM_URL=${LLAMA_INFERENCE_VLLM_URL:-http://host.docker.internal:5100/v1} - - LLAMA_INFERENCE_MODEL=${LLAMA_INFERENCE_MODEL:-Llama3.1-8B-Instruct} + - VLLM_URL=http://vllm-inference:${VLLM_INFERENCE_PORT:-5100}/v1 + - VLLM_SAFETY_URL=http://vllm-safety:${VLLM_SAFETY_PORT:-5101}/v1 + - INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} - MAX_TOKENS=${MAX_TOKENS:-4096} - SQLITE_STORE_DIR=${SQLITE_STORE_DIR:-$HOME/.llama/distributions/remote-vllm} - - LLAMA_SAFETY_VLLM_URL=${LLAMA_SAFETY_VLLM_URL:-http://host.docker.internal:5101/v1} - - LLAMA_SAFETY_MODEL=${LLAMA_SAFETY_MODEL:-Llama-Guard-3-1B} + - SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B} ports: - 
"${LLAMASTACK_PORT:-5001}:${LLAMASTACK_PORT:-5001}" # Hack: wait for vLLM server to start before starting docker entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml --port 5001" deploy: @@ -89,6 +95,6 @@ services: max_attempts: 5 window: 60s volumes: - vllm-0: - vllm-1: + vllm-inference: + vllm-safety: llamastack: diff --git a/distributions/remote-vllm/run-with-safety.yaml b/distributions/remote-vllm/run-with-safety.yaml new file mode 120000 index 000000000..b2c3c36da --- /dev/null +++ b/distributions/remote-vllm/run-with-safety.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/remote-vllm/run-with-safety.yaml \ No newline at end of file diff --git a/distributions/remote-vllm/run.yaml b/distributions/remote-vllm/run.yaml deleted file mode 100644 index eae5b8a6f..000000000 --- a/distributions/remote-vllm/run.yaml +++ /dev/null @@ -1,68 +0,0 @@ -version: '2' -built_at: '2024-11-11T20:09:45.988375' -image_name: remote-vllm -docker_image: remote-vllm -conda_env: null -apis: -- inference -- memory -- safety -- agents -- telemetry -providers: - inference: - # serves main inference model - - provider_id: vllm-0 - provider_type: remote::vllm - config: - # NOTE: replace with "localhost" if you are running in "host" network mode - url: ${env.LLAMA_INFERENCE_VLLM_URL:http://host.docker.internal:5100/v1} - max_tokens: ${env.MAX_TOKENS:4096} - api_token: fake - # serves safety llama_guard model - - provider_id: vllm-1 - provider_type: remote::vllm - config: - # NOTE: replace with "localhost" if you are running in "host" network mode - url: ${env.LLAMA_SAFETY_VLLM_URL:http://host.docker.internal:5101/v1} - max_tokens: ${env.MAX_TOKENS:4096} - api_token: fake - memory: - - provider_id: faiss-0 - provider_type: inline::faiss - config: - kvstore: - namespace: null - type: sqlite - db_path: "${env.SQLITE_STORE_DIR:/home/ashwin/.llama/distributions/remote-vllm}/faiss_store.db" - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: {} - memory: - - provider_id: meta0 - provider_type: inline::faiss - config: {} - agents: - - provider_id: meta0 - provider_type: inline::meta-reference - config: - persistence_store: - namespace: null - type: sqlite - db_path: "${env.SQLITE_STORE_DIR:/home/ashwin/.llama/distributions/remote-vllm}/agents_store.db" - telemetry: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} -metadata_store: - namespace: null - type: sqlite - db_path: "${env.SQLITE_STORE_DIR:/home/ashwin/.llama/distributions/remote-vllm}/registry.db" -models: - - model_id: ${env.LLAMA_INFERENCE_MODEL:Llama3.1-8B-Instruct} - provider_id: vllm-0 - - model_id: ${env.LLAMA_SAFETY_MODEL:Llama-Guard-3-1B} - provider_id: vllm-1 -shields: - - shield_id: ${env.LLAMA_SAFETY_MODEL:Llama-Guard-3-1B} diff --git a/distributions/remote-vllm/run.yaml b/distributions/remote-vllm/run.yaml new file mode 120000 index 000000000..ac70c0e6a --- /dev/null +++ b/distributions/remote-vllm/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/remote-vllm/run.yaml \ No newline at end of file diff --git a/distributions/tgi/compose.yaml b/distributions/tgi/compose.yaml index bea7eb907..753b7880b 100644 --- a/distributions/tgi/compose.yaml +++ b/distributions/tgi/compose.yaml @@ -1,51 +1,89 @@ services: - text-generation-inference: + tgi-inference: image: ghcr.io/huggingface/text-generation-inference:latest - network_mode: "host" volumes: - $HOME/.cache/huggingface:/data + network_mode: ${NETWORK_MODE:-bridged} 
ports: - - "5009:5009" + - "${TGI_INFERENCE_PORT:-8080}:${TGI_INFERENCE_PORT:-8080}" devices: - nvidia.com/gpu=all environment: - - CUDA_VISIBLE_DEVICES=0 + - CUDA_VISIBLE_DEVICES=${TGI_INFERENCE_GPU:-0} + - HF_TOKEN=$HF_TOKEN - HF_HOME=/data - HF_DATASETS_CACHE=/data - HF_MODULES_CACHE=/data - HF_HUB_CACHE=/data - command: ["--dtype", "bfloat16", "--usage-stats", "on", "--sharded", "false", "--model-id", "meta-llama/Llama-3.1-8B-Instruct", "--port", "5009", "--cuda-memory-fraction", "0.3"] + command: > + --dtype bfloat16 + --usage-stats off + --sharded false + --model-id ${TGI_INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} + --port ${TGI_INFERENCE_PORT:-8080} + --cuda-memory-fraction 0.75 + healthcheck: + test: ["CMD", "curl", "-f", "http://tgi-inference:${TGI_INFERENCE_PORT:-8080}/health"] + interval: 5s + timeout: 5s + retries: 30 deploy: resources: reservations: devices: - driver: nvidia - # that's the closest analogue to --gpus; provide - # an integer amount of devices or 'all' - count: 1 - # Devices are reserved using a list of capabilities, making - # capabilities the only required field. A device MUST - # satisfy all the requested capabilities for a successful - # reservation. capabilities: [gpu] runtime: nvidia + + tgi-${TGI_SAFETY_MODEL:+safety}: + image: ghcr.io/huggingface/text-generation-inference:latest + volumes: + - $HOME/.cache/huggingface:/data + network_mode: ${NETWORK_MODE:-bridged} + ports: + - "${TGI_SAFETY_PORT:-8081}:${TGI_SAFETY_PORT:-8081}" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=${TGI_SAFETY_GPU:-1} + - HF_TOKEN=$HF_TOKEN + - HF_HOME=/data + - HF_DATASETS_CACHE=/data + - HF_MODULES_CACHE=/data + - HF_HUB_CACHE=/data + command: > + --dtype bfloat16 + --usage-stats off + --sharded false + --model-id ${TGI_SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B} + --port ${TGI_SAFETY_PORT:-8081} + --cuda-memory-fraction 0.75 healthcheck: - test: ["CMD", "curl", "-f", "http://text-generation-inference:5009/health"] + test: ["CMD", "curl", "-f", "http://tgi-safety:${TGI_SAFETY_PORT:-8081}/health"] interval: 5s timeout: 5s retries: 30 + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] + runtime: nvidia + llamastack: depends_on: - text-generation-inference: + tgi-inference: condition: service_healthy - image: llamastack/distribution-tgi - network_mode: "host" + tgi-${TGI_SAFETY_MODEL:+safety}: + condition: service_healthy + image: llamastack/distribution-tgi:test-0.0.52rc3 + network_mode: ${NETWORK_MODE:-bridged} volumes: - ~/.llama:/root/.llama - # Link to TGI run.yaml file - - ./run.yaml:/root/my-run.yaml + - ./run${TGI_SAFETY_MODEL:+-with-safety}.yaml:/root/my-run.yaml ports: - - "5000:5000" + - "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}" # Hack: wait for TGI server to start before starting docker entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml" restart_policy: @@ -53,3 +91,13 @@ services: delay: 3s max_attempts: 5 window: 60s + environment: + - TGI_URL=http://tgi-inference:${TGI_INFERENCE_PORT:-8080} + - SAFETY_TGI_URL=http://tgi-safety:${TGI_SAFETY_PORT:-8081} + - INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} + - SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B} + +volumes: + tgi-inference: + tgi-safety: + llamastack: diff --git a/distributions/tgi/run-with-safety.yaml b/distributions/tgi/run-with-safety.yaml new file mode 120000 index 000000000..62d26708e --- /dev/null +++ 
b/distributions/tgi/run-with-safety.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/tgi/run-with-safety.yaml \ No newline at end of file diff --git a/distributions/tgi/run.yaml b/distributions/tgi/run.yaml deleted file mode 100644 index 84ec536f8..000000000 --- a/distributions/tgi/run.yaml +++ /dev/null @@ -1,45 +0,0 @@ -version: '2' -built_at: '2024-10-08T17:40:45.325529' -image_name: local -docker_image: null -conda_env: local -apis: -- shields -- agents -- models -- memory -- memory_banks -- inference -- safety -providers: - inference: - - provider_id: tgi0 - provider_type: remote::tgi - config: - url: http://127.0.0.1:5009 - safety: - - provider_id: meta0 - provider_type: inline::llama-guard - config: - model: Llama-Guard-3-1B - excluded_categories: [] - - provider_id: meta1 - provider_type: inline::prompt-guard - config: - model: Prompt-Guard-86M - memory: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} - agents: - - provider_id: meta0 - provider_type: inline::meta-reference - config: - persistence_store: - namespace: null - type: sqlite - db_path: ~/.llama/runtime/kvstore.db - telemetry: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} diff --git a/distributions/tgi/run.yaml b/distributions/tgi/run.yaml new file mode 120000 index 000000000..f3cc3a502 --- /dev/null +++ b/distributions/tgi/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/tgi/run.yaml \ No newline at end of file diff --git a/distributions/together/run.yaml b/distributions/together/run.yaml deleted file mode 100644 index 142316a8d..000000000 --- a/distributions/together/run.yaml +++ /dev/null @@ -1,46 +0,0 @@ -version: '2' -built_at: '2024-10-08T17:40:45.325529' -image_name: local -docker_image: null -conda_env: local -apis: -- shields -- agents -- models -- memory -- memory_banks -- inference -- safety -providers: - inference: - - provider_id: together0 - provider_type: remote::together - config: - url: https://api.together.xyz/v1 - # api_key: - safety: - - provider_id: meta0 - provider_type: inline::llama-guard - config: - model: Llama-Guard-3-1B - excluded_categories: [] - - provider_id: meta1 - provider_type: inline::prompt-guard - config: - model: Prompt-Guard-86M - memory: - - provider_id: meta0 - provider_type: remote::weaviate - config: {} - agents: - - provider_id: meta0 - provider_type: inline::meta-reference - config: - persistence_store: - namespace: null - type: sqlite - db_path: ~/.llama/runtime/kvstore.db - telemetry: - - provider_id: meta0 - provider_type: inline::meta-reference - config: {} diff --git a/distributions/together/run.yaml b/distributions/together/run.yaml new file mode 120000 index 000000000..102d9866e --- /dev/null +++ b/distributions/together/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/together/run.yaml \ No newline at end of file diff --git a/docs/openapi_generator/generate.py b/docs/openapi_generator/generate.py index 97d265aeb..3aa7ea6dc 100644 --- a/docs/openapi_generator/generate.py +++ b/docs/openapi_generator/generate.py @@ -31,7 +31,10 @@ from .strong_typing.schema import json_schema_type schema_utils.json_schema_type = json_schema_type -from llama_stack.distribution.stack import LlamaStack +# this line needs to be here to ensure json_schema_type has been altered before +# the imports use the annotation +from llama_stack.apis.version import LLAMA_STACK_API_VERSION # noqa: E402 +from llama_stack.distribution.stack import LlamaStack # noqa: E402 def main(output_dir: str): @@ -50,7 +53,7 @@ def main(output_dir: str): 
server=Server(url="http://any-hosted-llama-stack.com"), info=Info( title="[DRAFT] Llama Stack Specification", - version="0.0.1", + version=LLAMA_STACK_API_VERSION, description="""This is the specification of the llama stack that provides a set of endpoints and their corresponding interfaces that are tailored to best leverage Llama Models. The specification is still in draft and subject to change. diff --git a/docs/openapi_generator/pyopenapi/generator.py b/docs/openapi_generator/pyopenapi/generator.py index 12e3396e4..2e1fbb856 100644 --- a/docs/openapi_generator/pyopenapi/generator.py +++ b/docs/openapi_generator/pyopenapi/generator.py @@ -202,7 +202,9 @@ class ContentBuilder: ) -> MediaType: schema = self.schema_builder.classdef_to_ref(item_type) if self.schema_transformer: - schema_transformer: Callable[[SchemaOrRef], SchemaOrRef] = self.schema_transformer # type: ignore + schema_transformer: Callable[[SchemaOrRef], SchemaOrRef] = ( + self.schema_transformer + ) schema = schema_transformer(schema) if not examples: @@ -630,6 +632,7 @@ class Generator: raise NotImplementedError(f"unknown HTTP method: {op.http_method}") route = op.get_route() + print(f"route: {route}") if route in paths: paths[route].update(pathItem) else: diff --git a/docs/openapi_generator/pyopenapi/operations.py b/docs/openapi_generator/pyopenapi/operations.py index f4238f6f8..cc3a06b7b 100644 --- a/docs/openapi_generator/pyopenapi/operations.py +++ b/docs/openapi_generator/pyopenapi/operations.py @@ -12,6 +12,8 @@ import uuid from dataclasses import dataclass from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union +from llama_stack.apis.version import LLAMA_STACK_API_VERSION + from termcolor import colored from ..strong_typing.inspection import ( @@ -111,9 +113,12 @@ class EndpointOperation: def get_route(self) -> str: if self.route is not None: - return self.route + assert ( + "_" not in self.route + ), f"route should not contain underscores: {self.route}" + return "/".join(["", LLAMA_STACK_API_VERSION, self.route.lstrip("/")]) - route_parts = ["", self.name] + route_parts = ["", LLAMA_STACK_API_VERSION, self.name] for param_name, _ in self.path_params: route_parts.append("{" + param_name + "}") return "/".join(route_parts) diff --git a/docs/resources/llama-stack-spec.html b/docs/resources/llama-stack-spec.html index ce6226f98..838633a4f 100644 --- a/docs/resources/llama-stack-spec.html +++ b/docs/resources/llama-stack-spec.html @@ -20,8 +20,8 @@ "openapi": "3.1.0", "info": { "title": "[DRAFT] Llama Stack Specification", - "version": "0.0.1", - "description": "This is the specification of the llama stack that provides\n a set of endpoints and their corresponding interfaces that are tailored to\n best leverage Llama Models. The specification is still in draft and subject to change.\n Generated at 2024-11-14 17:04:24.301559" + "version": "alpha", + "description": "This is the specification of the llama stack that provides\n a set of endpoints and their corresponding interfaces that are tailored to\n best leverage Llama Models. 
The specification is still in draft and subject to change.\n Generated at 2024-11-18 23:37:24.867143" }, "servers": [ { @@ -29,7 +29,7 @@ } ], "paths": { - "/batch_inference/chat_completion": { + "/alpha/batch-inference/chat-completion": { "post": { "responses": { "200": { @@ -69,7 +69,7 @@ } } }, - "/batch_inference/completion": { + "/alpha/batch-inference/completion": { "post": { "responses": { "200": { @@ -109,7 +109,7 @@ } } }, - "/post_training/job/cancel": { + "/alpha/post-training/job/cancel": { "post": { "responses": { "200": { @@ -142,7 +142,7 @@ } } }, - "/inference/chat_completion": { + "/alpha/inference/chat-completion": { "post": { "responses": { "200": { @@ -189,7 +189,7 @@ } } }, - "/inference/completion": { + "/alpha/inference/completion": { "post": { "responses": { "200": { @@ -236,7 +236,7 @@ } } }, - "/agents/create": { + "/alpha/agents/create": { "post": { "responses": { "200": { @@ -276,7 +276,7 @@ } } }, - "/agents/session/create": { + "/alpha/agents/session/create": { "post": { "responses": { "200": { @@ -316,7 +316,7 @@ } } }, - "/agents/turn/create": { + "/alpha/agents/turn/create": { "post": { "responses": { "200": { @@ -363,7 +363,7 @@ } } }, - "/agents/delete": { + "/alpha/agents/delete": { "post": { "responses": { "200": { @@ -396,7 +396,7 @@ } } }, - "/agents/session/delete": { + "/alpha/agents/session/delete": { "post": { "responses": { "200": { @@ -429,7 +429,7 @@ } } }, - "/inference/embeddings": { + "/alpha/inference/embeddings": { "post": { "responses": { "200": { @@ -469,7 +469,7 @@ } } }, - "/eval/evaluate_rows": { + "/alpha/eval/evaluate-rows": { "post": { "responses": { "200": { @@ -509,7 +509,7 @@ } } }, - "/agents/session/get": { + "/alpha/agents/session/get": { "post": { "responses": { "200": { @@ -565,7 +565,7 @@ } } }, - "/agents/step/get": { + "/alpha/agents/step/get": { "get": { "responses": { "200": { @@ -627,7 +627,7 @@ ] } }, - "/agents/turn/get": { + "/alpha/agents/turn/get": { "get": { "responses": { "200": { @@ -681,7 +681,7 @@ ] } }, - "/datasets/get": { + "/alpha/datasets/get": { "get": { "responses": { "200": { @@ -726,7 +726,7 @@ ] } }, - "/eval_tasks/get": { + "/alpha/eval-tasks/get": { "get": { "responses": { "200": { @@ -771,7 +771,7 @@ ] } }, - "/memory_banks/get": { + "/alpha/memory-banks/get": { "get": { "responses": { "200": { @@ -829,7 +829,7 @@ ] } }, - "/models/get": { + "/alpha/models/get": { "get": { "responses": { "200": { @@ -874,7 +874,7 @@ ] } }, - "/datasetio/get_rows_paginated": { + "/alpha/datasetio/get-rows-paginated": { "get": { "responses": { "200": { @@ -936,7 +936,7 @@ ] } }, - "/scoring_functions/get": { + "/alpha/scoring-functions/get": { "get": { "responses": { "200": { @@ -981,7 +981,7 @@ ] } }, - "/shields/get": { + "/alpha/shields/get": { "get": { "responses": { "200": { @@ -1026,7 +1026,7 @@ ] } }, - "/telemetry/get_trace": { + "/alpha/telemetry/get-trace": { "get": { "responses": { "200": { @@ -1064,7 +1064,7 @@ ] } }, - "/post_training/job/artifacts": { + "/alpha/post-training/job/artifacts": { "get": { "responses": { "200": { @@ -1102,7 +1102,7 @@ ] } }, - "/post_training/job/logs": { + "/alpha/post-training/job/logs": { "get": { "responses": { "200": { @@ -1140,7 +1140,7 @@ ] } }, - "/post_training/job/status": { + "/alpha/post-training/job/status": { "get": { "responses": { "200": { @@ -1178,7 +1178,7 @@ ] } }, - "/post_training/jobs": { + "/alpha/post-training/jobs": { "get": { "responses": { "200": { @@ -1208,7 +1208,7 @@ ] } }, - "/health": { + "/alpha/health": { "get": { "responses": { "200": { 
@@ -1238,7 +1238,7 @@ ] } }, - "/memory/insert": { + "/alpha/memory/insert": { "post": { "responses": { "200": { @@ -1271,7 +1271,7 @@ } } }, - "/eval/job/cancel": { + "/alpha/eval/job/cancel": { "post": { "responses": { "200": { @@ -1304,7 +1304,7 @@ } } }, - "/eval/job/result": { + "/alpha/eval/job/result": { "get": { "responses": { "200": { @@ -1350,7 +1350,7 @@ ] } }, - "/eval/job/status": { + "/alpha/eval/job/status": { "get": { "responses": { "200": { @@ -1403,7 +1403,7 @@ ] } }, - "/datasets/list": { + "/alpha/datasets/list": { "get": { "responses": { "200": { @@ -1433,7 +1433,7 @@ ] } }, - "/eval_tasks/list": { + "/alpha/eval-tasks/list": { "get": { "responses": { "200": { @@ -1463,7 +1463,7 @@ ] } }, - "/memory_banks/list": { + "/alpha/memory-banks/list": { "get": { "responses": { "200": { @@ -1506,7 +1506,7 @@ ] } }, - "/models/list": { + "/alpha/models/list": { "get": { "responses": { "200": { @@ -1536,7 +1536,7 @@ ] } }, - "/providers/list": { + "/alpha/providers/list": { "get": { "responses": { "200": { @@ -1569,7 +1569,7 @@ ] } }, - "/routes/list": { + "/alpha/routes/list": { "get": { "responses": { "200": { @@ -1605,7 +1605,7 @@ ] } }, - "/scoring_functions/list": { + "/alpha/scoring-functions/list": { "get": { "responses": { "200": { @@ -1635,7 +1635,7 @@ ] } }, - "/shields/list": { + "/alpha/shields/list": { "get": { "responses": { "200": { @@ -1665,7 +1665,7 @@ ] } }, - "/telemetry/log_event": { + "/alpha/telemetry/log-event": { "post": { "responses": { "200": { @@ -1698,7 +1698,7 @@ } } }, - "/post_training/preference_optimize": { + "/alpha/post-training/preference-optimize": { "post": { "responses": { "200": { @@ -1738,7 +1738,7 @@ } } }, - "/memory/query": { + "/alpha/memory/query": { "post": { "responses": { "200": { @@ -1778,7 +1778,7 @@ } } }, - "/datasets/register": { + "/alpha/datasets/register": { "post": { "responses": { "200": { @@ -1811,7 +1811,7 @@ } } }, - "/eval_tasks/register": { + "/alpha/eval-tasks/register": { "post": { "responses": { "200": { @@ -1844,7 +1844,7 @@ } } }, - "/memory_banks/register": { + "/alpha/memory-banks/register": { "post": { "responses": {}, "tags": [ @@ -1873,7 +1873,7 @@ } } }, - "/models/register": { + "/alpha/models/register": { "post": { "responses": { "200": { @@ -1913,7 +1913,7 @@ } } }, - "/scoring_functions/register": { + "/alpha/scoring-functions/register": { "post": { "responses": { "200": { @@ -1946,7 +1946,7 @@ } } }, - "/shields/register": { + "/alpha/shields/register": { "post": { "responses": { "200": { @@ -1986,7 +1986,7 @@ } } }, - "/eval/run_eval": { + "/alpha/eval/run-eval": { "post": { "responses": { "200": { @@ -2026,7 +2026,7 @@ } } }, - "/safety/run_shield": { + "/alpha/safety/run-shield": { "post": { "responses": { "200": { @@ -2066,7 +2066,7 @@ } } }, - "/scoring/score": { + "/alpha/scoring/score": { "post": { "responses": { "200": { @@ -2106,7 +2106,7 @@ } } }, - "/scoring/score_batch": { + "/alpha/scoring/score-batch": { "post": { "responses": { "200": { @@ -2146,7 +2146,7 @@ } } }, - "/post_training/supervised_fine_tune": { + "/alpha/post-training/supervised-fine-tune": { "post": { "responses": { "200": { @@ -2186,7 +2186,7 @@ } } }, - "/synthetic_data_generation/generate": { + "/alpha/synthetic-data-generation/generate": { "post": { "responses": { "200": { @@ -2226,7 +2226,7 @@ } } }, - "/memory_banks/unregister": { + "/alpha/memory-banks/unregister": { "post": { "responses": { "200": { @@ -2259,7 +2259,7 @@ } } }, - "/models/unregister": { + "/alpha/models/unregister": { "post": { "responses": { 
"200": { diff --git a/docs/resources/llama-stack-spec.yaml b/docs/resources/llama-stack-spec.yaml index a0b3d6c5e..994e3aac4 100644 --- a/docs/resources/llama-stack-spec.yaml +++ b/docs/resources/llama-stack-spec.yaml @@ -3400,13 +3400,13 @@ info: description: "This is the specification of the llama stack that provides\n \ \ a set of endpoints and their corresponding interfaces that are tailored\ \ to\n best leverage Llama Models. The specification is still in\ - \ draft and subject to change.\n Generated at 2024-11-14 17:04:24.301559" + \ draft and subject to change.\n Generated at 2024-11-18 23:37:24.867143" title: '[DRAFT] Llama Stack Specification' - version: 0.0.1 + version: alpha jsonSchemaDialect: https://json-schema.org/draft/2020-12/schema openapi: 3.1.0 paths: - /agents/create: + /alpha/agents/create: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3431,7 +3431,7 @@ paths: description: OK tags: - Agents - /agents/delete: + /alpha/agents/delete: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3452,7 +3452,7 @@ paths: description: OK tags: - Agents - /agents/session/create: + /alpha/agents/session/create: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3477,7 +3477,7 @@ paths: description: OK tags: - Agents - /agents/session/delete: + /alpha/agents/session/delete: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3498,7 +3498,7 @@ paths: description: OK tags: - Agents - /agents/session/get: + /alpha/agents/session/get: post: parameters: - in: query @@ -3533,7 +3533,7 @@ paths: description: OK tags: - Agents - /agents/step/get: + /alpha/agents/step/get: get: parameters: - in: query @@ -3572,7 +3572,7 @@ paths: description: OK tags: - Agents - /agents/turn/create: + /alpha/agents/turn/create: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3600,7 +3600,7 @@ paths: streamed agent turn completion response. 
tags: - Agents - /agents/turn/get: + /alpha/agents/turn/get: get: parameters: - in: query @@ -3634,7 +3634,7 @@ paths: description: OK tags: - Agents - /batch_inference/chat_completion: + /alpha/batch-inference/chat-completion: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3659,7 +3659,7 @@ paths: description: OK tags: - BatchInference - /batch_inference/completion: + /alpha/batch-inference/completion: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3684,7 +3684,7 @@ paths: description: OK tags: - BatchInference - /datasetio/get_rows_paginated: + /alpha/datasetio/get-rows-paginated: get: parameters: - in: query @@ -3723,7 +3723,7 @@ paths: description: OK tags: - DatasetIO - /datasets/get: + /alpha/datasets/get: get: parameters: - in: query @@ -3749,7 +3749,7 @@ paths: description: OK tags: - Datasets - /datasets/list: + /alpha/datasets/list: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3768,7 +3768,7 @@ paths: description: OK tags: - Datasets - /datasets/register: + /alpha/datasets/register: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3789,7 +3789,73 @@ paths: description: OK tags: - Datasets - /eval/evaluate_rows: + /alpha/eval-tasks/get: + get: + parameters: + - in: query + name: name + required: true + schema: + type: string + - description: JSON-encoded provider data which will be made available to the + adapter servicing the API + in: header + name: X-LlamaStack-ProviderData + required: false + schema: + type: string + responses: + '200': + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/EvalTask' + - type: 'null' + description: OK + tags: + - EvalTasks + /alpha/eval-tasks/list: + get: + parameters: + - description: JSON-encoded provider data which will be made available to the + adapter servicing the API + in: header + name: X-LlamaStack-ProviderData + required: false + schema: + type: string + responses: + '200': + content: + application/jsonl: + schema: + $ref: '#/components/schemas/EvalTask' + description: OK + tags: + - EvalTasks + /alpha/eval-tasks/register: + post: + parameters: + - description: JSON-encoded provider data which will be made available to the + adapter servicing the API + in: header + name: X-LlamaStack-ProviderData + required: false + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterEvalTaskRequest' + required: true + responses: + '200': + description: OK + tags: + - EvalTasks + /alpha/eval/evaluate-rows: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3814,7 +3880,7 @@ paths: description: OK tags: - Eval - /eval/job/cancel: + /alpha/eval/job/cancel: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3835,7 +3901,7 @@ paths: description: OK tags: - Eval - /eval/job/result: + /alpha/eval/job/result: get: parameters: - in: query @@ -3864,7 +3930,7 @@ paths: description: OK tags: - Eval - /eval/job/status: + /alpha/eval/job/status: get: parameters: - in: query @@ -3895,7 +3961,7 @@ paths: description: OK tags: - Eval - /eval/run_eval: + /alpha/eval/run-eval: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -3920,73 +3986,7 @@ paths: description: OK tags: - Eval - /eval_tasks/get: - get: - parameters: - - 
in: query - name: name - required: true - schema: - type: string - - description: JSON-encoded provider data which will be made available to the - adapter servicing the API - in: header - name: X-LlamaStack-ProviderData - required: false - schema: - type: string - responses: - '200': - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/EvalTask' - - type: 'null' - description: OK - tags: - - EvalTasks - /eval_tasks/list: - get: - parameters: - - description: JSON-encoded provider data which will be made available to the - adapter servicing the API - in: header - name: X-LlamaStack-ProviderData - required: false - schema: - type: string - responses: - '200': - content: - application/jsonl: - schema: - $ref: '#/components/schemas/EvalTask' - description: OK - tags: - - EvalTasks - /eval_tasks/register: - post: - parameters: - - description: JSON-encoded provider data which will be made available to the - adapter servicing the API - in: header - name: X-LlamaStack-ProviderData - required: false - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterEvalTaskRequest' - required: true - responses: - '200': - description: OK - tags: - - EvalTasks - /health: + /alpha/health: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4005,7 +4005,7 @@ paths: description: OK tags: - Inspect - /inference/chat_completion: + /alpha/inference/chat-completion: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4032,7 +4032,7 @@ paths: description: Chat completion response. **OR** SSE-stream of these events. tags: - Inference - /inference/completion: + /alpha/inference/completion: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4059,7 +4059,7 @@ paths: description: Completion response. **OR** streamed completion response. 
tags: - Inference - /inference/embeddings: + /alpha/inference/embeddings: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4084,53 +4084,7 @@ paths: description: OK tags: - Inference - /memory/insert: - post: - parameters: - - description: JSON-encoded provider data which will be made available to the - adapter servicing the API - in: header - name: X-LlamaStack-ProviderData - required: false - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/InsertDocumentsRequest' - required: true - responses: - '200': - description: OK - tags: - - Memory - /memory/query: - post: - parameters: - - description: JSON-encoded provider data which will be made available to the - adapter servicing the API - in: header - name: X-LlamaStack-ProviderData - required: false - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/QueryDocumentsRequest' - required: true - responses: - '200': - content: - application/json: - schema: - $ref: '#/components/schemas/QueryDocumentsResponse' - description: OK - tags: - - Memory - /memory_banks/get: + /alpha/memory-banks/get: get: parameters: - in: query @@ -4160,7 +4114,7 @@ paths: description: OK tags: - MemoryBanks - /memory_banks/list: + /alpha/memory-banks/list: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4183,7 +4137,7 @@ paths: description: OK tags: - MemoryBanks - /memory_banks/register: + /alpha/memory-banks/register: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4202,7 +4156,7 @@ paths: responses: {} tags: - MemoryBanks - /memory_banks/unregister: + /alpha/memory-banks/unregister: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4223,7 +4177,53 @@ paths: description: OK tags: - MemoryBanks - /models/get: + /alpha/memory/insert: + post: + parameters: + - description: JSON-encoded provider data which will be made available to the + adapter servicing the API + in: header + name: X-LlamaStack-ProviderData + required: false + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/InsertDocumentsRequest' + required: true + responses: + '200': + description: OK + tags: + - Memory + /alpha/memory/query: + post: + parameters: + - description: JSON-encoded provider data which will be made available to the + adapter servicing the API + in: header + name: X-LlamaStack-ProviderData + required: false + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QueryDocumentsRequest' + required: true + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/QueryDocumentsResponse' + description: OK + tags: + - Memory + /alpha/models/get: get: parameters: - in: query @@ -4249,7 +4249,7 @@ paths: description: OK tags: - Models - /models/list: + /alpha/models/list: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4268,7 +4268,7 @@ paths: description: OK tags: - Models - /models/register: + /alpha/models/register: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4293,7 +4293,7 @@ paths: description: OK tags: - Models - /models/unregister: + /alpha/models/unregister: post: parameters: - description: JSON-encoded provider data 
which will be made available to the @@ -4314,7 +4314,7 @@ paths: description: OK tags: - Models - /post_training/job/artifacts: + /alpha/post-training/job/artifacts: get: parameters: - in: query @@ -4338,7 +4338,7 @@ paths: description: OK tags: - PostTraining - /post_training/job/cancel: + /alpha/post-training/job/cancel: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4359,7 +4359,7 @@ paths: description: OK tags: - PostTraining - /post_training/job/logs: + /alpha/post-training/job/logs: get: parameters: - in: query @@ -4383,7 +4383,7 @@ paths: description: OK tags: - PostTraining - /post_training/job/status: + /alpha/post-training/job/status: get: parameters: - in: query @@ -4407,7 +4407,7 @@ paths: description: OK tags: - PostTraining - /post_training/jobs: + /alpha/post-training/jobs: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4426,7 +4426,7 @@ paths: description: OK tags: - PostTraining - /post_training/preference_optimize: + /alpha/post-training/preference-optimize: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4451,7 +4451,7 @@ paths: description: OK tags: - PostTraining - /post_training/supervised_fine_tune: + /alpha/post-training/supervised-fine-tune: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4476,7 +4476,7 @@ paths: description: OK tags: - PostTraining - /providers/list: + /alpha/providers/list: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4497,7 +4497,7 @@ paths: description: OK tags: - Inspect - /routes/list: + /alpha/routes/list: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4520,7 +4520,7 @@ paths: description: OK tags: - Inspect - /safety/run_shield: + /alpha/safety/run-shield: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4545,57 +4545,7 @@ paths: description: OK tags: - Safety - /scoring/score: - post: - parameters: - - description: JSON-encoded provider data which will be made available to the - adapter servicing the API - in: header - name: X-LlamaStack-ProviderData - required: false - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ScoreRequest' - required: true - responses: - '200': - content: - application/json: - schema: - $ref: '#/components/schemas/ScoreResponse' - description: OK - tags: - - Scoring - /scoring/score_batch: - post: - parameters: - - description: JSON-encoded provider data which will be made available to the - adapter servicing the API - in: header - name: X-LlamaStack-ProviderData - required: false - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ScoreBatchRequest' - required: true - responses: - '200': - content: - application/json: - schema: - $ref: '#/components/schemas/ScoreBatchResponse' - description: OK - tags: - - Scoring - /scoring_functions/get: + /alpha/scoring-functions/get: get: parameters: - in: query @@ -4621,7 +4571,7 @@ paths: description: OK tags: - ScoringFunctions - /scoring_functions/list: + /alpha/scoring-functions/list: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4640,7 +4590,7 @@ paths: description: OK tags: - ScoringFunctions - /scoring_functions/register: + 
/alpha/scoring-functions/register: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4661,7 +4611,57 @@ paths: description: OK tags: - ScoringFunctions - /shields/get: + /alpha/scoring/score: + post: + parameters: + - description: JSON-encoded provider data which will be made available to the + adapter servicing the API + in: header + name: X-LlamaStack-ProviderData + required: false + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreRequest' + required: true + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreResponse' + description: OK + tags: + - Scoring + /alpha/scoring/score-batch: + post: + parameters: + - description: JSON-encoded provider data which will be made available to the + adapter servicing the API + in: header + name: X-LlamaStack-ProviderData + required: false + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreBatchRequest' + required: true + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreBatchResponse' + description: OK + tags: + - Scoring + /alpha/shields/get: get: parameters: - in: query @@ -4687,7 +4687,7 @@ paths: description: OK tags: - Shields - /shields/list: + /alpha/shields/list: get: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4706,7 +4706,7 @@ paths: description: OK tags: - Shields - /shields/register: + /alpha/shields/register: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4731,7 +4731,7 @@ paths: description: OK tags: - Shields - /synthetic_data_generation/generate: + /alpha/synthetic-data-generation/generate: post: parameters: - description: JSON-encoded provider data which will be made available to the @@ -4756,7 +4756,7 @@ paths: description: OK tags: - SyntheticDataGeneration - /telemetry/get_trace: + /alpha/telemetry/get-trace: get: parameters: - in: query @@ -4780,7 +4780,7 @@ paths: description: OK tags: - Telemetry - /telemetry/log_event: + /alpha/telemetry/log-event: post: parameters: - description: JSON-encoded provider data which will be made available to the diff --git a/docs/source/getting_started/distributions/self_hosted_distro/fireworks.md b/docs/source/getting_started/distributions/self_hosted_distro/fireworks.md index ee46cd18d..f940e6de2 100644 --- a/docs/source/getting_started/distributions/self_hosted_distro/fireworks.md +++ b/docs/source/getting_started/distributions/self_hosted_distro/fireworks.md @@ -2,63 +2,67 @@ The `llamastack/distribution-fireworks` distribution consists of the following provider configurations. +| API | Provider(s) | +|-----|-------------| +| agents | `inline::meta-reference` | +| inference | `remote::fireworks` | +| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` | +| safety | `inline::llama-guard` | +| telemetry | `inline::meta-reference` | -| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** | -|----------------- |--------------- |---------------- |-------------------------------------------------- |---------------- |---------------- | -| **Provider(s)** | remote::fireworks | meta-reference | meta-reference | meta-reference | meta-reference | -### Step 0. Prerequisite -- Make sure you have access to a fireworks API Key. 
You can get one by visiting [fireworks.ai](https://fireworks.ai/)
+### Environment Variables

-### Step 1. Start the Distribution (Single Node CPU)
+The following environment variables can be configured:

-#### (Option 1) Start Distribution Via Docker
-> [!NOTE]
-> This assumes you have an hosted endpoint at Fireworks with API Key.
+- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `FIREWORKS_API_KEY`: Fireworks.AI API Key (default: ``)

-```
-$ cd distributions/fireworks && docker compose up
+### Models
+
+The following models are available by default:
+
+- `meta-llama/Llama-3.1-8B-Instruct (fireworks/llama-v3p1-8b-instruct)`
+- `meta-llama/Llama-3.1-70B-Instruct (fireworks/llama-v3p1-70b-instruct)`
+- `meta-llama/Llama-3.1-405B-Instruct-FP8 (fireworks/llama-v3p1-405b-instruct)`
+- `meta-llama/Llama-3.2-1B-Instruct (fireworks/llama-v3p2-1b-instruct)`
+- `meta-llama/Llama-3.2-3B-Instruct (fireworks/llama-v3p2-3b-instruct)`
+- `meta-llama/Llama-3.2-11B-Vision-Instruct (fireworks/llama-v3p2-11b-vision-instruct)`
+- `meta-llama/Llama-3.2-90B-Vision-Instruct (fireworks/llama-v3p2-90b-vision-instruct)`
+- `meta-llama/Llama-Guard-3-8B (fireworks/llama-guard-3-8b)`
+- `meta-llama/Llama-Guard-3-11B-Vision (fireworks/llama-guard-3-11b-vision)`
+
+
+### Prerequisite: API Keys
+
+Make sure you have access to a Fireworks API Key. You can get one by visiting [fireworks.ai](https://fireworks.ai/).
+
+
+## Running Llama Stack with Fireworks
+
+You can do this via Conda (build code) or Docker which has a pre-built image.
+
+### Via Docker
+
+This method allows you to get started quickly without having to build the distribution code.
+
+```bash
+LLAMA_STACK_PORT=5001
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v ./run.yaml:/root/my-run.yaml \
+  llamastack/distribution-fireworks \
+  --yaml-config /root/my-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env FIREWORKS_API_KEY=$FIREWORKS_API_KEY
```
-Make sure in you `run.yaml` file, you inference provider is pointing to the correct Fireworks URL server endpoint. E.g.
-```
-inference:
-  - provider_id: fireworks
-    provider_type: remote::fireworks
-    config:
-      url: https://api.fireworks.ai/inference
-      api_key: 
-```
-
-#### (Option 2) Start Distribution Via Conda
+### Via Conda

```bash
llama stack build --template fireworks --image-type conda
-# -- modify run.yaml to a valid Fireworks server endpoint
-llama stack run ./run.yaml
-```
-
-
-### (Optional) Model Serving
-
-Use `llama-stack-client models list` to check the available models served by Fireworks.
-``` -$ llama-stack-client models list -+------------------------------+------------------------------+---------------+------------+ -| identifier | llama_model | provider_id | metadata | -+==============================+==============================+===============+============+ -| Llama3.1-8B-Instruct | Llama3.1-8B-Instruct | fireworks0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.1-70B-Instruct | Llama3.1-70B-Instruct | fireworks0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.1-405B-Instruct | Llama3.1-405B-Instruct | fireworks0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.2-1B-Instruct | Llama3.2-1B-Instruct | fireworks0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.2-3B-Instruct | Llama3.2-3B-Instruct | fireworks0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.2-11B-Vision-Instruct | Llama3.2-11B-Vision-Instruct | fireworks0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.2-90B-Vision-Instruct | Llama3.2-90B-Vision-Instruct | fireworks0 | {} | -+------------------------------+------------------------------+---------------+------------+ +llama stack run ./run.yaml \ + --port 5001 \ + --env FIREWORKS_API_KEY=$FIREWORKS_API_KEY ``` diff --git a/docs/source/getting_started/distributions/self_hosted_distro/meta-reference-gpu.md b/docs/source/getting_started/distributions/self_hosted_distro/meta-reference-gpu.md index 1d5842c07..74a838d2f 100644 --- a/docs/source/getting_started/distributions/self_hosted_distro/meta-reference-gpu.md +++ b/docs/source/getting_started/distributions/self_hosted_distro/meta-reference-gpu.md @@ -1,15 +1,32 @@ # Meta Reference Distribution -The `llamastack/distribution-meta-reference-gpu` distribution consists of the following provider configurations. +The `llamastack/distribution-meta-reference-gpu` distribution consists of the following provider configurations: + +| API | Provider(s) | +|-----|-------------| +| agents | `inline::meta-reference` | +| inference | `inline::meta-reference` | +| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` | +| safety | `inline::llama-guard` | +| telemetry | `inline::meta-reference` | -| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** | -|----------------- |--------------- |---------------- |-------------------------------------------------- |---------------- |---------------- | -| **Provider(s)** | meta-reference | meta-reference | meta-reference, remote::pgvector, remote::chroma | meta-reference | meta-reference | +Note that you need access to nvidia GPUs to run this distribution. This distribution is not compatible with CPU-only machines or machines with AMD GPUs. 
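+
+As a quick sanity check before building, you can confirm that a CUDA-capable GPU is visible (a minimal sketch; it assumes PyTorch is installed, which the meta-reference inference provider requires anyway):
+
+```python
+import torch
+
+# Prints True and a non-zero count only if an NVIDIA GPU and CUDA runtime are usable.
+print(torch.cuda.is_available(), torch.cuda.device_count())
+```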
+ +### Environment Variables + +The following environment variables can be configured: + +- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`) +- `INFERENCE_MODEL`: Inference model loaded into the Meta Reference server (default: `meta-llama/Llama-3.2-3B-Instruct`) +- `INFERENCE_CHECKPOINT_DIR`: Directory containing the Meta Reference model checkpoint (default: `null`) +- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`) +- `SAFETY_CHECKPOINT_DIR`: Directory containing the Llama-Guard model checkpoint (default: `null`) -### Step 0. Prerequisite - Downloading Models -Please make sure you have llama model checkpoints downloaded in `~/.llama` before proceeding. See [installation guide](https://llama-stack.readthedocs.io/en/latest/cli_reference/download_models.html) here to download the models. +## Prerequisite: Downloading Models + +Please make sure you have llama model checkpoints downloaded in `~/.llama` before proceeding. See [installation guide](https://llama-stack.readthedocs.io/en/latest/cli_reference/download_models.html) here to download the models. Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints. ``` $ ls ~/.llama/checkpoints @@ -17,55 +34,56 @@ Llama3.1-8B Llama3.2-11B-Vision-Instruct Llama3.2-1B-Instruct Llama3 Llama3.1-8B-Instruct Llama3.2-1B Llama3.2-3B-Instruct Llama-Guard-3-1B Prompt-Guard-86M ``` -### Step 1. Start the Distribution +## Running the Distribution -#### (Option 1) Start with Docker -``` -$ cd distributions/meta-reference-gpu && docker compose up +You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. + +```bash +LLAMA_STACK_PORT=5001 +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-meta-reference-gpu \ + /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct ``` -> [!NOTE] -> This assumes you have access to GPU to start a local server with access to your GPU. +If you are using Llama Stack Safety / Shield APIs, use: - -> [!NOTE] -> `~/.llama` should be the path containing downloaded weights of Llama models. - - -This will download and start running a pre-built docker container. Alternatively, you may use the following commands: - -``` -docker run -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./run.yaml:/root/my-run.yaml --gpus=all distribution-meta-reference-gpu --yaml_config /root/my-run.yaml +```bash +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run-with-safety.yaml:/root/my-run.yaml \ + llamastack/distribution-meta-reference-gpu \ + /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \ + --env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B ``` -#### (Option 2) Start with Conda +### Via Conda -1. Install the `llama` CLI. See [CLI Reference](https://llama-stack.readthedocs.io/en/latest/cli_reference/index.html) +Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. -2. 
Build the `meta-reference-gpu` distribution
-
-```
-$ llama stack build --template meta-reference-gpu --image-type conda
+```bash
+llama stack build --template meta-reference-gpu --image-type conda
+llama stack run ./run.yaml \
+  --port 5001 \
+  --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
```

-3. Start running distribution
-```
-$ cd distributions/meta-reference-gpu
-$ llama stack run ./run.yaml
-```
+If you are using Llama Stack Safety / Shield APIs, use:

-### (Optional) Serving a new model
-You may change the `config.model` in `run.yaml` to update the model currently being served by the distribution. Make sure you have the model checkpoint downloaded in your `~/.llama`.
+```bash
+llama stack run ./run-with-safety.yaml \
+  --port 5001 \
+  --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \
+  --env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
```
-inference:
-  - provider_id: meta0
-    provider_type: inline::meta-reference
-    config:
-      model: Llama3.2-11B-Vision-Instruct
-      quantization: null
-      torch_seed: null
-      max_seq_len: 4096
-      max_batch_size: 1
-```
-
-Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints.
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/ollama.md b/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
index 37bef9536..d1e9ea67a 100644
--- a/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
+++ b/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
@@ -2,103 +2,114 @@

 The `llamastack/distribution-ollama` distribution consists of the following provider configurations.

-| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** |
-|----------------- |---------------- |---------------- |------------------------------------ |---------------- |---------------- |
-| **Provider(s)** | remote::ollama | meta-reference | remote::pgvector, remote::chromadb | meta-reference | meta-reference |
+| API | Provider(s) |
+|-----|-------------|
+| agents | `inline::meta-reference` |
+| inference | `remote::ollama` |
+| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
+| safety | `inline::llama-guard` |
+| telemetry | `inline::meta-reference` |

-## Using Docker Compose
+You should use this distribution if you have a regular desktop machine without very powerful GPUs. Of course, if you have powerful GPUs, you can still continue using this distribution since Ollama supports GPU acceleration.
+
+### Environment Variables

-You can use `docker compose` to start a Ollama server and connect with Llama Stack server in a single command.
+The following environment variables can be configured:

-### Docker: Start the Distribution (Single Node regular Desktop machine)
+- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `OLLAMA_URL`: URL of the Ollama server (default: `http://127.0.0.1:11434`)
+- `INFERENCE_MODEL`: Inference model loaded into the Ollama server (default: `meta-llama/Llama-3.2-3B-Instruct`)
+- `SAFETY_MODEL`: Safety model loaded into the Ollama server (default: `meta-llama/Llama-Guard-3-1B`)

-> [!NOTE]
-> This will start an ollama server with CPU only, please see [Ollama Documentations](https://github.com/ollama/ollama) for serving models on CPU only.
+
+## Setting up Ollama server
+
+Please check the [Ollama Documentation](https://github.com/ollama/ollama) on how to install and run Ollama.
After installing Ollama, you need to run `ollama serve` to start the server. + +In order to load models, you can run: ```bash -$ cd distributions/ollama; docker compose up +export INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct" + +# ollama names this model differently, and we must use the ollama name when loading the model +export OLLAMA_INFERENCE_MODEL="llama3.2:3b-instruct-fp16" +ollama run $OLLAMA_INFERENCE_MODEL --keepalive 60m ``` -### Docker: Start a Distribution (Single Node with nvidia GPUs) - -> [!NOTE] -> This assumes you have access to GPU to start a Ollama server with access to your GPU. +If you are using Llama Stack Safety / Shield APIs, you will also need to pull and run the safety model. ```bash -$ cd distributions/ollama-gpu; docker compose up +export SAFETY_MODEL="meta-llama/Llama-Guard-3-1B" + +# ollama names this model differently, and we must use the ollama name when loading the model +export OLLAMA_SAFETY_MODEL="llama-guard3:1b" +ollama run $OLLAMA_SAFETY_MODEL --keepalive 60m ``` -You will see outputs similar to following --- +## Running Llama Stack + +Now you are ready to run Llama Stack with Ollama as the inference provider. You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. + ```bash -[ollama] | [GIN] 2024/10/18 - 21:19:41 | 200 | 226.841µs | ::1 | GET "/api/ps" -[ollama] | [GIN] 2024/10/18 - 21:19:42 | 200 | 60.908µs | ::1 | GET "/api/ps" -INFO: Started server process [1] -INFO: Waiting for application startup. -INFO: Application startup complete. -INFO: Uvicorn running on http://[::]:5000 (Press CTRL+C to quit) -[llamastack] | Resolved 12 providers -[llamastack] | inner-inference => ollama0 -[llamastack] | models => __routing_table__ -[llamastack] | inference => __autorouted__ +export LLAMA_STACK_PORT=5001 +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ~/.llama:/root/.llama \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-ollama \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env OLLAMA_URL=http://host.docker.internal:11434 ``` -To kill the server +If you are using Llama Stack Safety / Shield APIs, use: + ```bash -docker compose down +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ~/.llama:/root/.llama \ + -v ./run-with-safety.yaml:/root/my-run.yaml \ + llamastack/distribution-ollama \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env SAFETY_MODEL=$SAFETY_MODEL \ + --env OLLAMA_URL=http://host.docker.internal:11434 ``` -## Starting Ollama and Llama Stack separately +### Via Conda -If you wish to separately spin up a Ollama server, and connect with Llama Stack, you should use the following commands. - -#### Start Ollama server -- Please check the [Ollama Documentation](https://github.com/ollama/ollama) for more details. - -**Via Docker** -```bash -docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama -``` - -**Via CLI** -```bash -ollama run -``` - -#### Start Llama Stack server pointing to Ollama server - -**Via Conda** +Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. 
```bash
+export LLAMA_STACK_PORT=5001
+
llama stack build --template ollama --image-type conda
-llama stack run ./gpu/run.yaml
+llama stack run ./run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env OLLAMA_URL=http://localhost:11434
```
-**Via Docker**
-```
-docker run --network host -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./gpu/run.yaml:/root/llamastack-run-ollama.yaml --gpus=all llamastack/distribution-ollama --yaml_config /root/llamastack-run-ollama.yaml
+If you are using Llama Stack Safety / Shield APIs, use:
+
+```bash
+llama stack run ./run-with-safety.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env SAFETY_MODEL=$SAFETY_MODEL \
+  --env OLLAMA_URL=http://localhost:11434
```

-Make sure in your `run.yaml` file, your inference provider is pointing to the correct Ollama endpoint. E.g.
-```yaml
-inference:
-  - provider_id: ollama0
-    provider_type: remote::ollama
-    config:
-      url: http://127.0.0.1:14343
-```

 ### (Optional) Update Model Serving Configuration

-#### Downloading model via Ollama
-
-You can use ollama for managing model downloads.
-
-```bash
-ollama pull llama3.1:8b-instruct-fp16
-ollama pull llama3.1:70b-instruct-fp16
-```
-
 > [!NOTE]
 > Please check the [OLLAMA_SUPPORTED_MODELS](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/inference/ollama/ollama.py) for the supported Ollama models.
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/remote-vllm.md b/docs/source/getting_started/distributions/self_hosted_distro/remote-vllm.md
new file mode 100644
index 000000000..748b98732
--- /dev/null
+++ b/docs/source/getting_started/distributions/self_hosted_distro/remote-vllm.md
@@ -0,0 +1,144 @@
+# Remote vLLM Distribution
+
+The `llamastack/distribution-remote-vllm` distribution consists of the following provider configurations:
+
+| API | Provider(s) |
+|-----|-------------|
+| agents | `inline::meta-reference` |
+| inference | `remote::vllm` |
+| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
+| safety | `inline::llama-guard` |
+| telemetry | `inline::meta-reference` |
+
+
+You can use this distribution if you have GPUs and want to run an independent vLLM server container for running inference.
+
+### Environment Variables
+
+The following environment variables can be configured:
+
+- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `INFERENCE_MODEL`: Inference model loaded into the vLLM server (default: `meta-llama/Llama-3.2-3B-Instruct`)
+- `VLLM_URL`: URL of the vLLM server with the main inference model (default: `http://host.docker.internal:5100/v1`)
+- `MAX_TOKENS`: Maximum number of tokens for generation (default: `4096`)
+- `SAFETY_VLLM_URL`: URL of the vLLM server with the safety model (default: `http://host.docker.internal:5101/v1`)
+- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`)
+
+
+## Setting up vLLM server
+
+Please check the [vLLM Documentation](https://docs.vllm.ai/en/v0.5.5/serving/deploying_with_docker.html) to get a vLLM endpoint.
Here is a sample script to start a vLLM server locally via Docker: + +```bash +export INFERENCE_PORT=8000 +export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct +export CUDA_VISIBLE_DEVICES=0 + +docker run \ + --runtime nvidia \ + --gpus $CUDA_VISIBLE_DEVICES \ + -v ~/.cache/huggingface:/root/.cache/huggingface \ + --env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \ + -p $INFERENCE_PORT:$INFERENCE_PORT \ + --ipc=host \ + vllm/vllm-openai:latest \ + --gpu-memory-utilization 0.7 \ + --model $INFERENCE_MODEL \ + --port $INFERENCE_PORT +``` + +If you are using Llama Stack Safety / Shield APIs, then you will need to also run another instance of a vLLM with a corresponding safety model like `meta-llama/Llama-Guard-3-1B` using a script like: + +```bash +export SAFETY_PORT=8081 +export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B +export CUDA_VISIBLE_DEVICES=1 + +docker run \ + --runtime nvidia \ + --gpus $CUDA_VISIBLE_DEVICES \ + -v ~/.cache/huggingface:/root/.cache/huggingface \ + --env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \ + -p $SAFETY_PORT:$SAFETY_PORT \ + --ipc=host \ + vllm/vllm-openai:latest \ + --gpu-memory-utilization 0.7 \ + --model $SAFETY_MODEL \ + --port $SAFETY_PORT +``` + +## Running Llama Stack + +Now you are ready to run Llama Stack with vLLM as the inference provider. You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. + +```bash +export INFERENCE_PORT=8000 +export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct +export LLAMA_STACK_PORT=5001 + +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-remote-vllm \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 +``` + +If you are using Llama Stack Safety / Shield APIs, use: + +```bash +export SAFETY_PORT=8081 +export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B + +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run-with-safety.yaml:/root/my-run.yaml \ + llamastack/distribution-remote-vllm \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 \ + --env SAFETY_MODEL=$SAFETY_MODEL \ + --env SAFETY_VLLM_URL=http://host.docker.internal:$SAFETY_PORT/v1 +``` + + +### Via Conda + +Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. 
+ +```bash +export INFERENCE_PORT=8000 +export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct +export LLAMA_STACK_PORT=5001 + +cd distributions/remote-vllm +llama stack build --template remote-vllm --image-type conda + +llama stack run ./run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env VLLM_URL=http://localhost:$INFERENCE_PORT/v1 +``` + +If you are using Llama Stack Safety / Shield APIs, use: + +```bash +export SAFETY_PORT=8081 +export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B + +llama stack run ./run-with-safety.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env VLLM_URL=http://localhost:$INFERENCE_PORT/v1 \ + --env SAFETY_MODEL=$SAFETY_MODEL \ + --env SAFETY_VLLM_URL=http://localhost:$SAFETY_PORT/v1 +``` diff --git a/docs/source/getting_started/distributions/self_hosted_distro/remote_vllm.md b/docs/source/getting_started/distributions/self_hosted_distro/remote_vllm.md deleted file mode 100644 index 2ab8df7b7..000000000 --- a/docs/source/getting_started/distributions/self_hosted_distro/remote_vllm.md +++ /dev/null @@ -1,83 +0,0 @@ -# Remote vLLM Distribution - -The `llamastack/distribution-remote-vllm` distribution consists of the following provider configurations. - -| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** | -|----------------- |---------------- |---------------- |------------------------------------ |---------------- |---------------- | -| **Provider(s)** | remote::vllm | meta-reference | remote::pgvector, remote::chromadb | meta-reference | meta-reference | - -You can use this distribution if you have GPUs and want to run an independent vLLM server container for running inference. - -## Using Docker Compose - -You can use `docker compose` to start a vLLM container and Llama Stack server container together. - -> [!NOTE] -> This assumes you have access to GPU to start a vLLM server with access to your GPU. - -```bash -$ cd distributions/remote-vllm; docker compose up -``` - -You will see outputs similar to following --- -``` - -``` - -To kill the server -```bash -docker compose down -``` - -## Starting vLLM and Llama Stack separately - -You may want to start a vLLM server and connect with Llama Stack manually. There are two ways to start a vLLM server and connect with Llama Stack. - - -#### Start vLLM server. - -```bash -docker run --runtime nvidia --gpus all \ - -v ~/.cache/huggingface:/root/.cache/huggingface \ - --env "HUGGING_FACE_HUB_TOKEN=" \ - -p 8000:8000 \ - --ipc=host \ - vllm/vllm-openai:latest \ - --model meta-llama/Llama-3.1-8B-Instruct -``` - -Please check the [vLLM Documentation](https://docs.vllm.ai/en/v0.5.5/serving/deploying_with_docker.html) for more details. - - -#### Start Llama Stack server pointing to your vLLM server - - -We have provided a template `run.yaml` file in the `distributions/remote-vllm` directory. Please make sure to modify the `inference.provider_id` to point to your vLLM server endpoint. 
As an example, if your vLLM server is running on `http://127.0.0.1:8000`, your `run.yaml` file should look like the following:
-```yaml
-inference:
-  - provider_id: vllm0
-    provider_type: remote::vllm
-    config:
-      url: http://127.0.0.1:8000
-```
-
-**Via Conda**
-
-If you are using Conda, you can build and run the Llama Stack server with the following commands:
-```bash
-cd distributions/remote-vllm
-llama stack build --template remote_vllm --image-type conda
-llama stack run run.yaml
-```
-
-**Via Docker**
-
-You can use the Llama Stack Docker image to start the server with the following command:
-```bash
-docker run --network host -it -p 5000:5000 \
-  -v ~/.llama:/root/.llama \
-  -v ./gpu/run.yaml:/root/llamastack-run-remote-vllm.yaml \
-  --gpus=all \
-  llamastack/distribution-remote-vllm \
-  --yaml_config /root/llamastack-run-remote-vllm.yaml
-```
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/tgi.md b/docs/source/getting_started/distributions/self_hosted_distro/tgi.md
index 8ad9de181..63631f937 100644
--- a/docs/source/getting_started/distributions/self_hosted_distro/tgi.md
+++ b/docs/source/getting_started/distributions/self_hosted_distro/tgi.md
@@ -2,94 +2,125 @@

 The `llamastack/distribution-tgi` distribution consists of the following provider configurations.

-
-| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** |
-|----------------- |--------------- |---------------- |-------------------------------------------------- |---------------- |---------------- |
-| **Provider(s)** | remote::tgi | meta-reference | meta-reference, remote::pgvector, remote::chroma | meta-reference | meta-reference |
+| API | Provider(s) |
+|-----|-------------|
+| agents | `inline::meta-reference` |
+| inference | `remote::tgi` |
+| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
+| safety | `inline::llama-guard` |
+| telemetry | `inline::meta-reference` |

-### Docker: Start the Distribution (Single Node GPU)
+You can use this distribution if you have GPUs and want to run an independent TGI server container for running inference.

-> [!NOTE]
-> This assumes you have access to GPU to start a TGI server with access to your GPU.
+### Environment Variables
+
+The following environment variables can be configured:
+
+- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `INFERENCE_MODEL`: Inference model loaded into the TGI server (default: `meta-llama/Llama-3.2-3B-Instruct`)
+- `TGI_URL`: URL of the TGI server with the main inference model (default: `http://127.0.0.1:8080/v1`)
+- `TGI_SAFETY_URL`: URL of the TGI server with the safety model (default: `http://127.0.0.1:8081/v1`)
+- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`)

-```
-$ cd distributions/tgi && docker compose up
+## Setting up TGI server
+
+Please check the [TGI Getting Started Guide](https://github.com/huggingface/text-generation-inference?tab=readme-ov-file#get-started) to get a TGI endpoint.
Here is a sample script to start a TGI server locally via Docker: + +```bash +export INFERENCE_PORT=8080 +export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct +export CUDA_VISIBLE_DEVICES=0 + +docker run --rm -it \ + -v $HOME/.cache/huggingface:/data \ + -p $INFERENCE_PORT:$INFERENCE_PORT \ + --gpus $CUDA_VISIBLE_DEVICES \ + ghcr.io/huggingface/text-generation-inference:2.3.1 \ + --dtype bfloat16 \ + --usage-stats off \ + --sharded false \ + --cuda-memory-fraction 0.7 \ + --model-id $INFERENCE_MODEL \ + --port $INFERENCE_PORT ``` -The script will first start up TGI server, then start up Llama Stack distribution server hooking up to the remote TGI provider for inference. You should be able to see the following outputs -- -``` -[text-generation-inference] | 2024-10-15T18:56:33.810397Z INFO text_generation_router::server: router/src/server.rs:1813: Using config Some(Llama) -[text-generation-inference] | 2024-10-15T18:56:33.810448Z WARN text_generation_router::server: router/src/server.rs:1960: Invalid hostname, defaulting to 0.0.0.0 -[text-generation-inference] | 2024-10-15T18:56:33.864143Z INFO text_generation_router::server: router/src/server.rs:2353: Connected -INFO: Started server process [1] -INFO: Waiting for application startup. -INFO: Application startup complete. -INFO: Uvicorn running on http://[::]:5000 (Press CTRL+C to quit) +If you are using Llama Stack Safety / Shield APIs, then you will need to also run another instance of a TGI with a corresponding safety model like `meta-llama/Llama-Guard-3-1B` using a script like: + +```bash +export SAFETY_PORT=8081 +export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B +export CUDA_VISIBLE_DEVICES=1 + +docker run --rm -it \ + -v $HOME/.cache/huggingface:/data \ + -p $SAFETY_PORT:$SAFETY_PORT \ + --gpus $CUDA_VISIBLE_DEVICES \ + ghcr.io/huggingface/text-generation-inference:2.3.1 \ + --dtype bfloat16 \ + --usage-stats off \ + --sharded false \ + --model-id $SAFETY_MODEL \ + --port $SAFETY_PORT ``` -To kill the server -``` -docker compose down +## Running Llama Stack + +Now you are ready to run Llama Stack with TGI as the inference provider. You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. + +```bash +LLAMA_STACK_PORT=5001 +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-tgi \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT ``` +If you are using Llama Stack Safety / Shield APIs, use: -### Conda: TGI server + llama stack run - -If you wish to separately spin up a TGI server, and connect with Llama Stack, you may use the following commands. - -#### Start TGI server locally -- Please check the [TGI Getting Started Guide](https://github.com/huggingface/text-generation-inference?tab=readme-ov-file#get-started) to get a TGI endpoint. 
-
-```
-docker run --rm -it -v $HOME/.cache/huggingface:/data -p 5009:5009 --gpus all ghcr.io/huggingface/text-generation-inference:latest --dtype bfloat16 --usage-stats on --sharded false --model-id meta-llama/Llama-3.1-8B-Instruct --port 5009
+```bash
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v ./run-with-safety.yaml:/root/my-run.yaml \
+  llamastack/distribution-tgi \
+  --yaml-config /root/my-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT \
+  --env SAFETY_MODEL=$SAFETY_MODEL \
+  --env TGI_SAFETY_URL=http://host.docker.internal:$SAFETY_PORT
```

-#### Start Llama Stack server pointing to TGI server
+### Via Conda

-**Via Conda**
+Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.

```bash
llama stack build --template tgi --image-type conda
-# -- start a TGI server endpoint
-llama stack run ./gpu/run.yaml
+llama stack run ./run.yaml \
+  --port 5001 \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env TGI_URL=http://127.0.0.1:$INFERENCE_PORT
```

-**Via Docker**
-```
-docker run --network host -it -p 5000:5000 -v ./run.yaml:/root/my-run.yaml --gpus=all llamastack/distribution-tgi --yaml_config /root/my-run.yaml
-```
+If you are using Llama Stack Safety / Shield APIs, use:

-Make sure in you `run.yaml` file, you inference provider is pointing to the correct TGI server endpoint. E.g.
-```
-inference:
-  - provider_id: tgi0
-    provider_type: remote::tgi
-    config:
-      url: http://127.0.0.1:5009
-```
-
-
-### (Optional) Update Model Serving Configuration
-To serve a new model with `tgi`, change the docker command flag `--model-id `.
-
-This can be done by edit the `command` args in `compose.yaml`. E.g. Replace "Llama-3.2-1B-Instruct" with the model you want to serve.
-
-```
-command: ["--dtype", "bfloat16", "--usage-stats", "on", "--sharded", "false", "--model-id", "meta-llama/Llama-3.2-1B-Instruct", "--port", "5009", "--cuda-memory-fraction", "0.3"]
-```
-
-or by changing the docker run command's `--model-id` flag
-```
-docker run --rm -it -v $HOME/.cache/huggingface:/data -p 5009:5009 --gpus all ghcr.io/huggingface/text-generation-inference:latest --dtype bfloat16 --usage-stats on --sharded false --model-id meta-llama/Llama-3.2-1B-Instruct --port 5009
-```
-
-In `run.yaml`, make sure you point the correct server endpoint to the TGI server endpoint serving your model.
-```
-inference:
-  - provider_id: tgi0
-    provider_type: remote::tgi
-    config:
-      url: http://127.0.0.1:5009
+```bash
+llama stack run ./run-with-safety.yaml \
+  --port 5001 \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env TGI_URL=http://127.0.0.1:$INFERENCE_PORT \
+  --env SAFETY_MODEL=$SAFETY_MODEL \
+  --env TGI_SAFETY_URL=http://127.0.0.1:$SAFETY_PORT
```
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/together.md b/docs/source/getting_started/distributions/self_hosted_distro/together.md
index b9ea9f6e6..5d79fcf0c 100644
--- a/docs/source/getting_started/distributions/self_hosted_distro/together.md
+++ b/docs/source/getting_started/distributions/self_hosted_distro/together.md
@@ -1,62 +1,67 @@
-# Together Distribution
-
-### Connect to a Llama Stack Together Endpoint
-- You may connect to a hosted endpoint `https://llama-stack.together.ai`, serving a Llama Stack distribution
+# Together Distribution

 The `llamastack/distribution-together` distribution consists of the following provider configurations.
- -| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** | -|----------------- |--------------- |---------------- |-------------------------------------------------- |---------------- |---------------- | -| **Provider(s)** | remote::together | meta-reference | meta-reference, remote::weaviate | meta-reference | meta-reference | +| API | Provider(s) | +|-----|-------------| +| agents | `inline::meta-reference` | +| inference | `remote::together` | +| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` | +| safety | `inline::llama-guard` | +| telemetry | `inline::meta-reference` | -### Docker: Start the Distribution (Single Node CPU) +### Environment Variables -> [!NOTE] -> This assumes you have an hosted endpoint at Together with API Key. +The following environment variables can be configured: -``` -$ cd distributions/together && docker compose up +- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`) +- `TOGETHER_API_KEY`: Together.AI API Key (default: ``) + +### Models + +The following models are available by default: + +- `meta-llama/Llama-3.1-8B-Instruct` +- `meta-llama/Llama-3.1-70B-Instruct` +- `meta-llama/Llama-3.1-405B-Instruct-FP8` +- `meta-llama/Llama-3.2-3B-Instruct` +- `meta-llama/Llama-3.2-11B-Vision-Instruct` +- `meta-llama/Llama-3.2-90B-Vision-Instruct` +- `meta-llama/Llama-Guard-3-8B` +- `meta-llama/Llama-Guard-3-11B-Vision` + + +### Prerequisite: API Keys + +Make sure you have access to a Together API Key. You can get one by visiting [together.xyz](https://together.xyz/). + + +## Running Llama Stack with Together + +You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. + +```bash +LLAMA_STACK_PORT=5001 +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-together \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env TOGETHER_API_KEY=$TOGETHER_API_KEY ``` -Make sure in your `run.yaml` file, your inference provider is pointing to the correct Together URL server endpoint. E.g. -``` -inference: - - provider_id: together - provider_type: remote::together - config: - url: https://api.together.xyz/v1 - api_key: -``` - -### Conda llama stack run (Single Node CPU) +### Via Conda ```bash llama stack build --template together --image-type conda -# -- modify run.yaml to a valid Together server endpoint -llama stack run ./run.yaml -``` - -### (Optional) Update Model Serving Configuration - -Use `llama-stack-client models list` to check the available models served by together. 
- -``` -$ llama-stack-client models list -+------------------------------+------------------------------+---------------+------------+ -| identifier | llama_model | provider_id | metadata | -+==============================+==============================+===============+============+ -| Llama3.1-8B-Instruct | Llama3.1-8B-Instruct | together0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.1-70B-Instruct | Llama3.1-70B-Instruct | together0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.1-405B-Instruct | Llama3.1-405B-Instruct | together0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.2-3B-Instruct | Llama3.2-3B-Instruct | together0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.2-11B-Vision-Instruct | Llama3.2-11B-Vision-Instruct | together0 | {} | -+------------------------------+------------------------------+---------------+------------+ -| Llama3.2-90B-Vision-Instruct | Llama3.2-90B-Vision-Instruct | together0 | {} | -+------------------------------+------------------------------+---------------+------------+ +llama stack run ./run.yaml \ + --port 5001 \ + --env TOGETHER_API_KEY=$TOGETHER_API_KEY ``` diff --git a/docs/source/index.md b/docs/source/index.md index c5f339f21..a53952be7 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -74,7 +74,7 @@ A Distribution is where APIs and Providers are assembled together to provide a c | Python | [llama-stack-client-python](https://github.com/meta-llama/llama-stack-client-python) | [![PyPI version](https://img.shields.io/pypi/v/llama_stack_client.svg)](https://pypi.org/project/llama_stack_client/) | Swift | [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift) | [![Swift Package Index](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2Fmeta-llama%2Fllama-stack-client-swift%2Fbadge%3Ftype%3Dswift-versions)](https://swiftpackageindex.com/meta-llama/llama-stack-client-swift) | Node | [llama-stack-client-node](https://github.com/meta-llama/llama-stack-client-node) | [![NPM version](https://img.shields.io/npm/v/llama-stack-client.svg)](https://npmjs.org/package/llama-stack-client) -| Kotlin | [llama-stack-client-kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) | +| Kotlin | [llama-stack-client-kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) | [![Maven version](https://img.shields.io/maven-central/v/com.llama.llamastack/llama-stack-client-kotlin)](https://central.sonatype.com/artifact/com.llama.llamastack/llama-stack-client-kotlin) Check out our client SDKs for connecting to Llama Stack server in your preferred language, you can choose from [python](https://github.com/meta-llama/llama-stack-client-python), [node](https://github.com/meta-llama/llama-stack-client-node), [swift](https://github.com/meta-llama/llama-stack-client-swift), and [kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) programming languages to quickly build your applications. 
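As a rough illustration of what a client app looks like in Python (a sketch, not part of this diff: it assumes `pip install llama-stack-client`, a server running on port 5001, and a registered model; exact parameter names can vary between client versions):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5001")

# See which models the distribution has registered.
for model in client.models.list():
    print(model.identifier)

# model_id mirrors the server-side chat_completion signature shown later in this diff.
response = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.2-3B-Instruct",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.completion_message.content)
```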
diff --git a/llama_stack/apis/batch_inference/batch_inference.py b/llama_stack/apis/batch_inference/batch_inference.py index 45a1a1593..4e15b28a6 100644 --- a/llama_stack/apis/batch_inference/batch_inference.py +++ b/llama_stack/apis/batch_inference/batch_inference.py @@ -49,7 +49,7 @@ class BatchChatCompletionResponse(BaseModel): @runtime_checkable class BatchInference(Protocol): - @webmethod(route="/batch_inference/completion") + @webmethod(route="/batch-inference/completion") async def batch_completion( self, model: str, @@ -58,7 +58,7 @@ class BatchInference(Protocol): logprobs: Optional[LogProbConfig] = None, ) -> BatchCompletionResponse: ... - @webmethod(route="/batch_inference/chat_completion") + @webmethod(route="/batch-inference/chat-completion") async def batch_chat_completion( self, model: str, diff --git a/llama_stack/apis/datasetio/datasetio.py b/llama_stack/apis/datasetio/datasetio.py index 49a07c9b1..c5052877a 100644 --- a/llama_stack/apis/datasetio/datasetio.py +++ b/llama_stack/apis/datasetio/datasetio.py @@ -29,7 +29,7 @@ class DatasetIO(Protocol): # keeping for aligning with inference/safety, but this is not used dataset_store: DatasetStore - @webmethod(route="/datasetio/get_rows_paginated", method="GET") + @webmethod(route="/datasetio/get-rows-paginated", method="GET") async def get_rows_paginated( self, dataset_id: str, diff --git a/llama_stack/apis/eval/eval.py b/llama_stack/apis/eval/eval.py index 04a5a55d5..e52d4dab6 100644 --- a/llama_stack/apis/eval/eval.py +++ b/llama_stack/apis/eval/eval.py @@ -74,14 +74,14 @@ class EvaluateResponse(BaseModel): class Eval(Protocol): - @webmethod(route="/eval/run_eval", method="POST") + @webmethod(route="/eval/run-eval", method="POST") async def run_eval( self, task_id: str, task_config: EvalTaskConfig, ) -> Job: ... - @webmethod(route="/eval/evaluate_rows", method="POST") + @webmethod(route="/eval/evaluate-rows", method="POST") async def evaluate_rows( self, task_id: str, diff --git a/llama_stack/apis/eval_tasks/eval_tasks.py b/llama_stack/apis/eval_tasks/eval_tasks.py index 940dafc06..083681289 100644 --- a/llama_stack/apis/eval_tasks/eval_tasks.py +++ b/llama_stack/apis/eval_tasks/eval_tasks.py @@ -42,13 +42,13 @@ class EvalTaskInput(CommonEvalTaskFields, BaseModel): @runtime_checkable class EvalTasks(Protocol): - @webmethod(route="/eval_tasks/list", method="GET") + @webmethod(route="/eval-tasks/list", method="GET") async def list_eval_tasks(self) -> List[EvalTask]: ... - @webmethod(route="/eval_tasks/get", method="GET") + @webmethod(route="/eval-tasks/get", method="GET") async def get_eval_task(self, name: str) -> Optional[EvalTask]: ... - @webmethod(route="/eval_tasks/register", method="POST") + @webmethod(route="/eval-tasks/register", method="POST") async def register_eval_task( self, eval_task_id: str, diff --git a/llama_stack/apis/inference/inference.py b/llama_stack/apis/inference/inference.py index b2681e578..5aadd97c7 100644 --- a/llama_stack/apis/inference/inference.py +++ b/llama_stack/apis/inference/inference.py @@ -234,7 +234,7 @@ class Inference(Protocol): logprobs: Optional[LogProbConfig] = None, ) -> Union[CompletionResponse, AsyncIterator[CompletionResponseStreamChunk]]: ... 
- @webmethod(route="/inference/chat_completion") + @webmethod(route="/inference/chat-completion") async def chat_completion( self, model_id: str, diff --git a/llama_stack/apis/memory_banks/memory_banks.py b/llama_stack/apis/memory_banks/memory_banks.py index c1abcb789..1b16af330 100644 --- a/llama_stack/apis/memory_banks/memory_banks.py +++ b/llama_stack/apis/memory_banks/memory_banks.py @@ -130,13 +130,13 @@ class MemoryBankInput(BaseModel): @runtime_checkable class MemoryBanks(Protocol): - @webmethod(route="/memory_banks/list", method="GET") + @webmethod(route="/memory-banks/list", method="GET") async def list_memory_banks(self) -> List[MemoryBank]: ... - @webmethod(route="/memory_banks/get", method="GET") + @webmethod(route="/memory-banks/get", method="GET") async def get_memory_bank(self, memory_bank_id: str) -> Optional[MemoryBank]: ... - @webmethod(route="/memory_banks/register", method="POST") + @webmethod(route="/memory-banks/register", method="POST") async def register_memory_bank( self, memory_bank_id: str, @@ -145,5 +145,5 @@ class MemoryBanks(Protocol): provider_memory_bank_id: Optional[str] = None, ) -> MemoryBank: ... - @webmethod(route="/memory_banks/unregister", method="POST") + @webmethod(route="/memory-banks/unregister", method="POST") async def unregister_memory_bank(self, memory_bank_id: str) -> None: ... diff --git a/llama_stack/apis/models/models.py b/llama_stack/apis/models/models.py index aabe78d85..cbd6265e2 100644 --- a/llama_stack/apis/models/models.py +++ b/llama_stack/apis/models/models.py @@ -31,6 +31,8 @@ class Model(CommonModelFields, Resource): def provider_model_id(self) -> str: return self.provider_resource_id + model_config = ConfigDict(protected_namespaces=()) + class ModelInput(CommonModelFields): model_id: str diff --git a/llama_stack/apis/post_training/post_training.py b/llama_stack/apis/post_training/post_training.py index eb4992cc6..2999d43af 100644 --- a/llama_stack/apis/post_training/post_training.py +++ b/llama_stack/apis/post_training/post_training.py @@ -176,7 +176,7 @@ class PostTrainingJobArtifactsResponse(BaseModel): class PostTraining(Protocol): - @webmethod(route="/post_training/supervised_fine_tune") + @webmethod(route="/post-training/supervised-fine-tune") def supervised_fine_tune( self, job_uuid: str, @@ -193,7 +193,7 @@ class PostTraining(Protocol): logger_config: Dict[str, Any], ) -> PostTrainingJob: ... - @webmethod(route="/post_training/preference_optimize") + @webmethod(route="/post-training/preference-optimize") def preference_optimize( self, job_uuid: str, @@ -208,22 +208,22 @@ class PostTraining(Protocol): logger_config: Dict[str, Any], ) -> PostTrainingJob: ... - @webmethod(route="/post_training/jobs") + @webmethod(route="/post-training/jobs") def get_training_jobs(self) -> List[PostTrainingJob]: ... # sends SSE stream of logs - @webmethod(route="/post_training/job/logs") + @webmethod(route="/post-training/job/logs") def get_training_job_logstream(self, job_uuid: str) -> PostTrainingJobLogStream: ... - @webmethod(route="/post_training/job/status") + @webmethod(route="/post-training/job/status") def get_training_job_status( self, job_uuid: str ) -> PostTrainingJobStatusResponse: ... - @webmethod(route="/post_training/job/cancel") + @webmethod(route="/post-training/job/cancel") def cancel_training_job(self, job_uuid: str) -> None: ... - @webmethod(route="/post_training/job/artifacts") + @webmethod(route="/post-training/job/artifacts") def get_training_job_artifacts( self, job_uuid: str ) -> PostTrainingJobArtifactsResponse: ... 
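Note that these `webmethod` routes are also mounted under the `/alpha` version prefix introduced in this change (see the OpenAPI spec above and `LLAMA_STACK_API_VERSION` below), so a call that used to hit `/post_training/job/status` now targets `/alpha/post-training/job/status`. A hedged sketch with a raw HTTP client (host and job UUID are placeholders):

```python
import httpx

BASE_URL = "http://localhost:5001"  # assumed server address

# job_uuid mirrors the get_training_job_status() signature above.
resp = httpx.get(
    f"{BASE_URL}/alpha/post-training/job/status",
    params={"job_uuid": "job-1234"},  # placeholder UUID
)
resp.raise_for_status()
print(resp.json())
```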
diff --git a/llama_stack/apis/safety/safety.py b/llama_stack/apis/safety/safety.py index d4dfd5986..724f8dc96 100644 --- a/llama_stack/apis/safety/safety.py +++ b/llama_stack/apis/safety/safety.py @@ -46,7 +46,7 @@ class ShieldStore(Protocol): class Safety(Protocol): shield_store: ShieldStore - @webmethod(route="/safety/run_shield") + @webmethod(route="/safety/run-shield") async def run_shield( self, shield_id: str, diff --git a/llama_stack/apis/scoring/scoring.py b/llama_stack/apis/scoring/scoring.py index 2c643a28e..a47620a3d 100644 --- a/llama_stack/apis/scoring/scoring.py +++ b/llama_stack/apis/scoring/scoring.py @@ -44,7 +44,7 @@ class ScoringFunctionStore(Protocol): class Scoring(Protocol): scoring_function_store: ScoringFunctionStore - @webmethod(route="/scoring/score_batch") + @webmethod(route="/scoring/score-batch") async def score_batch( self, dataset_id: str, diff --git a/llama_stack/apis/scoring_functions/scoring_functions.py b/llama_stack/apis/scoring_functions/scoring_functions.py index 251a683c1..4dce5a46d 100644 --- a/llama_stack/apis/scoring_functions/scoring_functions.py +++ b/llama_stack/apis/scoring_functions/scoring_functions.py @@ -104,13 +104,13 @@ class ScoringFnInput(CommonScoringFnFields, BaseModel): @runtime_checkable class ScoringFunctions(Protocol): - @webmethod(route="/scoring_functions/list", method="GET") + @webmethod(route="/scoring-functions/list", method="GET") async def list_scoring_functions(self) -> List[ScoringFn]: ... - @webmethod(route="/scoring_functions/get", method="GET") + @webmethod(route="/scoring-functions/get", method="GET") async def get_scoring_function(self, scoring_fn_id: str) -> Optional[ScoringFn]: ... - @webmethod(route="/scoring_functions/register", method="POST") + @webmethod(route="/scoring-functions/register", method="POST") async def register_scoring_function( self, scoring_fn_id: str, diff --git a/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py b/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py index 05b49036d..717a0ec2f 100644 --- a/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +++ b/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py @@ -44,7 +44,7 @@ class SyntheticDataGenerationResponse(BaseModel): class SyntheticDataGeneration(Protocol): - @webmethod(route="/synthetic_data_generation/generate") + @webmethod(route="/synthetic-data-generation/generate") def synthetic_data_generate( self, dialogs: List[Message], diff --git a/llama_stack/apis/telemetry/telemetry.py b/llama_stack/apis/telemetry/telemetry.py index 8374192f2..31f64733b 100644 --- a/llama_stack/apis/telemetry/telemetry.py +++ b/llama_stack/apis/telemetry/telemetry.py @@ -125,8 +125,8 @@ Event = Annotated[ @runtime_checkable class Telemetry(Protocol): - @webmethod(route="/telemetry/log_event") + @webmethod(route="/telemetry/log-event") async def log_event(self, event: Event) -> None: ... - @webmethod(route="/telemetry/get_trace", method="GET") + @webmethod(route="/telemetry/get-trace", method="GET") async def get_trace(self, trace_id: str) -> Trace: ... diff --git a/llama_stack/apis/version.py b/llama_stack/apis/version.py new file mode 100644 index 000000000..f178712ba --- /dev/null +++ b/llama_stack/apis/version.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
+ +LLAMA_STACK_API_VERSION = "alpha" diff --git a/llama_stack/cli/download.py b/llama_stack/cli/download.py index 07b40bd21..bb57186e5 100644 --- a/llama_stack/cli/download.py +++ b/llama_stack/cli/download.py @@ -19,7 +19,7 @@ import httpx from llama_models.datatypes import Model from llama_models.sku_list import LlamaDownloadInfo -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict from rich.console import Console from rich.progress import ( @@ -293,8 +293,8 @@ class ParallelDownloader: if free_space < required_space: self.console.print( - f"[red]Not enough disk space. Required: {required_space // (1024*1024)} MB, " - f"Available: {free_space // (1024*1024)} MB[/red]" + f"[red]Not enough disk space. Required: {required_space // (1024 * 1024)} MB, " + f"Available: {free_space // (1024 * 1024)} MB[/red]" ) return False return True @@ -413,8 +413,7 @@ class ModelEntry(BaseModel): model_id: str files: Dict[str, str] - class Config: - protected_namespaces = () + model_config = ConfigDict(protected_namespaces=()) class Manifest(BaseModel): diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py index 94d41cfab..e9760c9cb 100644 --- a/llama_stack/cli/stack/build.py +++ b/llama_stack/cli/stack/build.py @@ -8,10 +8,14 @@ import argparse from llama_stack.cli.subcommand import Subcommand from llama_stack.distribution.datatypes import * # noqa: F403 +import importlib import os +import shutil from functools import lru_cache from pathlib import Path +import pkg_resources + from llama_stack.distribution.distribution import get_provider_registry from llama_stack.distribution.utils.dynamic import instantiate_class_type @@ -99,7 +103,9 @@ class StackBuild(Subcommand): self.parser.error( f"Please specify a image-type (docker | conda) for {args.template}" ) - self._run_stack_build_command_from_build_config(build_config) + self._run_stack_build_command_from_build_config( + build_config, template_name=args.template + ) return self.parser.error( @@ -193,7 +199,6 @@ class StackBuild(Subcommand): apis = list(build_config.distribution_spec.providers.keys()) run_config = StackRunConfig( - built_at=datetime.now(), docker_image=( build_config.name if build_config.image_type == ImageType.docker.value @@ -217,15 +222,23 @@ class StackBuild(Subcommand): provider_types = [provider_types] for i, provider_type in enumerate(provider_types): - p_spec = Provider( - provider_id=f"{provider_type}-{i}", - provider_type=provider_type, - config={}, - ) + pid = provider_type.split("::")[-1] + config_type = instantiate_class_type( provider_registry[Api(api)][provider_type].config_class ) - p_spec.config = config_type() + if hasattr(config_type, "sample_run_config"): + config = config_type.sample_run_config( + __distro_dir__=f"distributions/{build_config.name}" + ) + else: + config = {} + + p_spec = Provider( + provider_id=f"{pid}-{i}" if len(provider_types) > 1 else pid, + provider_type=provider_type, + config=config, + ) run_config.providers[api].append(p_spec) os.makedirs(build_dir, exist_ok=True) @@ -241,12 +254,13 @@ class StackBuild(Subcommand): ) def _run_stack_build_command_from_build_config( - self, build_config: BuildConfig + self, build_config: BuildConfig, template_name: Optional[str] = None ) -> None: import json import os import yaml + from termcolor import cprint from llama_stack.distribution.build import build_image from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR @@ -264,7 +278,29 @@ class StackBuild(Subcommand): if return_code != 0: return - 
self._generate_run_config(build_config, build_dir) + if template_name: + # copy run.yaml from template to build_dir instead of generating it again + template_path = pkg_resources.resource_filename( + "llama_stack", f"templates/{template_name}/run.yaml" + ) + os.makedirs(build_dir, exist_ok=True) + run_config_file = build_dir / f"{build_config.name}-run.yaml" + shutil.copy(template_path, run_config_file) + module_name = f"llama_stack.templates.{template_name}" + module = importlib.import_module(module_name) + distribution_template = module.get_distribution_template() + cprint("Build Successful! Next steps: ", color="green") + env_vars = ", ".join(distribution_template.run_config_env_vars.keys()) + cprint( + f" 1. Set the environment variables: {env_vars}", + color="green", + ) + cprint( + f" 2. `llama stack run {run_config_file}`", + color="green", + ) + else: + self._generate_run_config(build_config, build_dir) def _run_template_list_cmd(self, args: argparse.Namespace) -> None: import json diff --git a/llama_stack/cli/stack/run.py b/llama_stack/cli/stack/run.py index 5fce8c92c..c3ea174da 100644 --- a/llama_stack/cli/stack/run.py +++ b/llama_stack/cli/stack/run.py @@ -39,6 +39,13 @@ class StackRun(Subcommand): help="Disable IPv6 support", default=False, ) + self.parser.add_argument( + "--env", + action="append", + help="Environment variables to pass to the server in KEY=VALUE format. Can be specified multiple times.", + default=[], + metavar="KEY=VALUE", + ) def _run_stack_run_cmd(self, args: argparse.Namespace) -> None: from pathlib import Path @@ -108,4 +115,16 @@ class StackRun(Subcommand): if args.disable_ipv6: run_args.append("--disable-ipv6") + for env_var in args.env: + if "=" not in env_var: + self.parser.error( + f"Environment variable '{env_var}' must be in KEY=VALUE format" + ) + return + key, value = env_var.split("=", 1) # split on first = only + if not key: + self.parser.error(f"Environment variable '{env_var}' has empty key") + return + run_args.extend(["--env", f"{key}={value}"]) + run_with_pty(run_args) diff --git a/llama_stack/distribution/build_container.sh b/llama_stack/distribution/build_container.sh index 0764fee62..139883618 100755 --- a/llama_stack/distribution/build_container.sh +++ b/llama_stack/distribution/build_container.sh @@ -146,6 +146,8 @@ fi # Set version tag based on PyPI version if [ -n "$TEST_PYPI_VERSION" ]; then version_tag="test-$TEST_PYPI_VERSION" +elif [[ -n "$LLAMA_STACK_DIR" || -n "$LLAMA_MODELS_DIR" ]]; then + version_tag="dev" else URL="https://pypi.org/pypi/llama-stack/json" version_tag=$(curl -s $URL | jq -r '.info.version') diff --git a/llama_stack/distribution/configure.py b/llama_stack/distribution/configure.py index f91fbfc43..09e277dad 100644 --- a/llama_stack/distribution/configure.py +++ b/llama_stack/distribution/configure.py @@ -186,6 +186,5 @@ def parse_and_maybe_upgrade_config(config_dict: Dict[str, Any]) -> StackRunConfi config_dict = upgrade_from_routing_table(config_dict) config_dict["version"] = LLAMA_STACK_RUN_CONFIG_VERSION - config_dict["built_at"] = datetime.now().isoformat() return StackRunConfig(**config_dict) diff --git a/llama_stack/distribution/datatypes.py b/llama_stack/distribution/datatypes.py index 4aaf9c38a..c2bff4eed 100644 --- a/llama_stack/distribution/datatypes.py +++ b/llama_stack/distribution/datatypes.py @@ -4,8 +4,6 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
-from datetime import datetime
-
 from typing import Dict, List, Optional, Union
 
 from pydantic import BaseModel, Field
@@ -115,7 +113,6 @@ class Provider(BaseModel):
 
 class StackRunConfig(BaseModel):
     version: str = LLAMA_STACK_RUN_CONFIG_VERSION
-    built_at: datetime
 
     image_name: str = Field(
         ...,
diff --git a/llama_stack/distribution/server/server.py b/llama_stack/distribution/server/server.py
index 0cfd11eda..fecc41b5d 100644
--- a/llama_stack/distribution/server/server.py
+++ b/llama_stack/distribution/server/server.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import argparse
 import asyncio
 import functools
 import inspect
 import json
 import os
-import re
 import signal
 import sys
 import traceback
@@ -19,7 +19,6 @@ from contextlib import asynccontextmanager
 from ssl import SSLError
 from typing import Any, Dict, Optional
 
-import fire
 import httpx
 import yaml
 
@@ -41,7 +40,11 @@ from llama_stack.providers.utils.telemetry.tracing import (
 from llama_stack.distribution.datatypes import * # noqa: F403
 from llama_stack.distribution.request_headers import set_request_provider_data
 from llama_stack.distribution.resolver import InvalidProviderError
-from llama_stack.distribution.stack import construct_stack
+from llama_stack.distribution.stack import (
+    construct_stack,
+    replace_env_vars,
+    validate_env_pair,
+)
 
 from .endpoints import get_all_api_endpoints
 
@@ -271,64 +274,36 @@ def create_dynamic_typed_route(func: Any, method: str):
     return endpoint
 
 
-class EnvVarError(Exception):
-    def __init__(self, var_name: str, path: str = ""):
-        self.var_name = var_name
-        self.path = path
-        super().__init__(
-            f"Environment variable '{var_name}' not set or empty{f' at {path}' if path else ''}"
-        )
+def main():
+    """Start the LlamaStack server."""
+    parser = argparse.ArgumentParser(description="Start the LlamaStack server.")
+    parser.add_argument(
+        "--yaml-config",
+        default="llamastack-run.yaml",
+        help="Path to YAML configuration file",
+    )
+    parser.add_argument("--port", type=int, default=5000, help="Port to listen on")
+    parser.add_argument(
+        "--disable-ipv6", action="store_true", help="Whether to disable IPv6 support"
+    )
+    parser.add_argument(
+        "--env",
+        action="append",
+        help="Environment variables in KEY=value format. Can be specified multiple times.",
+    )
 
-
-def replace_env_vars(config: Any, path: str = "") -> Any:
-    if isinstance(config, dict):
-        result = {}
-        for k, v in config.items():
+    args = parser.parse_args()
+    if args.env:
+        for env_pair in args.env:
             try:
-                result[k] = replace_env_vars(v, f"{path}.{k}" if path else k)
-            except EnvVarError as e:
-                raise EnvVarError(e.var_name, e.path) from None
-        return result
+                key, value = validate_env_pair(env_pair)
+                print(f"Setting CLI environment variable {key} => {value}")
+                os.environ[key] = value
+            except ValueError as e:
+                print(f"Error: {str(e)}")
+                sys.exit(1)
 
-    elif isinstance(config, list):
-        result = []
-        for i, v in enumerate(config):
-            try:
-                result.append(replace_env_vars(v, f"{path}[{i}]"))
-            except EnvVarError as e:
-                raise EnvVarError(e.var_name, e.path) from None
-        return result
-
-    elif isinstance(config, str):
-        pattern = r"\${env\.([A-Z0-9_]+)(?::([^}]*))?}"
-
-        def get_env_var(match):
-            env_var = match.group(1)
-            default_val = match.group(2)
-
-            value = os.environ.get(env_var)
-            if not value:
-                if default_val is None:
-                    raise EnvVarError(env_var, path)
-                else:
-                    value = default_val
-
-            return value
-
-        try:
-            return re.sub(pattern, get_env_var, config)
-        except EnvVarError as e:
-            raise EnvVarError(e.var_name, e.path) from None
-
-    return config
-
-
-def main(
-    yaml_config: str = "llamastack-run.yaml",
-    port: int = 5000,
-    disable_ipv6: bool = False,
-):
-    with open(yaml_config, "r") as fp:
+    with open(args.yaml_config, "r") as fp:
         config = replace_env_vars(yaml.safe_load(fp))
         config = StackRunConfig(**config)
 
@@ -395,10 +370,10 @@ def main(
 
     # FYI this does not do hot-reloads
 
-    listen_host = ["::", "0.0.0.0"] if not disable_ipv6 else "0.0.0.0"
-    print(f"Listening on {listen_host}:{port}")
-    uvicorn.run(app, host=listen_host, port=port)
+    listen_host = ["::", "0.0.0.0"] if not args.disable_ipv6 else "0.0.0.0"
+    print(f"Listening on {listen_host}:{args.port}")
+    uvicorn.run(app, host=listen_host, port=args.port)
 
 
 if __name__ == "__main__":
-    fire.Fire(main)
+    main()
diff --git a/llama_stack/distribution/stack.py b/llama_stack/distribution/stack.py
index 1cffd7749..9bd058400 100644
--- a/llama_stack/distribution/stack.py
+++ b/llama_stack/distribution/stack.py
@@ -4,8 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import os
+import re
+from pathlib import Path
 from typing import Any, Dict
 
+import pkg_resources
+import yaml
+
 from termcolor import colored
 
 from llama_models.llama3.api.datatypes import * # noqa: F403
@@ -35,6 +41,9 @@ from llama_stack.distribution.store.registry import create_dist_registry
 from llama_stack.providers.datatypes import Api
 
 
+LLAMA_STACK_API_VERSION = "alpha"
+
+
 class LlamaStack(
     MemoryBanks,
     Inference,
@@ -92,6 +101,77 @@ async def register_resources(run_config: StackRunConfig, impls: Dict[Api, Any]):
     print("")
 
 
+class EnvVarError(Exception):
+    def __init__(self, var_name: str, path: str = ""):
+        self.var_name = var_name
+        self.path = path
+        super().__init__(
+            f"Environment variable '{var_name}' not set or empty{f' at {path}' if path else ''}"
+        )
+
+
+def replace_env_vars(config: Any, path: str = "") -> Any:
+    if isinstance(config, dict):
+        result = {}
+        for k, v in config.items():
+            try:
+                result[k] = replace_env_vars(v, f"{path}.{k}" if path else k)
+            except EnvVarError as e:
+                raise EnvVarError(e.var_name, e.path) from None
+        return result
+
+    elif isinstance(config, list):
+        result = []
+        for i, v in enumerate(config):
+            try:
+                result.append(replace_env_vars(v, f"{path}[{i}]"))
+            except EnvVarError as e:
+                raise EnvVarError(e.var_name, e.path) from None
+        return result
+
+    elif isinstance(config, str):
+        pattern = r"\${env\.([A-Z0-9_]+)(?::([^}]*))?}"
+
+        def get_env_var(match):
+            env_var = match.group(1)
+            default_val = match.group(2)
+
+            value = os.environ.get(env_var)
+            if not value:
+                if default_val is None:
+                    raise EnvVarError(env_var, path)
+                else:
+                    value = default_val
+
+            # expand "~" from the values
+            return os.path.expanduser(value)
+
+        try:
+            return re.sub(pattern, get_env_var, config)
+        except EnvVarError as e:
+            raise EnvVarError(e.var_name, e.path) from None
+
+    return config
+
+
+def validate_env_pair(env_pair: str) -> tuple[str, str]:
+    """Validate and split an environment variable key-value pair."""
+    try:
+        key, value = env_pair.split("=", 1)
+        key = key.strip()
+        if not key:
+            raise ValueError(f"Empty key in environment variable pair: {env_pair}")
+        if not all(c.isalnum() or c == "_" for c in key):
+            raise ValueError(
+                f"Key must contain only alphanumeric characters and underscores: {key}"
+            )
+        return key, value
+    except ValueError as e:
+        raise ValueError(
+            f"Invalid environment variable format '{env_pair}': {str(e)}. Expected format: KEY=value"
+        ) from e
+
+
 # Produces a stack of providers for the given run config. Not all APIs may be
 # asked for in the run config.
async def construct_stack( @@ -105,3 +184,17 @@ async def construct_stack( ) await register_resources(run_config, impls) return impls + + +def get_stack_run_config_from_template(template: str) -> StackRunConfig: + template_path = pkg_resources.resource_filename( + "llama_stack", f"templates/{template}/run.yaml" + ) + + if not Path(template_path).exists(): + raise ValueError(f"Template '{template}' not found at {template_path}") + + with open(template_path) as f: + run_config = yaml.safe_load(f) + + return StackRunConfig(**replace_env_vars(run_config)) diff --git a/llama_stack/distribution/start_conda_env.sh b/llama_stack/distribution/start_conda_env.sh index 3d91564b8..f478a8bd8 100755 --- a/llama_stack/distribution/start_conda_env.sh +++ b/llama_stack/distribution/start_conda_env.sh @@ -33,10 +33,33 @@ shift port="$1" shift +# Process environment variables from --env arguments +env_vars="" +while [[ $# -gt 0 ]]; do + case "$1" in + --env) + + if [[ -n "$2" ]]; then + # collect environment variables so we can set them after activating the conda env + env_vars="$env_vars --env $2" + shift 2 + else + echo -e "${RED}Error: --env requires a KEY=VALUE argument${NC}" >&2 + exit 1 + fi + ;; + *) + shift + ;; + esac +done + eval "$(conda shell.bash hook)" conda deactivate && conda activate "$env_name" +set -x $CONDA_PREFIX/bin/python \ -m llama_stack.distribution.server.server \ - --yaml_config "$yaml_config" \ - --port "$port" "$@" + --yaml-config "$yaml_config" \ + --port "$port" \ + $env_vars diff --git a/llama_stack/distribution/start_container.sh b/llama_stack/distribution/start_container.sh index 1efb76fb9..34476c8e0 100755 --- a/llama_stack/distribution/start_container.sh +++ b/llama_stack/distribution/start_container.sh @@ -31,7 +31,7 @@ if [ $# -lt 3 ]; then fi build_name="$1" -docker_image="distribution-$build_name" +docker_image="localhost/distribution-$build_name" shift yaml_config="$1" @@ -40,6 +40,26 @@ shift port="$1" shift +# Process environment variables from --env arguments +env_vars="" +while [[ $# -gt 0 ]]; do + case "$1" in + --env) + echo "env = $2" + if [[ -n "$2" ]]; then + env_vars="$env_vars -e $2" + shift 2 + else + echo -e "${RED}Error: --env requires a KEY=VALUE argument${NC}" >&2 + exit 1 + fi + ;; + *) + shift + ;; + esac +done + set -x if command -v selinuxenabled &> /dev/null && selinuxenabled; then @@ -59,15 +79,18 @@ fi version_tag="latest" if [ -n "$PYPI_VERSION" ]; then version_tag="$PYPI_VERSION" +elif [ -n "$LLAMA_STACK_DIR" ]; then + version_tag="dev" elif [ -n "$TEST_PYPI_VERSION" ]; then version_tag="test-$TEST_PYPI_VERSION" fi $DOCKER_BINARY run $DOCKER_OPTS -it \ -p $port:$port \ + $env_vars \ -v "$yaml_config:/app/config.yaml" \ $mounts \ $docker_image:$version_tag \ python -m llama_stack.distribution.server.server \ - --yaml_config /app/config.yaml \ - --port $port "$@" + --yaml-config /app/config.yaml \ + --port "$port" diff --git a/llama_stack/providers/inline/agents/meta_reference/config.py b/llama_stack/providers/inline/agents/meta_reference/config.py index 2770ed13c..ff34e5d5f 100644 --- a/llama_stack/providers/inline/agents/meta_reference/config.py +++ b/llama_stack/providers/inline/agents/meta_reference/config.py @@ -4,11 +4,22 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
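Taken together, the pieces above give a small workflow: `replace_env_vars` resolves `${env.VAR}` and `${env.VAR:default}` references (expanding `~` via `os.path.expanduser`), and `get_stack_run_config_from_template` applies that substitution to a packaged template's `run.yaml`. A minimal sketch, assuming the package and its templates are installed and using a stand-in key:

```python
import os

from llama_stack.distribution.stack import (
    get_stack_run_config_from_template,
    replace_env_vars,
)

os.environ["FIREWORKS_API_KEY"] = "fw-test-key"  # stand-in value

config = {
    "api_key": "${env.FIREWORKS_API_KEY}",  # no default: raises EnvVarError if unset
    "db_path": "${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db",
}
resolved = replace_env_vars(config)
assert resolved["api_key"] == "fw-test-key"
assert resolved["db_path"].endswith("/.llama/distributions/fireworks/registry.db")

# The same substitution is applied when loading a packaged template:
run_config = get_stack_run_config_from_template("fireworks")
```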
-from pydantic import BaseModel, Field +from typing import Any, Dict + +from pydantic import BaseModel from llama_stack.providers.utils.kvstore import KVStoreConfig from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig class MetaReferenceAgentsImplConfig(BaseModel): - persistence_store: KVStoreConfig = Field(default=SqliteKVStoreConfig()) + persistence_store: KVStoreConfig + + @classmethod + def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]: + return { + "persistence_store": SqliteKVStoreConfig.sample_run_config( + __distro_dir__=__distro_dir__, + db_name="agents_store.db", + ) + } diff --git a/llama_stack/providers/inline/eval/meta_reference/__init__.py b/llama_stack/providers/inline/eval/meta_reference/__init__.py index fb285c668..56c115322 100644 --- a/llama_stack/providers/inline/eval/meta_reference/__init__.py +++ b/llama_stack/providers/inline/eval/meta_reference/__init__.py @@ -22,6 +22,7 @@ async def get_provider_impl( deps[Api.datasets], deps[Api.scoring], deps[Api.inference], + deps[Api.agents], ) await impl.initialize() return impl diff --git a/llama_stack/providers/inline/eval/meta_reference/eval.py b/llama_stack/providers/inline/eval/meta_reference/eval.py index aa22ad31b..d1df869b4 100644 --- a/llama_stack/providers/inline/eval/meta_reference/eval.py +++ b/llama_stack/providers/inline/eval/meta_reference/eval.py @@ -9,6 +9,7 @@ from llama_models.llama3.api.datatypes import * # noqa: F403 from .....apis.common.job_types import Job from .....apis.eval.eval import Eval, EvalTaskConfig, EvaluateResponse, JobStatus from llama_stack.apis.common.type_system import * # noqa: F403 +from llama_stack.apis.agents import Agents from llama_stack.apis.datasetio import DatasetIO from llama_stack.apis.datasets import Datasets from llama_stack.apis.eval_tasks import EvalTask @@ -39,12 +40,14 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate): datasets_api: Datasets, scoring_api: Scoring, inference_api: Inference, + agents_api: Agents, ) -> None: self.config = config self.datasetio_api = datasetio_api self.datasets_api = datasets_api self.scoring_api = scoring_api self.inference_api = inference_api + self.agents_api = agents_api # TODO: assume sync job, will need jobs API for async scheduling self.jobs = {} @@ -126,18 +129,50 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate): self.jobs[job_id] = res return Job(job_id=job_id) - async def evaluate_rows( - self, - task_id: str, - input_rows: List[Dict[str, Any]], - scoring_functions: List[str], - task_config: EvalTaskConfig, - ) -> EvaluateResponse: + async def _run_agent_generation( + self, input_rows: List[Dict[str, Any]], task_config: EvalTaskConfig + ) -> List[Dict[str, Any]]: candidate = task_config.eval_candidate - if candidate.type == "agent": - raise NotImplementedError( - "Evaluation with generation has not been implemented for agents" + create_response = await self.agents_api.create_agent(candidate.config) + agent_id = create_response.agent_id + + generations = [] + for i, x in tqdm(enumerate(input_rows)): + assert ColumnName.chat_completion_input.value in x, "Invalid input row" + input_messages = eval(str(x[ColumnName.chat_completion_input.value])) + input_messages = [UserMessage(**x) for x in input_messages] + + # NOTE: only single-turn agent generation is supported. 
Create a new session for each input row + session_create_response = await self.agents_api.create_agent_session( + agent_id, f"session-{i}" ) + session_id = session_create_response.session_id + + turn_request = dict( + agent_id=agent_id, + session_id=session_id, + messages=input_messages, + stream=True, + ) + turn_response = [ + chunk + async for chunk in await self.agents_api.create_agent_turn( + **turn_request + ) + ] + final_event = turn_response[-1].event.payload + generations.append( + { + ColumnName.generated_answer.value: final_event.turn.output_message.content + } + ) + + return generations + + async def _run_model_generation( + self, input_rows: List[Dict[str, Any]], task_config: EvalTaskConfig + ) -> List[Dict[str, Any]]: + candidate = task_config.eval_candidate assert ( candidate.sampling_params.max_tokens is not None ), "SamplingParams.max_tokens must be provided" @@ -179,6 +214,23 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate): else: raise ValueError("Invalid input row") + return generations + + async def evaluate_rows( + self, + task_id: str, + input_rows: List[Dict[str, Any]], + scoring_functions: List[str], + task_config: EvalTaskConfig, + ) -> EvaluateResponse: + candidate = task_config.eval_candidate + if candidate.type == "agent": + generations = await self._run_agent_generation(input_rows, task_config) + elif candidate.type == "model": + generations = await self._run_model_generation(input_rows, task_config) + else: + raise ValueError(f"Invalid candidate type: {candidate.type}") + # scoring with generated_answer score_input_rows = [ input_r | generated_r diff --git a/llama_stack/providers/inline/inference/meta_reference/config.py b/llama_stack/providers/inline/inference/meta_reference/config.py index 48cba645b..11648b117 100644 --- a/llama_stack/providers/inline/inference/meta_reference/config.py +++ b/llama_stack/providers/inline/inference/meta_reference/config.py @@ -49,6 +49,18 @@ class MetaReferenceInferenceConfig(BaseModel): resolved = resolve_model(self.model) return resolved.pth_file_count + @classmethod + def sample_run_config( + cls, + model: str = "Llama3.2-3B-Instruct", + checkpoint_dir: str = "${env.CHECKPOINT_DIR:null}", + ) -> Dict[str, Any]: + return { + "model": model, + "max_seq_len": 4096, + "checkpoint_dir": checkpoint_dir, + } + class MetaReferenceQuantizedInferenceConfig(MetaReferenceInferenceConfig): quantization: QuantizationConfig diff --git a/llama_stack/providers/inline/inference/meta_reference/generation.py b/llama_stack/providers/inline/inference/meta_reference/generation.py index 38c982473..577f5184b 100644 --- a/llama_stack/providers/inline/inference/meta_reference/generation.py +++ b/llama_stack/providers/inline/inference/meta_reference/generation.py @@ -107,7 +107,7 @@ class Llama: sys.stdout = open(os.devnull, "w") start_time = time.time() - if config.checkpoint_dir: + if config.checkpoint_dir and config.checkpoint_dir != "null": ckpt_dir = config.checkpoint_dir else: ckpt_dir = model_checkpoint_dir(model) @@ -137,7 +137,6 @@ class Llama: ), f"model_args vocab = {model_args.vocab_size} but tokenizer vocab = {tokenizer.n_words}" if isinstance(config, MetaReferenceQuantizedInferenceConfig): - if isinstance(config.quantization, Fp8QuantizationConfig): from .quantization.loader import convert_to_fp8_quantized_model diff --git a/llama_stack/providers/inline/inference/vllm/config.py b/llama_stack/providers/inline/inference/vllm/config.py index a7469ebde..e5516673c 100644 --- 
a/llama_stack/providers/inline/inference/vllm/config.py +++ b/llama_stack/providers/inline/inference/vllm/config.py @@ -34,6 +34,16 @@ class VLLMConfig(BaseModel): default=0.3, ) + @classmethod + def sample_run_config(cls): + return { + "model": "${env.VLLM_INFERENCE_MODEL:Llama3.2-3B-Instruct}", + "tensor_parallel_size": "${env.VLLM_TENSOR_PARALLEL_SIZE:1}", + "max_tokens": "${env.VLLM_MAX_TOKENS:4096}", + "enforce_eager": "${env.VLLM_ENFORCE_EAGER:False}", + "gpu_memory_utilization": "${env.VLLM_GPU_MEMORY_UTILIZATION:0.3}", + } + @field_validator("model") @classmethod def validate_model(cls, model: str) -> str: diff --git a/llama_stack/providers/inline/memory/faiss/config.py b/llama_stack/providers/inline/memory/faiss/config.py index 41970b05f..d82104477 100644 --- a/llama_stack/providers/inline/memory/faiss/config.py +++ b/llama_stack/providers/inline/memory/faiss/config.py @@ -4,10 +4,11 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. +from typing import Any, Dict + from llama_models.schema_utils import json_schema_type from pydantic import BaseModel -from llama_stack.distribution.utils.config_dirs import RUNTIME_BASE_DIR from llama_stack.providers.utils.kvstore.config import ( KVStoreConfig, SqliteKVStoreConfig, @@ -16,6 +17,13 @@ from llama_stack.providers.utils.kvstore.config import ( @json_schema_type class FaissImplConfig(BaseModel): - kvstore: KVStoreConfig = SqliteKVStoreConfig( - db_path=(RUNTIME_BASE_DIR / "faiss_store.db").as_posix() - ) # Uses SQLite config specific to FAISS storage + kvstore: KVStoreConfig + + @classmethod + def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]: + return { + "kvstore": SqliteKVStoreConfig.sample_run_config( + __distro_dir__=__distro_dir__, + db_name="faiss_store.db", + ) + } diff --git a/llama_stack/providers/inline/safety/llama_guard/llama_guard.py b/llama_stack/providers/inline/safety/llama_guard/llama_guard.py index 9950064a4..f201d550f 100644 --- a/llama_stack/providers/inline/safety/llama_guard/llama_guard.py +++ b/llama_stack/providers/inline/safety/llama_guard/llama_guard.py @@ -73,18 +73,21 @@ DEFAULT_LG_V3_SAFETY_CATEGORIES = [ CAT_ELECTIONS, ] -LLAMA_GUARD_MODEL_IDS = [ - CoreModelId.llama_guard_3_8b.value, - CoreModelId.llama_guard_3_1b.value, - CoreModelId.llama_guard_3_11b_vision.value, -] +# accept both CoreModelId and huggingface repo id +LLAMA_GUARD_MODEL_IDS = { + CoreModelId.llama_guard_3_8b.value: "meta-llama/Llama-Guard-3-8B", + "meta-llama/Llama-Guard-3-8B": "meta-llama/Llama-Guard-3-8B", + CoreModelId.llama_guard_3_1b.value: "meta-llama/Llama-Guard-3-1B", + "meta-llama/Llama-Guard-3-1B": "meta-llama/Llama-Guard-3-1B", + CoreModelId.llama_guard_3_11b_vision.value: "meta-llama/Llama-Guard-3-11B-Vision", + "meta-llama/Llama-Guard-3-11B-Vision": "meta-llama/Llama-Guard-3-11B-Vision", +} MODEL_TO_SAFETY_CATEGORIES_MAP = { - CoreModelId.llama_guard_3_8b.value: ( - DEFAULT_LG_V3_SAFETY_CATEGORIES + [CAT_CODE_INTERPRETER_ABUSE] - ), - CoreModelId.llama_guard_3_1b.value: DEFAULT_LG_V3_SAFETY_CATEGORIES, - CoreModelId.llama_guard_3_11b_vision.value: DEFAULT_LG_V3_SAFETY_CATEGORIES, + "meta-llama/Llama-Guard-3-8B": DEFAULT_LG_V3_SAFETY_CATEGORIES + + [CAT_CODE_INTERPRETER_ABUSE], + "meta-llama/Llama-Guard-3-1B": DEFAULT_LG_V3_SAFETY_CATEGORIES, + "meta-llama/Llama-Guard-3-11B-Vision": DEFAULT_LG_V3_SAFETY_CATEGORIES, } @@ -150,8 +153,9 @@ class LlamaGuardSafetyImpl(Safety, ShieldsProtocolPrivate): if len(messages) > 0 and messages[0].role != 
Role.user.value: messages[0] = UserMessage(content=messages[0].content) + model = LLAMA_GUARD_MODEL_IDS[shield.provider_resource_id] impl = LlamaGuardShield( - model=shield.provider_resource_id, + model=model, inference_api=self.inference_api, excluded_categories=self.config.excluded_categories, ) diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py new file mode 100644 index 000000000..8ed501099 --- /dev/null +++ b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py @@ -0,0 +1,91 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from llama_stack.apis.common.type_system import NumberType +from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams, ScoringFn + +GRADER_TEMPLATE = """ +Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"]. +First, I will give examples of each grade, and then you will grade a new example. +The following are examples of CORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia Obama and Sasha Obama +Predicted answer 1: sasha and malia obama +Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check +Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001. +``` +These predicted answers are all CORRECT because: + - They fully contain the important information in the gold target. + - They do not contain any information that contradicts the gold target. + - Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter. + - Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions. +The following are examples of INCORRECT predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: Malia. +Predicted answer 2: Malia, Sasha, and Susan. +Predicted answer 3: Barack Obama does not have any children. +Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia. +Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children. +Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer? +Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information. +``` +These predicted answers are all INCORRECT because: + - A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect. 
+The following are examples of NOT_ATTEMPTED predicted answers. +``` +Question: What are the names of Barack Obama's children? +Gold target: Malia and Sasha +Predicted answer 1: I don't know. +Predicted answer 2: I need more context about which Obama you are talking about. +Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children. +Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one. +``` +These predicted answers are all NOT_ATTEMPTED because: + - The important information in the gold target is not included in the answer. + - No statements in the answer contradict the gold target. +Also note the following things: +- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k". + - Predicted answers "120k", "124k", and 115k" are all CORRECT. + - Predicted answers "100k" and "113k" are INCORRECT. + - Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target. +- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question. + - For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer. +- Do not punish predicted answers if they omit information that would be clearly inferred from the question. + - For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California". + - Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question. + - For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question. + - For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed. +- Do not punish for typos in people's name if it's clearly the same name. + - For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung". +Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. +``` +Question: {input_query} +Gold target: {expected_answer} +Predicted answer: {generated_answer} +``` +Grade the predicted answer of this new question as one of: +A: CORRECT +B: INCORRECT +C: NOT_ATTEMPTED +Just return the letters "A", "B", or "C", with no text around it. 
+""".strip() + + +llm_as_judge_405b_simpleqa = ScoringFn( + identifier="llm-as-judge::405b-simpleqa", + description="Llm As Judge Scoring Function for SimpleQA Benchmark (https://github.com/openai/simple-evals/blob/main/simpleqa_eval.py)", + return_type=NumberType(), + provider_id="llm-as-judge", + provider_resource_id="llm-as-judge-405b-simpleqa", + params=LLMAsJudgeScoringFnParams( + judge_model="Llama3.1-405B-Instruct", + prompt_template=GRADER_TEMPLATE, + judge_score_regexes=[r"(A|B|C)"], + ), +) diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py index 51517a0b0..b00b9a7db 100644 --- a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +++ b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py @@ -9,7 +9,7 @@ from llama_stack.apis.scoring_functions import ScoringFn llm_as_judge_base = ScoringFn( - identifier="llm-as-judge::llm_as_judge_base", + identifier="llm-as-judge::base", description="Llm As Judge Scoring Function", return_type=NumberType(), provider_id="llm-as-judge", diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py index 857b8a653..3f4df3304 100644 --- a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +++ b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py @@ -11,6 +11,8 @@ from llama_stack.apis.scoring import * # noqa: F401, F403 from llama_stack.apis.common.type_system import * # noqa: F403 import re +from .fn_defs.llm_as_judge_405b_simpleqa import llm_as_judge_405b_simpleqa + from .fn_defs.llm_as_judge_base import llm_as_judge_base @@ -24,6 +26,7 @@ class LlmAsJudgeScoringFn(BaseScoringFn): self.inference_api = inference_api self.supported_fn_defs_registry = { llm_as_judge_base.identifier: llm_as_judge_base, + llm_as_judge_405b_simpleqa.identifier: llm_as_judge_405b_simpleqa, } async def score_row( diff --git a/llama_stack/providers/registry/eval.py b/llama_stack/providers/registry/eval.py index 3fa5c75e0..718c7eae5 100644 --- a/llama_stack/providers/registry/eval.py +++ b/llama_stack/providers/registry/eval.py @@ -22,6 +22,7 @@ def available_providers() -> List[ProviderSpec]: Api.datasets, Api.scoring, Api.inference, + Api.agents, ], ), ] diff --git a/llama_stack/providers/remote/inference/fireworks/config.py b/llama_stack/providers/remote/inference/fireworks/config.py index 275ce99e7..062c1e1ea 100644 --- a/llama_stack/providers/remote/inference/fireworks/config.py +++ b/llama_stack/providers/remote/inference/fireworks/config.py @@ -4,7 +4,7 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. 
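The SimpleQA grader above asks the judge for a bare letter, and `judge_score_regexes` extracts it. Roughly, the extraction behaves as below; the actual parsing and score aggregation live in the llm-as-judge scoring provider:

```python
import re

judge_response = "A"  # the grader prompt requests exactly "A", "B", or "C"
match = re.search(r"(A|B|C)", judge_response)
grade = {"A": "CORRECT", "B": "INCORRECT", "C": "NOT_ATTEMPTED"}[match.group(1)]
print(grade)  # CORRECT
```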
-from typing import Optional +from typing import Any, Dict, Optional from llama_models.schema_utils import json_schema_type from pydantic import BaseModel, Field @@ -20,3 +20,10 @@ class FireworksImplConfig(BaseModel): default=None, description="The Fireworks.ai API Key", ) + + @classmethod + def sample_run_config(cls) -> Dict[str, Any]: + return { + "url": "https://api.fireworks.ai/inference", + "api_key": "${env.FIREWORKS_API_KEY}", + } diff --git a/llama_stack/providers/remote/inference/fireworks/fireworks.py b/llama_stack/providers/remote/inference/fireworks/fireworks.py index 42075eff7..3ff50d378 100644 --- a/llama_stack/providers/remote/inference/fireworks/fireworks.py +++ b/llama_stack/providers/remote/inference/fireworks/fireworks.py @@ -35,7 +35,7 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( from .config import FireworksImplConfig -model_aliases = [ +MODEL_ALIASES = [ build_model_alias( "fireworks/llama-v3p1-8b-instruct", CoreModelId.llama3_1_8b_instruct.value, @@ -79,7 +79,7 @@ class FireworksInferenceAdapter( ModelRegistryHelper, Inference, NeedsRequestProviderData ): def __init__(self, config: FireworksImplConfig) -> None: - ModelRegistryHelper.__init__(self, model_aliases) + ModelRegistryHelper.__init__(self, MODEL_ALIASES) self.config = config self.formatter = ChatFormat(Tokenizer.get_instance()) diff --git a/llama_stack/providers/remote/inference/nvidia/_nvidia.py b/llama_stack/providers/remote/inference/nvidia/_nvidia.py index e5667b728..c5bfa0f25 100644 --- a/llama_stack/providers/remote/inference/nvidia/_nvidia.py +++ b/llama_stack/providers/remote/inference/nvidia/_nvidia.py @@ -30,7 +30,7 @@ from llama_stack.apis.inference import ( ResponseFormat, ) from llama_stack.providers.utils.inference.model_registry import ( - build_model_alias, + build_model_alias_with_just_provider_model_id, ModelRegistryHelper, ) @@ -43,39 +43,39 @@ from ._openai_utils import ( from ._utils import check_health _MODEL_ALIASES = [ - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama3-8b-instruct", CoreModelId.llama3_8b_instruct.value, ), - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama3-70b-instruct", CoreModelId.llama3_70b_instruct.value, ), - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama-3.1-8b-instruct", CoreModelId.llama3_1_8b_instruct.value, ), - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama-3.1-70b-instruct", CoreModelId.llama3_1_70b_instruct.value, ), - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama-3.1-405b-instruct", CoreModelId.llama3_1_405b_instruct.value, ), - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama-3.2-1b-instruct", CoreModelId.llama3_2_1b_instruct.value, ), - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama-3.2-3b-instruct", CoreModelId.llama3_2_3b_instruct.value, ), - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama-3.2-11b-vision-instruct", CoreModelId.llama3_2_11b_vision_instruct.value, ), - build_model_alias( + build_model_alias_with_just_provider_model_id( "meta/llama-3.2-90b-vision-instruct", CoreModelId.llama3_2_90b_vision_instruct.value, ), diff --git a/llama_stack/providers/remote/inference/ollama/__init__.py b/llama_stack/providers/remote/inference/ollama/__init__.py index 7763af8d1..073c31cde 100644 --- a/llama_stack/providers/remote/inference/ollama/__init__.py +++ 
b/llama_stack/providers/remote/inference/ollama/__init__.py @@ -4,14 +4,10 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from llama_stack.distribution.datatypes import RemoteProviderConfig +from .config import OllamaImplConfig -class OllamaImplConfig(RemoteProviderConfig): - port: int = 11434 - - -async def get_adapter_impl(config: RemoteProviderConfig, _deps): +async def get_adapter_impl(config: OllamaImplConfig, _deps): from .ollama import OllamaInferenceAdapter impl = OllamaInferenceAdapter(config.url) diff --git a/llama_stack/providers/remote/inference/ollama/config.py b/llama_stack/providers/remote/inference/ollama/config.py new file mode 100644 index 000000000..ad16cac62 --- /dev/null +++ b/llama_stack/providers/remote/inference/ollama/config.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from typing import Any, Dict + +from pydantic import BaseModel + + +DEFAULT_OLLAMA_URL = "http://localhost:11434" + + +class OllamaImplConfig(BaseModel): + url: str = DEFAULT_OLLAMA_URL + + @classmethod + def sample_run_config( + cls, url: str = "${env.OLLAMA_URL:http://localhost:11434}", **kwargs + ) -> Dict[str, Any]: + return {"url": url} diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py index 3b3f3868b..f53ed4e14 100644 --- a/llama_stack/providers/remote/inference/ollama/ollama.py +++ b/llama_stack/providers/remote/inference/ollama/ollama.py @@ -16,6 +16,7 @@ from ollama import AsyncClient from llama_stack.providers.utils.inference.model_registry import ( build_model_alias, + build_model_alias_with_just_provider_model_id, ModelRegistryHelper, ) @@ -44,10 +45,18 @@ model_aliases = [ "llama3.1:8b-instruct-fp16", CoreModelId.llama3_1_8b_instruct.value, ), + build_model_alias_with_just_provider_model_id( + "llama3.1:8b", + CoreModelId.llama3_1_8b_instruct.value, + ), build_model_alias( "llama3.1:70b-instruct-fp16", CoreModelId.llama3_1_70b_instruct.value, ), + build_model_alias_with_just_provider_model_id( + "llama3.1:70b", + CoreModelId.llama3_1_70b_instruct.value, + ), build_model_alias( "llama3.2:1b-instruct-fp16", CoreModelId.llama3_2_1b_instruct.value, @@ -56,6 +65,24 @@ model_aliases = [ "llama3.2:3b-instruct-fp16", CoreModelId.llama3_2_3b_instruct.value, ), + build_model_alias_with_just_provider_model_id( + "llama3.2:1b", + CoreModelId.llama3_2_1b_instruct.value, + ), + build_model_alias_with_just_provider_model_id( + "llama3.2:3b", + CoreModelId.llama3_2_3b_instruct.value, + ), + build_model_alias( + "llama3.2-vision:11b-instruct-fp16", + CoreModelId.llama3_2_11b_vision_instruct.value, + ), + build_model_alias_with_just_provider_model_id( + "llama3.2-vision", + CoreModelId.llama3_2_11b_vision_instruct.value, + ), + # The Llama Guard models don't have their full fp16 versions + # so we are going to alias their default version to the canonical SKU build_model_alias( "llama-guard3:8b", CoreModelId.llama_guard_3_8b.value, @@ -64,10 +91,6 @@ model_aliases = [ "llama-guard3:1b", CoreModelId.llama_guard_3_1b.value, ), - build_model_alias( - "x/llama3.2-vision:11b-instruct-fp16", - CoreModelId.llama3_2_11b_vision_instruct.value, - ), ] @@ -82,7 +105,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate): return AsyncClient(host=self.url) async def 
initialize(self) -> None: - print("Initializing Ollama, checking connectivity to server...") + print(f"checking connectivity to Ollama at `{self.url}`...") try: await self.client.ps() except httpx.ConnectError as e: diff --git a/llama_stack/providers/remote/inference/tgi/config.py b/llama_stack/providers/remote/inference/tgi/config.py index 863f81bf7..55bda4179 100644 --- a/llama_stack/providers/remote/inference/tgi/config.py +++ b/llama_stack/providers/remote/inference/tgi/config.py @@ -12,19 +12,20 @@ from pydantic import BaseModel, Field @json_schema_type class TGIImplConfig(BaseModel): - host: str = "localhost" - port: int = 8080 - protocol: str = "http" - - @property - def url(self) -> str: - return f"{self.protocol}://{self.host}:{self.port}" - + url: str = Field( + description="The URL for the TGI serving endpoint", + ) api_token: Optional[str] = Field( default=None, description="A bearer token if your TGI endpoint is protected.", ) + @classmethod + def sample_run_config(cls, url: str = "${env.TGI_URL}", **kwargs): + return { + "url": url, + } + @json_schema_type class InferenceEndpointImplConfig(BaseModel): diff --git a/llama_stack/providers/remote/inference/together/config.py b/llama_stack/providers/remote/inference/together/config.py index e928a771d..ecbe9ec06 100644 --- a/llama_stack/providers/remote/inference/together/config.py +++ b/llama_stack/providers/remote/inference/together/config.py @@ -4,7 +4,7 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from typing import Optional +from typing import Any, Dict, Optional from llama_models.schema_utils import json_schema_type from pydantic import BaseModel, Field @@ -20,3 +20,10 @@ class TogetherImplConfig(BaseModel): default=None, description="The Together AI API Key", ) + + @classmethod + def sample_run_config(cls, **kwargs) -> Dict[str, Any]: + return { + "url": "https://api.together.xyz/v1", + "api_key": "${env.TOGETHER_API_KEY}", + } diff --git a/llama_stack/providers/remote/inference/together/together.py b/llama_stack/providers/remote/inference/together/together.py index aae34bb87..e7c96ce98 100644 --- a/llama_stack/providers/remote/inference/together/together.py +++ b/llama_stack/providers/remote/inference/together/together.py @@ -38,7 +38,7 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( from .config import TogetherImplConfig -model_aliases = [ +MODEL_ALIASES = [ build_model_alias( "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", CoreModelId.llama3_1_8b_instruct.value, @@ -78,7 +78,7 @@ class TogetherInferenceAdapter( ModelRegistryHelper, Inference, NeedsRequestProviderData ): def __init__(self, config: TogetherImplConfig) -> None: - ModelRegistryHelper.__init__(self, model_aliases) + ModelRegistryHelper.__init__(self, MODEL_ALIASES) self.config = config self.formatter = ChatFormat(Tokenizer.get_instance()) diff --git a/llama_stack/providers/remote/inference/vllm/config.py b/llama_stack/providers/remote/inference/vllm/config.py index 50a174589..a3a4c6930 100644 --- a/llama_stack/providers/remote/inference/vllm/config.py +++ b/llama_stack/providers/remote/inference/vllm/config.py @@ -24,3 +24,15 @@ class VLLMInferenceAdapterConfig(BaseModel): default="fake", description="The API token", ) + + @classmethod + def sample_run_config( + cls, + url: str = "${env.VLLM_URL}", + **kwargs, + ): + return { + "url": url, + "max_tokens": "${env.VLLM_MAX_TOKENS:4096}", + "api_token": "${env.VLLM_API_TOKEN:fake}", + } diff --git 
a/llama_stack/providers/tests/README.md b/llama_stack/providers/tests/README.md index 90b41a631..4b406b321 100644 --- a/llama_stack/providers/tests/README.md +++ b/llama_stack/providers/tests/README.md @@ -44,7 +44,7 @@ Finally, you can override the model completely by doing: ```bash pytest -s -v llama_stack/providers/tests/inference/test_text_inference.py \ -m fireworks \ - --inference-model "Llama3.1-70B-Instruct" \ + --inference-model "meta-llama/Llama3.1-70B-Instruct" \ --env FIREWORKS_API_KEY=<...> ``` diff --git a/llama_stack/providers/tests/agents/conftest.py b/llama_stack/providers/tests/agents/conftest.py index 6ce7913d7..7d8d4d089 100644 --- a/llama_stack/providers/tests/agents/conftest.py +++ b/llama_stack/providers/tests/agents/conftest.py @@ -81,13 +81,13 @@ def pytest_addoption(parser): parser.addoption( "--inference-model", action="store", - default="Llama3.1-8B-Instruct", + default="meta-llama/Llama-3.1-8B-Instruct", help="Specify the inference model to use for testing", ) parser.addoption( "--safety-shield", action="store", - default="Llama-Guard-3-8B", + default="meta-llama/Llama-Guard-3-8B", help="Specify the safety shield to use for testing", ) diff --git a/llama_stack/providers/tests/agents/fixtures.py b/llama_stack/providers/tests/agents/fixtures.py index 1f89b909a..93a011c95 100644 --- a/llama_stack/providers/tests/agents/fixtures.py +++ b/llama_stack/providers/tests/agents/fixtures.py @@ -83,6 +83,6 @@ async def agents_stack(request, inference_model, safety_shield): ) for model in inference_models ], - shields=[safety_shield], + shields=[safety_shield] if safety_shield else [], ) return test_stack diff --git a/llama_stack/providers/tests/eval/conftest.py b/llama_stack/providers/tests/eval/conftest.py index caf7f0290..171fae51a 100644 --- a/llama_stack/providers/tests/eval/conftest.py +++ b/llama_stack/providers/tests/eval/conftest.py @@ -63,7 +63,7 @@ def pytest_addoption(parser): parser.addoption( "--inference-model", action="store", - default="Llama3.2-3B-Instruct", + default="meta-llama/Llama-3.2-3B-Instruct", help="Specify the inference model to use for testing", ) diff --git a/llama_stack/providers/tests/inference/conftest.py b/llama_stack/providers/tests/inference/conftest.py index ba60b9925..d013d6a9e 100644 --- a/llama_stack/providers/tests/inference/conftest.py +++ b/llama_stack/providers/tests/inference/conftest.py @@ -32,8 +32,12 @@ def pytest_configure(config): MODEL_PARAMS = [ - pytest.param("Llama3.1-8B-Instruct", marks=pytest.mark.llama_8b, id="llama_8b"), - pytest.param("Llama3.2-3B-Instruct", marks=pytest.mark.llama_3b, id="llama_3b"), + pytest.param( + "meta-llama/Llama-3.1-8B-Instruct", marks=pytest.mark.llama_8b, id="llama_8b" + ), + pytest.param( + "meta-llama/Llama-3.2-3B-Instruct", marks=pytest.mark.llama_3b, id="llama_3b" + ), ] VISION_MODEL_PARAMS = [ diff --git a/llama_stack/providers/tests/inference/test_model_registration.py b/llama_stack/providers/tests/inference/test_model_registration.py index 0f07badfa..07100c982 100644 --- a/llama_stack/providers/tests/inference/test_model_registration.py +++ b/llama_stack/providers/tests/inference/test_model_registration.py @@ -6,7 +6,6 @@ import pytest -from llama_models.datatypes import CoreModelId # How to run this test: # @@ -17,11 +16,22 @@ from llama_models.datatypes import CoreModelId class TestModelRegistration: @pytest.mark.asyncio - async def test_register_unsupported_model(self, inference_stack): - _, models_impl = inference_stack + async def test_register_unsupported_model(self, 
inference_stack, inference_model): + inference_impl, models_impl = inference_stack + + provider = inference_impl.routing_table.get_provider_impl(inference_model) + if provider.__provider_spec__.provider_type not in ( + "meta-reference", + "remote::ollama", + "remote::vllm", + "remote::tgi", + ): + pytest.skip( + "Skipping test for remote inference providers since they can handle large models like 70B instruct" + ) # Try to register a model that's too large for local inference - with pytest.raises(Exception) as exc_info: + with pytest.raises(ValueError) as exc_info: await models_impl.register_model( model_id="Llama3.1-70B-Instruct", ) @@ -37,21 +47,27 @@ class TestModelRegistration: ) @pytest.mark.asyncio - async def test_update_model(self, inference_stack): + async def test_register_with_llama_model(self, inference_stack): _, models_impl = inference_stack - # Register a model to update - model_id = CoreModelId.llama3_1_8b_instruct.value - old_model = await models_impl.register_model(model_id=model_id) - - # Update the model - new_model_id = CoreModelId.llama3_2_3b_instruct.value - updated_model = await models_impl.update_model( - model_id=model_id, provider_model_id=new_model_id + _ = await models_impl.register_model( + model_id="custom-model", + metadata={"llama_model": "meta-llama/Llama-2-7b"}, ) - # Retrieve the updated model to verify changes - assert updated_model.provider_resource_id != old_model.provider_resource_id + with pytest.raises(ValueError) as exc_info: + await models_impl.register_model( + model_id="custom-model-2", + metadata={"llama_model": "meta-llama/Llama-2-7b"}, + provider_model_id="custom-model", + ) - # Cleanup - await models_impl.unregister_model(model_id=model_id) + @pytest.mark.asyncio + async def test_register_with_invalid_llama_model(self, inference_stack): + _, models_impl = inference_stack + + with pytest.raises(ValueError) as exc_info: + await models_impl.register_model( + model_id="custom-model-2", + metadata={"llama_model": "invalid-llama-model"}, + ) diff --git a/llama_stack/providers/tests/resolver.py b/llama_stack/providers/tests/resolver.py index df927926e..8bbb902cd 100644 --- a/llama_stack/providers/tests/resolver.py +++ b/llama_stack/providers/tests/resolver.py @@ -6,7 +6,6 @@ import json import tempfile -from datetime import datetime from typing import Any, Dict, List, Optional from llama_stack.distribution.datatypes import * # noqa: F403 @@ -37,7 +36,6 @@ async def construct_stack_for_test( ) -> TestStack: sqlite_file = tempfile.NamedTemporaryFile(delete=False, suffix=".db") run_config = dict( - built_at=datetime.now(), image_name="test-fixture", apis=apis, providers=providers, diff --git a/llama_stack/providers/tests/safety/fixtures.py b/llama_stack/providers/tests/safety/fixtures.py index a706316dd..32883bfab 100644 --- a/llama_stack/providers/tests/safety/fixtures.py +++ b/llama_stack/providers/tests/safety/fixtures.py @@ -47,6 +47,9 @@ def safety_shield(request): else: params = {} + if not shield_id: + return None + return ShieldInput( shield_id=shield_id, params=params, diff --git a/llama_stack/providers/tests/scoring/conftest.py b/llama_stack/providers/tests/scoring/conftest.py index e8ecfaa68..327acab84 100644 --- a/llama_stack/providers/tests/scoring/conftest.py +++ b/llama_stack/providers/tests/scoring/conftest.py @@ -58,7 +58,7 @@ def pytest_addoption(parser): parser.addoption( "--inference-model", action="store", - default="Llama3.2-3B-Instruct", + default="meta-llama/Llama-3.2-3B-Instruct", help="Specify the inference model to use for 
testing", ) diff --git a/llama_stack/providers/utils/inference/__init__.py b/llama_stack/providers/utils/inference/__init__.py index 55f72a791..7d268ed38 100644 --- a/llama_stack/providers/utils/inference/__init__.py +++ b/llama_stack/providers/utils/inference/__init__.py @@ -31,3 +31,8 @@ def supported_inference_models() -> List[str]: or is_supported_safety_model(m) ) ] + + +ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR = { + m.huggingface_repo: m.descriptor() for m in all_registered_models() +} diff --git a/llama_stack/providers/utils/inference/model_registry.py b/llama_stack/providers/utils/inference/model_registry.py index 77eb5b415..07225fac0 100644 --- a/llama_stack/providers/utils/inference/model_registry.py +++ b/llama_stack/providers/utils/inference/model_registry.py @@ -11,6 +11,10 @@ from llama_models.sku_list import all_registered_models from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate +from llama_stack.providers.utils.inference import ( + ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR, +) + ModelAlias = namedtuple("ModelAlias", ["provider_model_id", "aliases", "llama_model"]) @@ -32,6 +36,16 @@ def build_model_alias(provider_model_id: str, model_descriptor: str) -> ModelAli ) +def build_model_alias_with_just_provider_model_id( + provider_model_id: str, model_descriptor: str +) -> ModelAlias: + return ModelAlias( + provider_model_id=provider_model_id, + aliases=[], + llama_model=model_descriptor, + ) + + class ModelRegistryHelper(ModelsProtocolPrivate): def __init__(self, model_aliases: List[ModelAlias]): self.alias_to_provider_id_map = {} @@ -51,7 +65,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate): if identifier in self.alias_to_provider_id_map: return self.alias_to_provider_id_map[identifier] else: - raise ValueError(f"Unknown model: `{identifier}`") + return None def get_llama_model(self, provider_model_id: str) -> str: if provider_model_id in self.provider_id_to_llama_model_map: @@ -60,8 +74,34 @@ class ModelRegistryHelper(ModelsProtocolPrivate): return None async def register_model(self, model: Model) -> Model: - model.provider_resource_id = self.get_provider_model_id( - model.provider_resource_id - ) + provider_resource_id = self.get_provider_model_id(model.provider_resource_id) + if provider_resource_id: + model.provider_resource_id = provider_resource_id + else: + if model.metadata.get("llama_model") is None: + raise ValueError( + f"Model '{model.provider_resource_id}' is not available and no llama_model was specified in metadata. " + "Please specify a llama_model in metadata or use a supported model identifier" + ) + existing_llama_model = self.get_llama_model(model.provider_resource_id) + if existing_llama_model: + if existing_llama_model != model.metadata["llama_model"]: + raise ValueError( + f"Provider model id '{model.provider_resource_id}' is already registered to a different llama model: '{existing_llama_model}'" + ) + else: + if ( + model.metadata["llama_model"] + not in ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR + ): + raise ValueError( + f"Invalid llama_model '{model.metadata['llama_model']}' specified in metadata. 
" + f"Must be one of: {', '.join(ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR.keys())}" + ) + self.provider_id_to_llama_model_map[model.provider_resource_id] = ( + ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR[ + model.metadata["llama_model"] + ] + ) return model diff --git a/llama_stack/providers/utils/kvstore/config.py b/llama_stack/providers/utils/kvstore/config.py index 0a21bf4ca..ed400efae 100644 --- a/llama_stack/providers/utils/kvstore/config.py +++ b/llama_stack/providers/utils/kvstore/config.py @@ -36,6 +36,15 @@ class RedisKVStoreConfig(CommonConfig): def url(self) -> str: return f"redis://{self.host}:{self.port}" + @classmethod + def sample_run_config(cls): + return { + "type": "redis", + "namespace": None, + "host": "${env.REDIS_HOST:localhost}", + "port": "${env.REDIS_PORT:6379}", + } + class SqliteKVStoreConfig(CommonConfig): type: Literal[KVStoreType.sqlite.value] = KVStoreType.sqlite.value @@ -44,6 +53,19 @@ class SqliteKVStoreConfig(CommonConfig): description="File path for the sqlite database", ) + @classmethod + def sample_run_config( + cls, __distro_dir__: str = "runtime", db_name: str = "kvstore.db" + ): + return { + "type": "sqlite", + "namespace": None, + "db_path": "${env.SQLITE_STORE_DIR:~/.llama/" + + __distro_dir__ + + "}/" + + db_name, + } + class PostgresKVStoreConfig(CommonConfig): type: Literal[KVStoreType.postgres.value] = KVStoreType.postgres.value @@ -54,6 +76,19 @@ class PostgresKVStoreConfig(CommonConfig): password: Optional[str] = None table_name: str = "llamastack_kvstore" + @classmethod + def sample_run_config(cls, table_name: str = "llamastack_kvstore"): + return { + "type": "postgres", + "namespace": None, + "host": "${env.POSTGRES_HOST:localhost}", + "port": "${env.POSTGRES_PORT:5432}", + "db": "${env.POSTGRES_DB}", + "user": "${env.POSTGRES_USER}", + "password": "${env.POSTGRES_PASSWORD}", + "table_name": "${env.POSTGRES_TABLE_NAME:" + table_name + "}", + } + @classmethod @field_validator("table_name") def validate_table_name(cls, v: str) -> str: diff --git a/llama_stack/scripts/distro_codegen.py b/llama_stack/scripts/distro_codegen.py new file mode 100644 index 000000000..f0d3bb4b9 --- /dev/null +++ b/llama_stack/scripts/distro_codegen.py @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
+ +import concurrent.futures +import importlib +import subprocess +import sys +from functools import partial +from pathlib import Path +from typing import Iterator + +from rich.progress import Progress, SpinnerColumn, TextColumn + + +REPO_ROOT = Path(__file__).parent.parent.parent + + +def find_template_dirs(templates_dir: Path) -> Iterator[Path]: + """Find immediate subdirectories in the templates folder.""" + if not templates_dir.exists(): + raise FileNotFoundError(f"Templates directory not found: {templates_dir}") + + return ( + d for d in templates_dir.iterdir() if d.is_dir() and d.name != "__pycache__" + ) + + +def process_template(template_dir: Path, progress) -> None: + """Process a single template directory.""" + progress.print(f"Processing {template_dir.name}") + + try: + # Import the module directly + module_name = f"llama_stack.templates.{template_dir.name}" + module = importlib.import_module(module_name) + + # Get and save the distribution template + if template_func := getattr(module, "get_distribution_template", None): + template = template_func() + + template.save_distribution( + yaml_output_dir=REPO_ROOT / "llama_stack" / "templates" / template.name, + doc_output_dir=REPO_ROOT + / "docs/source/getting_started/distributions" + / f"{template.distro_type}_distro", + ) + else: + progress.print( + f"[yellow]Warning: {template_dir.name} has no get_distribution_template function" + ) + + except Exception as e: + progress.print(f"[red]Error processing {template_dir.name}: {str(e)}") + raise e + + +def check_for_changes() -> bool: + """Check if there are any uncommitted changes.""" + result = subprocess.run( + ["git", "diff", "--exit-code"], + cwd=REPO_ROOT, + capture_output=True, + ) + return result.returncode != 0 + + +def main(): + templates_dir = REPO_ROOT / "llama_stack" / "templates" + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + ) as progress: + template_dirs = list(find_template_dirs(templates_dir)) + task = progress.add_task( + "Processing distribution templates...", total=len(template_dirs) + ) + + # Create a partial function with the progress bar + process_func = partial(process_template, progress=progress) + + # Process templates in parallel + with concurrent.futures.ThreadPoolExecutor() as executor: + # Submit all tasks and wait for completion + list(executor.map(process_func, template_dirs)) + progress.update(task, advance=len(template_dirs)) + + if check_for_changes(): + print( + "Distribution template changes detected. Please commit the changes.", + file=sys.stderr, + ) + sys.exit(1) + + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/llama_stack/templates/__init__.py b/llama_stack/templates/__init__.py new file mode 100644 index 000000000..756f351d8 --- /dev/null +++ b/llama_stack/templates/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. diff --git a/llama_stack/templates/fireworks/__init__.py b/llama_stack/templates/fireworks/__init__.py new file mode 100644 index 000000000..1d85c66db --- /dev/null +++ b/llama_stack/templates/fireworks/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
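The codegen script above only requires each template package to expose a `get_distribution_template()` callable; the `fireworks` package below follows exactly that contract. A minimal consumer, assuming the package is importable:

```python
from llama_stack.templates.fireworks import get_distribution_template

template = get_distribution_template()
print(template.name, template.distro_type)  # fireworks self_hosted
```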
+ +from .fireworks import get_distribution_template # noqa: F401 diff --git a/llama_stack/templates/fireworks/build.yaml b/llama_stack/templates/fireworks/build.yaml index ffd67738d..c16e3f5d6 100644 --- a/llama_stack/templates/fireworks/build.yaml +++ b/llama_stack/templates/fireworks/build.yaml @@ -1,11 +1,19 @@ +version: '2' name: fireworks distribution_spec: - description: Use Fireworks.ai for running LLM inference + description: Use Fireworks.AI for running LLM inference + docker_image: null providers: - inference: remote::fireworks + inference: + - remote::fireworks memory: - inline::faiss - - remote::weaviate - safety: inline::llama-guard - agents: inline::meta-reference - telemetry: inline::meta-reference + - remote::chromadb + - remote::pgvector + safety: + - inline::llama-guard + agents: + - inline::meta-reference + telemetry: + - inline::meta-reference +image_type: conda diff --git a/llama_stack/templates/fireworks/doc_template.md b/llama_stack/templates/fireworks/doc_template.md new file mode 100644 index 000000000..2a91ece07 --- /dev/null +++ b/llama_stack/templates/fireworks/doc_template.md @@ -0,0 +1,60 @@ +# Fireworks Distribution + +The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations. + +{{ providers_table }} + +{% if run_config_env_vars %} +### Environment Variables + +The following environment variables can be configured: + +{% for var, (default_value, description) in run_config_env_vars.items() %} +- `{{ var }}`: {{ description }} (default: `{{ default_value }}`) +{% endfor %} +{% endif %} + +{% if default_models %} +### Models + +The following models are available by default: + +{% for model in default_models %} +- `{{ model.model_id }} ({{ model.provider_model_id }})` +{% endfor %} +{% endif %} + + +### Prerequisite: API Keys + +Make sure you have access to a Fireworks API Key. You can get one by visiting [fireworks.ai](https://fireworks.ai/). + + +## Running Llama Stack with Fireworks + +You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. + +```bash +LLAMA_STACK_PORT=5001 +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-{{ name }} \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env FIREWORKS_API_KEY=$FIREWORKS_API_KEY +``` + +### Via Conda + +```bash +llama stack build --template fireworks --image-type conda +llama stack run ./run.yaml \ + --port 5001 \ + --env FIREWORKS_API_KEY=$FIREWORKS_API_KEY +``` diff --git a/llama_stack/templates/fireworks/fireworks.py b/llama_stack/templates/fireworks/fireworks.py new file mode 100644 index 000000000..5f744cae0 --- /dev/null +++ b/llama_stack/templates/fireworks/fireworks.py @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
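+
+# Template definition for the Fireworks distribution: a single
+# remote::fireworks inference provider, with the default model list derived
+# from the provider's MODEL_ALIASES and mapped back to the canonical
+# Hugging Face repo names.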
+ +from pathlib import Path + +from llama_models.sku_list import all_registered_models + +from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput +from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig +from llama_stack.providers.remote.inference.fireworks.fireworks import MODEL_ALIASES + +from llama_stack.templates.template import DistributionTemplate, RunConfigSettings + + +def get_distribution_template() -> DistributionTemplate: + providers = { + "inference": ["remote::fireworks"], + "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "safety": ["inline::llama-guard"], + "agents": ["inline::meta-reference"], + "telemetry": ["inline::meta-reference"], + } + + inference_provider = Provider( + provider_id="fireworks", + provider_type="remote::fireworks", + config=FireworksImplConfig.sample_run_config(), + ) + + core_model_to_hf_repo = { + m.descriptor(): m.huggingface_repo for m in all_registered_models() + } + default_models = [ + ModelInput( + model_id=core_model_to_hf_repo[m.llama_model], + provider_model_id=m.provider_model_id, + ) + for m in MODEL_ALIASES + ] + + return DistributionTemplate( + name="fireworks", + distro_type="self_hosted", + description="Use Fireworks.AI for running LLM inference", + docker_image=None, + template_path=Path(__file__).parent / "doc_template.md", + providers=providers, + default_models=default_models, + run_configs={ + "run.yaml": RunConfigSettings( + provider_overrides={ + "inference": [inference_provider], + }, + default_models=default_models, + default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")], + ), + }, + run_config_env_vars={ + "LLAMASTACK_PORT": ( + "5001", + "Port for the Llama Stack distribution server", + ), + "FIREWORKS_API_KEY": ( + "", + "Fireworks.AI API Key", + ), + }, + ) diff --git a/llama_stack/templates/fireworks/run.yaml b/llama_stack/templates/fireworks/run.yaml new file mode 100644 index 000000000..c9c05a8e0 --- /dev/null +++ b/llama_stack/templates/fireworks/run.yaml @@ -0,0 +1,91 @@ +version: '2' +image_name: fireworks +docker_image: null +conda_env: fireworks +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: fireworks + provider_type: remote::fireworks + config: + url: https://api.fireworks.ai/inference + api_key: ${env.FIREWORKS_API_KEY} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db +models: +- metadata: {} + model_id: meta-llama/Llama-3.1-8B-Instruct + provider_id: null + provider_model_id: fireworks/llama-v3p1-8b-instruct +- metadata: {} + model_id: meta-llama/Llama-3.1-70B-Instruct + provider_id: null + provider_model_id: fireworks/llama-v3p1-70b-instruct +- metadata: {} + model_id: meta-llama/Llama-3.1-405B-Instruct-FP8 + provider_id: null + provider_model_id: fireworks/llama-v3p1-405b-instruct 
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-1B-Instruct
+  provider_id: null
+  provider_model_id: fireworks/llama-v3p2-1b-instruct
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-3B-Instruct
+  provider_id: null
+  provider_model_id: fireworks/llama-v3p2-3b-instruct
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-11B-Vision-Instruct
+  provider_id: null
+  provider_model_id: fireworks/llama-v3p2-11b-vision-instruct
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-90B-Vision-Instruct
+  provider_id: null
+  provider_model_id: fireworks/llama-v3p2-90b-vision-instruct
+- metadata: {}
+  model_id: meta-llama/Llama-Guard-3-8B
+  provider_id: null
+  provider_model_id: fireworks/llama-guard-3-8b
+- metadata: {}
+  model_id: meta-llama/Llama-Guard-3-11B-Vision
+  provider_id: null
+  provider_model_id: fireworks/llama-guard-3-11b-vision
+shields:
+- params: null
+  shield_id: meta-llama/Llama-Guard-3-8B
+  provider_id: null
+  provider_shield_id: null
+memory_banks: []
+datasets: []
+scoring_fns: []
+eval_tasks: []
diff --git a/llama_stack/templates/meta-reference-gpu/__init__.py b/llama_stack/templates/meta-reference-gpu/__init__.py
new file mode 100644
index 000000000..1cfdb2c6a
--- /dev/null
+++ b/llama_stack/templates/meta-reference-gpu/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .meta_reference import get_distribution_template  # noqa: F401
diff --git a/llama_stack/templates/meta-reference-gpu/build.yaml b/llama_stack/templates/meta-reference-gpu/build.yaml
index 7c468e41c..ef075d098 100644
--- a/llama_stack/templates/meta-reference-gpu/build.yaml
+++ b/llama_stack/templates/meta-reference-gpu/build.yaml
@@ -1,13 +1,19 @@
+version: '2'
 name: meta-reference-gpu
 distribution_spec:
-  docker_image: pytorch/pytorch:2.5.0-cuda12.4-cudnn9-runtime
-  description: Use code from `llama_stack` itself to serve all llama stack APIs
+  description: Use Meta Reference for running LLM inference
+  docker_image: null
   providers:
-    inference: meta-reference
+    inference:
+    - inline::meta-reference
     memory:
     - inline::faiss
     - remote::chromadb
     - remote::pgvector
-    safety: inline::llama-guard
-    agents: inline::meta-reference
-    telemetry: inline::meta-reference
+    safety:
+    - inline::llama-guard
+    agents:
+    - inline::meta-reference
+    telemetry:
+    - inline::meta-reference
+image_type: conda
diff --git a/llama_stack/templates/meta-reference-gpu/doc_template.md b/llama_stack/templates/meta-reference-gpu/doc_template.md
new file mode 100644
index 000000000..9a61ff691
--- /dev/null
+++ b/llama_stack/templates/meta-reference-gpu/doc_template.md
@@ -0,0 +1,82 @@
+# Meta Reference Distribution
+
+The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations:
+
+{{ providers_table }}
+
+Note that you need access to NVIDIA GPUs to run this distribution. This distribution is not compatible with CPU-only machines or machines with AMD GPUs.
+
+{% if run_config_env_vars %}
+### Environment Variables
+
+The following environment variables can be configured:
+
+{% for var, (default_value, description) in run_config_env_vars.items() %}
+- `{{ var }}`: {{ description }} (default: `{{ default_value }}`)
+{% endfor %}
+{% endif %}
+
+
+## Prerequisite: Downloading Models
+
+Please make sure you have llama model checkpoints downloaded in `~/.llama` before proceeding.
See the [installation guide](https://llama-stack.readthedocs.io/en/latest/cli_reference/download_models.html) to download the models. Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints.
+
+```
+$ ls ~/.llama/checkpoints
+Llama3.1-8B           Llama3.2-11B-Vision-Instruct  Llama3.2-1B-Instruct  Llama3.2-90B-Vision-Instruct  Llama-Guard-3-8B
+Llama3.1-8B-Instruct  Llama3.2-1B                   Llama3.2-3B-Instruct  Llama-Guard-3-1B              Prompt-Guard-86M
+```
+
+## Running the Distribution
+
+You can do this via Conda (build code) or Docker which has a pre-built image.
+
+### Via Docker
+
+This method allows you to get started quickly without having to build the distribution code.
+
+```bash
+LLAMA_STACK_PORT=5001
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v ~/.llama:/root/.llama \
+  -v ./run.yaml:/root/my-run.yaml \
+  llamastack/distribution-{{ name }} \
+  --yaml-config /root/my-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
+```
+
+If you are using Llama Stack Safety / Shield APIs, use:
+
+```bash
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v ~/.llama:/root/.llama \
+  -v ./run-with-safety.yaml:/root/my-run.yaml \
+  llamastack/distribution-{{ name }} \
+  --yaml-config /root/my-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \
+  --env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
+```
+
+### Via Conda
+
+Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+
+```bash
+llama stack build --template meta-reference-gpu --image-type conda
+llama stack run ./run.yaml \
+  --port 5001 \
+  --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
+```
+
+If you are using Llama Stack Safety / Shield APIs, use:
+
+```bash
+llama stack run ./run-with-safety.yaml \
+  --port 5001 \
+  --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \
+  --env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
+```
diff --git a/llama_stack/templates/meta-reference-gpu/meta_reference.py b/llama_stack/templates/meta-reference-gpu/meta_reference.py
new file mode 100644
index 000000000..f254bc920
--- /dev/null
+++ b/llama_stack/templates/meta-reference-gpu/meta_reference.py
@@ -0,0 +1,100 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
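+
+# Template definition for meta-reference-gpu. run-with-safety.yaml layers a
+# second inline::meta-reference provider on top of the base inference provider
+# so a Llama Guard model can be served alongside the inference model; both
+# checkpoints are selected via ${env.*} variables at run time.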
+ +from pathlib import Path + +from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput +from llama_stack.providers.inline.inference.meta_reference import ( + MetaReferenceInferenceConfig, +) +from llama_stack.templates.template import DistributionTemplate, RunConfigSettings + + +def get_distribution_template() -> DistributionTemplate: + providers = { + "inference": ["inline::meta-reference"], + "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "safety": ["inline::llama-guard"], + "agents": ["inline::meta-reference"], + "telemetry": ["inline::meta-reference"], + } + + inference_provider = Provider( + provider_id="meta-reference-inference", + provider_type="inline::meta-reference", + config=MetaReferenceInferenceConfig.sample_run_config( + model="${env.INFERENCE_MODEL}", + checkpoint_dir="${env.INFERENCE_CHECKPOINT_DIR:null}", + ), + ) + + inference_model = ModelInput( + model_id="${env.INFERENCE_MODEL}", + provider_id="meta-reference-inference", + ) + safety_model = ModelInput( + model_id="${env.SAFETY_MODEL}", + provider_id="meta-reference-safety", + ) + + return DistributionTemplate( + name="meta-reference-gpu", + distro_type="self_hosted", + description="Use Meta Reference for running LLM inference", + template_path=Path(__file__).parent / "doc_template.md", + providers=providers, + default_models=[inference_model, safety_model], + run_configs={ + "run.yaml": RunConfigSettings( + provider_overrides={ + "inference": [inference_provider], + }, + default_models=[inference_model], + ), + "run-with-safety.yaml": RunConfigSettings( + provider_overrides={ + "inference": [ + inference_provider, + Provider( + provider_id="meta-reference-safety", + provider_type="inline::meta-reference", + config=MetaReferenceInferenceConfig.sample_run_config( + model="${env.SAFETY_MODEL}", + checkpoint_dir="${env.SAFETY_CHECKPOINT_DIR:null}", + ), + ), + ], + }, + default_models=[ + inference_model, + safety_model, + ], + default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")], + ), + }, + run_config_env_vars={ + "LLAMASTACK_PORT": ( + "5001", + "Port for the Llama Stack distribution server", + ), + "INFERENCE_MODEL": ( + "meta-llama/Llama-3.2-3B-Instruct", + "Inference model loaded into the Meta Reference server", + ), + "INFERENCE_CHECKPOINT_DIR": ( + "null", + "Directory containing the Meta Reference model checkpoint", + ), + "SAFETY_MODEL": ( + "meta-llama/Llama-Guard-3-1B", + "Name of the safety (Llama-Guard) model to use", + ), + "SAFETY_CHECKPOINT_DIR": ( + "null", + "Directory containing the Llama-Guard model checkpoint", + ), + }, + ) diff --git a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml new file mode 100644 index 000000000..f82e0c938 --- /dev/null +++ b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml @@ -0,0 +1,70 @@ +version: '2' +image_name: meta-reference-gpu +docker_image: null +conda_env: meta-reference-gpu +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: meta-reference-inference + provider_type: inline::meta-reference + config: + model: ${env.INFERENCE_MODEL} + max_seq_len: 4096 + checkpoint_dir: ${env.INFERENCE_CHECKPOINT_DIR:null} + - provider_id: meta-reference-safety + provider_type: inline::meta-reference + config: + model: ${env.SAFETY_MODEL} + max_seq_len: 4096 + checkpoint_dir: ${env.SAFETY_CHECKPOINT_DIR:null} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + 
kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db +models: +- metadata: {} + model_id: ${env.INFERENCE_MODEL} + provider_id: meta-reference-inference + provider_model_id: null +- metadata: {} + model_id: ${env.SAFETY_MODEL} + provider_id: meta-reference-safety + provider_model_id: null +shields: +- params: null + shield_id: ${env.SAFETY_MODEL} + provider_id: null + provider_shield_id: null +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/llama_stack/templates/meta-reference-gpu/run.yaml b/llama_stack/templates/meta-reference-gpu/run.yaml new file mode 100644 index 000000000..b125169a3 --- /dev/null +++ b/llama_stack/templates/meta-reference-gpu/run.yaml @@ -0,0 +1,56 @@ +version: '2' +image_name: meta-reference-gpu +docker_image: null +conda_env: meta-reference-gpu +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: meta-reference-inference + provider_type: inline::meta-reference + config: + model: ${env.INFERENCE_MODEL} + max_seq_len: 4096 + checkpoint_dir: ${env.INFERENCE_CHECKPOINT_DIR:null} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db +models: +- metadata: {} + model_id: ${env.INFERENCE_MODEL} + provider_id: meta-reference-inference + provider_model_id: null +shields: [] +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/llama_stack/templates/ollama/__init__.py b/llama_stack/templates/ollama/__init__.py new file mode 100644 index 000000000..3a2c40f27 --- /dev/null +++ b/llama_stack/templates/ollama/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
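+
+# Package-level re-export, discovered by distro_codegen.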
+ +from .ollama import get_distribution_template # noqa: F401 diff --git a/llama_stack/templates/ollama/build.yaml b/llama_stack/templates/ollama/build.yaml index 8cab877ea..106449309 100644 --- a/llama_stack/templates/ollama/build.yaml +++ b/llama_stack/templates/ollama/build.yaml @@ -1,12 +1,19 @@ +version: '2' name: ollama distribution_spec: - description: Use ollama for running LLM inference + description: Use (an external) Ollama server for running LLM inference + docker_image: null providers: - inference: remote::ollama + inference: + - remote::ollama memory: - inline::faiss - remote::chromadb - remote::pgvector - safety: inline::llama-guard - agents: inline::meta-reference - telemetry: inline::meta-reference + safety: + - inline::llama-guard + agents: + - inline::meta-reference + telemetry: + - inline::meta-reference +image_type: conda diff --git a/llama_stack/templates/ollama/doc_template.md b/llama_stack/templates/ollama/doc_template.md new file mode 100644 index 000000000..5a7a0d2f7 --- /dev/null +++ b/llama_stack/templates/ollama/doc_template.md @@ -0,0 +1,134 @@ +# Ollama Distribution + +The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations. + +{{ providers_table }} + +You should use this distribution if you have a regular desktop machine without very powerful GPUs. Of course, if you have powerful GPUs, you can still continue using this distribution since Ollama supports GPU acceleration. + +{%- if run_config_env_vars %} +### Environment Variables + +The following environment variables can be configured: + +{% for var, (default_value, description) in run_config_env_vars.items() %} +- `{{ var }}`: {{ description }} (default: `{{ default_value }}`) +{% endfor %} +{% endif %} + + +## Setting up Ollama server + +Please check the [Ollama Documentation](https://github.com/ollama/ollama) on how to install and run Ollama. After installing Ollama, you need to run `ollama serve` to start the server. + +In order to load models, you can run: + +```bash +export INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct" + +# ollama names this model differently, and we must use the ollama name when loading the model +export OLLAMA_INFERENCE_MODEL="llama3.2:3b-instruct-fp16" +ollama run $OLLAMA_INFERENCE_MODEL --keepalive 60m +``` + +If you are using Llama Stack Safety / Shield APIs, you will also need to pull and run the safety model. + +```bash +export SAFETY_MODEL="meta-llama/Llama-Guard-3-1B" + +# ollama names this model differently, and we must use the ollama name when loading the model +export OLLAMA_SAFETY_MODEL="llama-guard3:1b" +ollama run $OLLAMA_SAFETY_MODEL --keepalive 60m +``` + +## Running Llama Stack + +Now you are ready to run Llama Stack with Ollama as the inference provider. You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. 
+
+```bash
+export LLAMA_STACK_PORT=5001
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v ~/.llama:/root/.llama \
+  -v ./run.yaml:/root/my-run.yaml \
+  llamastack/distribution-{{ name }} \
+  --yaml-config /root/my-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env OLLAMA_URL=http://host.docker.internal:11434
+```
+
+If you are using Llama Stack Safety / Shield APIs, use:
+
+```bash
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v ~/.llama:/root/.llama \
+  -v ./run-with-safety.yaml:/root/my-run.yaml \
+  llamastack/distribution-{{ name }} \
+  --yaml-config /root/my-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env SAFETY_MODEL=$SAFETY_MODEL \
+  --env OLLAMA_URL=http://host.docker.internal:11434
+```
+
+### Via Conda
+
+Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+
+```bash
+export LLAMA_STACK_PORT=5001
+
+llama stack build --template ollama --image-type conda
+llama stack run ./run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env OLLAMA_URL=http://localhost:11434
+```
+
+If you are using Llama Stack Safety / Shield APIs, use:
+
+```bash
+llama stack run ./run-with-safety.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env SAFETY_MODEL=$SAFETY_MODEL \
+  --env OLLAMA_URL=http://localhost:11434
+```
+
+
+### (Optional) Update Model Serving Configuration
+
+> [!NOTE]
+> Please check the [OLLAMA_SUPPORTED_MODELS](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/inference/ollama/ollama.py) for the supported Ollama models.
+
+
+To serve a new model with `ollama`, run:
+```bash
+ollama run <model_name>
+```
+
+To make sure that the model is being served correctly, run `ollama ps` to get a list of models being served by ollama.
+```
+$ ollama ps
+
+NAME                         ID              SIZE     PROCESSOR    UNTIL
+llama3.1:8b-instruct-fp16    4aacac419454    17 GB    100% GPU     4 minutes from now
+```
+
+To verify that the model served by ollama is correctly connected to the Llama Stack server, run:
+```bash
+$ llama-stack-client models list
++----------------------+----------------------+---------------+-----------------------------------------------+
+| identifier           | llama_model          | provider_id   | metadata                                      |
++======================+======================+===============+===============================================+
+| Llama3.1-8B-Instruct | Llama3.1-8B-Instruct | ollama0       | {'ollama_model': 'llama3.1:8b-instruct-fp16'} |
++----------------------+----------------------+---------------+-----------------------------------------------+
+```
diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py
new file mode 100644
index 000000000..b30c75bb5
--- /dev/null
+++ b/llama_stack/templates/ollama/ollama.py
@@ -0,0 +1,84 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
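+
+# Template definition for the Ollama distribution. A single `ollama` provider
+# serves both the inference model and the optional safety model, so
+# run-with-safety.yaml only adds the extra model and shield registrations
+# rather than a second provider.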
+ +from pathlib import Path + +from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput +from llama_stack.providers.remote.inference.ollama import OllamaImplConfig +from llama_stack.templates.template import DistributionTemplate, RunConfigSettings + + +def get_distribution_template() -> DistributionTemplate: + providers = { + "inference": ["remote::ollama"], + "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "safety": ["inline::llama-guard"], + "agents": ["inline::meta-reference"], + "telemetry": ["inline::meta-reference"], + } + + inference_provider = Provider( + provider_id="ollama", + provider_type="remote::ollama", + config=OllamaImplConfig.sample_run_config(), + ) + + inference_model = ModelInput( + model_id="${env.INFERENCE_MODEL}", + provider_id="ollama", + ) + safety_model = ModelInput( + model_id="${env.SAFETY_MODEL}", + provider_id="ollama", + ) + + return DistributionTemplate( + name="ollama", + distro_type="self_hosted", + description="Use (an external) Ollama server for running LLM inference", + docker_image=None, + template_path=Path(__file__).parent / "doc_template.md", + providers=providers, + default_models=[inference_model, safety_model], + run_configs={ + "run.yaml": RunConfigSettings( + provider_overrides={ + "inference": [inference_provider], + }, + default_models=[inference_model], + ), + "run-with-safety.yaml": RunConfigSettings( + provider_overrides={ + "inference": [ + inference_provider, + ] + }, + default_models=[ + inference_model, + safety_model, + ], + default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")], + ), + }, + run_config_env_vars={ + "LLAMASTACK_PORT": ( + "5001", + "Port for the Llama Stack distribution server", + ), + "OLLAMA_URL": ( + "http://127.0.0.1:11434", + "URL of the Ollama server", + ), + "INFERENCE_MODEL": ( + "meta-llama/Llama-3.2-3B-Instruct", + "Inference model loaded into the Ollama server", + ), + "SAFETY_MODEL": ( + "meta-llama/Llama-Guard-3-1B", + "Safety model loaded into the Ollama server", + ), + }, + ) diff --git a/llama_stack/templates/ollama/run-with-safety.yaml b/llama_stack/templates/ollama/run-with-safety.yaml new file mode 100644 index 000000000..6c86677b3 --- /dev/null +++ b/llama_stack/templates/ollama/run-with-safety.yaml @@ -0,0 +1,62 @@ +version: '2' +image_name: ollama +docker_image: null +conda_env: ollama +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: ollama + provider_type: remote::ollama + config: + url: ${env.OLLAMA_URL:http://localhost:11434} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db +models: +- metadata: {} + model_id: ${env.INFERENCE_MODEL} + provider_id: ollama + provider_model_id: null +- metadata: {} + model_id: ${env.SAFETY_MODEL} + provider_id: ollama + provider_model_id: null +shields: +- params: null + 
shield_id: ${env.SAFETY_MODEL} + provider_id: null + provider_shield_id: null +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/llama_stack/templates/ollama/run.yaml b/llama_stack/templates/ollama/run.yaml new file mode 100644 index 000000000..b2d6f2c18 --- /dev/null +++ b/llama_stack/templates/ollama/run.yaml @@ -0,0 +1,54 @@ +version: '2' +image_name: ollama +docker_image: null +conda_env: ollama +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: ollama + provider_type: remote::ollama + config: + url: ${env.OLLAMA_URL:http://localhost:11434} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db +models: +- metadata: {} + model_id: ${env.INFERENCE_MODEL} + provider_id: ollama + provider_model_id: null +shields: [] +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/llama_stack/templates/remote-vllm/__init__.py b/llama_stack/templates/remote-vllm/__init__.py new file mode 100644 index 000000000..7b3d59a01 --- /dev/null +++ b/llama_stack/templates/remote-vllm/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from .vllm import get_distribution_template # noqa: F401 diff --git a/llama_stack/templates/remote-vllm/build.yaml b/llama_stack/templates/remote-vllm/build.yaml index 39abb10af..9f4597cb0 100644 --- a/llama_stack/templates/remote-vllm/build.yaml +++ b/llama_stack/templates/remote-vllm/build.yaml @@ -1,12 +1,19 @@ +version: '2' name: remote-vllm distribution_spec: description: Use (an external) vLLM server for running LLM inference + docker_image: null providers: - inference: remote::vllm + inference: + - remote::vllm memory: - inline::faiss - remote::chromadb - remote::pgvector - safety: inline::llama-guard - agents: inline::meta-reference - telemetry: inline::meta-reference + safety: + - inline::llama-guard + agents: + - inline::meta-reference + telemetry: + - inline::meta-reference +image_type: conda diff --git a/llama_stack/templates/remote-vllm/doc_template.md b/llama_stack/templates/remote-vllm/doc_template.md new file mode 100644 index 000000000..63432fb70 --- /dev/null +++ b/llama_stack/templates/remote-vllm/doc_template.md @@ -0,0 +1,136 @@ +# Remote vLLM Distribution + +The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations: + +{{ providers_table }} + +You can use this distribution if you have GPUs and want to run an independent vLLM server container for running inference. 
+ +{% if run_config_env_vars %} +### Environment Variables + +The following environment variables can be configured: + +{% for var, (default_value, description) in run_config_env_vars.items() %} +- `{{ var }}`: {{ description }} (default: `{{ default_value }}`) +{% endfor %} +{% endif %} + + +## Setting up vLLM server + +Please check the [vLLM Documentation](https://docs.vllm.ai/en/v0.5.5/serving/deploying_with_docker.html) to get a vLLM endpoint. Here is a sample script to start a vLLM server locally via Docker: + +```bash +export INFERENCE_PORT=8000 +export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct +export CUDA_VISIBLE_DEVICES=0 + +docker run \ + --runtime nvidia \ + --gpus $CUDA_VISIBLE_DEVICES \ + -v ~/.cache/huggingface:/root/.cache/huggingface \ + --env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \ + -p $INFERENCE_PORT:$INFERENCE_PORT \ + --ipc=host \ + vllm/vllm-openai:latest \ + --gpu-memory-utilization 0.7 \ + --model $INFERENCE_MODEL \ + --port $INFERENCE_PORT +``` + +If you are using Llama Stack Safety / Shield APIs, then you will need to also run another instance of a vLLM with a corresponding safety model like `meta-llama/Llama-Guard-3-1B` using a script like: + +```bash +export SAFETY_PORT=8081 +export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B +export CUDA_VISIBLE_DEVICES=1 + +docker run \ + --runtime nvidia \ + --gpus $CUDA_VISIBLE_DEVICES \ + -v ~/.cache/huggingface:/root/.cache/huggingface \ + --env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \ + -p $SAFETY_PORT:$SAFETY_PORT \ + --ipc=host \ + vllm/vllm-openai:latest \ + --gpu-memory-utilization 0.7 \ + --model $SAFETY_MODEL \ + --port $SAFETY_PORT +``` + +## Running Llama Stack + +Now you are ready to run Llama Stack with vLLM as the inference provider. You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. + +```bash +export INFERENCE_PORT=8000 +export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct +export LLAMA_STACK_PORT=5001 + +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-{{ name }} \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 +``` + +If you are using Llama Stack Safety / Shield APIs, use: + +```bash +export SAFETY_PORT=8081 +export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B + +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run-with-safety.yaml:/root/my-run.yaml \ + llamastack/distribution-{{ name }} \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 \ + --env SAFETY_MODEL=$SAFETY_MODEL \ + --env SAFETY_VLLM_URL=http://host.docker.internal:$SAFETY_PORT/v1 +``` + + +### Via Conda + +Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. 
+ +```bash +export INFERENCE_PORT=8000 +export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct +export LLAMA_STACK_PORT=5001 + +cd distributions/remote-vllm +llama stack build --template remote-vllm --image-type conda + +llama stack run ./run.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env VLLM_URL=http://localhost:$INFERENCE_PORT/v1 +``` + +If you are using Llama Stack Safety / Shield APIs, use: + +```bash +export SAFETY_PORT=8081 +export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B + +llama stack run ./run-with-safety.yaml \ + --port $LLAMA_STACK_PORT \ + --env INFERENCE_MODEL=$INFERENCE_MODEL \ + --env VLLM_URL=http://localhost:$INFERENCE_PORT/v1 \ + --env SAFETY_MODEL=$SAFETY_MODEL \ + --env SAFETY_VLLM_URL=http://localhost:$SAFETY_PORT/v1 +``` diff --git a/llama_stack/templates/remote-vllm/run-with-safety.yaml b/llama_stack/templates/remote-vllm/run-with-safety.yaml new file mode 100644 index 000000000..c0849e2d0 --- /dev/null +++ b/llama_stack/templates/remote-vllm/run-with-safety.yaml @@ -0,0 +1,70 @@ +version: '2' +image_name: remote-vllm +docker_image: null +conda_env: remote-vllm +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: vllm-inference + provider_type: remote::vllm + config: + url: ${env.VLLM_URL} + max_tokens: ${env.VLLM_MAX_TOKENS:4096} + api_token: ${env.VLLM_API_TOKEN:fake} + - provider_id: vllm-safety + provider_type: remote::vllm + config: + url: ${env.SAFETY_VLLM_URL} + max_tokens: ${env.VLLM_MAX_TOKENS:4096} + api_token: ${env.VLLM_API_TOKEN:fake} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db +models: +- metadata: {} + model_id: ${env.INFERENCE_MODEL} + provider_id: vllm-inference + provider_model_id: null +- metadata: {} + model_id: ${env.SAFETY_MODEL} + provider_id: vllm-safety + provider_model_id: null +shields: +- params: null + shield_id: ${env.SAFETY_MODEL} + provider_id: null + provider_shield_id: null +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/llama_stack/templates/remote-vllm/run.yaml b/llama_stack/templates/remote-vllm/run.yaml new file mode 100644 index 000000000..3457afdd6 --- /dev/null +++ b/llama_stack/templates/remote-vllm/run.yaml @@ -0,0 +1,56 @@ +version: '2' +image_name: remote-vllm +docker_image: null +conda_env: remote-vllm +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: vllm-inference + provider_type: remote::vllm + config: + url: ${env.VLLM_URL} + max_tokens: ${env.VLLM_MAX_TOKENS:4096} + api_token: ${env.VLLM_API_TOKEN:fake} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/faiss_store.db + safety: + - provider_id: llama-guard + 
provider_type: inline::llama-guard
+    config: {}
+  agents:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      persistence_store:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/agents_store.db
+  telemetry:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config: {}
+metadata_store:
+  namespace: null
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
+models:
+- metadata: {}
+  model_id: ${env.INFERENCE_MODEL}
+  provider_id: vllm-inference
+  provider_model_id: null
+shields: []
+memory_banks: []
+datasets: []
+scoring_fns: []
+eval_tasks: []
diff --git a/llama_stack/templates/remote-vllm/vllm.py b/llama_stack/templates/remote-vllm/vllm.py
new file mode 100644
index 000000000..c3858f7e5
--- /dev/null
+++ b/llama_stack/templates/remote-vllm/vllm.py
@@ -0,0 +1,100 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from pathlib import Path
+
+from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput
+from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
+from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
+
+
+def get_distribution_template() -> DistributionTemplate:
+    providers = {
+        "inference": ["remote::vllm"],
+        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "safety": ["inline::llama-guard"],
+        "agents": ["inline::meta-reference"],
+        "telemetry": ["inline::meta-reference"],
+    }
+
+    inference_provider = Provider(
+        provider_id="vllm-inference",
+        provider_type="remote::vllm",
+        config=VLLMInferenceAdapterConfig.sample_run_config(
+            url="${env.VLLM_URL}",
+        ),
+    )
+
+    inference_model = ModelInput(
+        model_id="${env.INFERENCE_MODEL}",
+        provider_id="vllm-inference",
+    )
+    safety_model = ModelInput(
+        model_id="${env.SAFETY_MODEL}",
+        provider_id="vllm-safety",
+    )
+
+    return DistributionTemplate(
+        name="remote-vllm",
+        distro_type="self_hosted",
+        description="Use (an external) vLLM server for running LLM inference",
+        template_path=Path(__file__).parent / "doc_template.md",
+        providers=providers,
+        default_models=[inference_model, safety_model],
+        run_configs={
+            "run.yaml": RunConfigSettings(
+                provider_overrides={
+                    "inference": [inference_provider],
+                },
+                default_models=[inference_model],
+            ),
+            "run-with-safety.yaml": RunConfigSettings(
+                provider_overrides={
+                    "inference": [
+                        inference_provider,
+                        Provider(
+                            provider_id="vllm-safety",
+                            provider_type="remote::vllm",
+                            config=VLLMInferenceAdapterConfig.sample_run_config(
+                                url="${env.SAFETY_VLLM_URL}",
+                            ),
+                        ),
+                    ],
+                },
+                default_models=[
+                    inference_model,
+                    safety_model,
+                ],
+                default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
+            ),
+        },
+        run_config_env_vars={
+            "LLAMASTACK_PORT": (
+                "5001",
+                "Port for the Llama Stack distribution server",
+            ),
+            "INFERENCE_MODEL": (
+                "meta-llama/Llama-3.2-3B-Instruct",
+                "Inference model loaded into the vLLM server",
+            ),
+            "VLLM_URL": (
+                "http://host.docker.internal:5100/v1",
+                "URL of the vLLM server with the main inference model",
+            ),
+            "VLLM_MAX_TOKENS": (
+                "4096",
+                "Maximum number of tokens for generation",
+            ),
+            "SAFETY_VLLM_URL": (
+                "http://host.docker.internal:5101/v1",
+                "URL of the vLLM server with the safety model",
+            ),
+            "SAFETY_MODEL": (
+
"meta-llama/Llama-Guard-3-1B", + "Name of the safety (Llama-Guard) model to use", + ), + }, + ) diff --git a/llama_stack/templates/template.py b/llama_stack/templates/template.py new file mode 100644 index 000000000..fd37016f8 --- /dev/null +++ b/llama_stack/templates/template.py @@ -0,0 +1,164 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from pathlib import Path +from typing import Dict, List, Literal, Optional, Tuple + +import jinja2 +import yaml +from pydantic import BaseModel, Field + +from llama_stack.distribution.datatypes import ( + Api, + BuildConfig, + DistributionSpec, + ModelInput, + Provider, + ShieldInput, + StackRunConfig, +) +from llama_stack.distribution.distribution import get_provider_registry +from llama_stack.distribution.utils.dynamic import instantiate_class_type +from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig + + +class RunConfigSettings(BaseModel): + provider_overrides: Dict[str, List[Provider]] = Field(default_factory=dict) + default_models: List[ModelInput] + default_shields: Optional[List[ShieldInput]] = None + + def run_config( + self, + name: str, + providers: Dict[str, List[str]], + docker_image: Optional[str] = None, + ) -> StackRunConfig: + provider_registry = get_provider_registry() + + provider_configs = {} + for api_str, provider_types in providers.items(): + if api_providers := self.provider_overrides.get(api_str): + provider_configs[api_str] = api_providers + continue + + provider_type = provider_types[0] + provider_id = provider_type.split("::")[-1] + + api = Api(api_str) + if provider_type not in provider_registry[api]: + raise ValueError( + f"Unknown provider type: {provider_type} for API: {api_str}" + ) + + config_class = provider_registry[api][provider_type].config_class + assert ( + config_class is not None + ), f"No config class for provider type: {provider_type} for API: {api_str}" + + config_class = instantiate_class_type(config_class) + if hasattr(config_class, "sample_run_config"): + config = config_class.sample_run_config( + __distro_dir__=f"distributions/{name}" + ) + else: + config = {} + + provider_configs[api_str] = [ + Provider( + provider_id=provider_id, + provider_type=provider_type, + config=config, + ) + ] + + # Get unique set of APIs from providers + apis = list(sorted(providers.keys())) + + return StackRunConfig( + image_name=name, + docker_image=docker_image, + conda_env=name, + apis=apis, + providers=provider_configs, + metadata_store=SqliteKVStoreConfig.sample_run_config( + __distro_dir__=f"distributions/{name}", + db_name="registry.db", + ), + models=self.default_models, + shields=self.default_shields or [], + ) + + +class DistributionTemplate(BaseModel): + """ + Represents a Llama Stack distribution instance that can generate configuration + and documentation files. 
+ """ + + name: str + description: str + distro_type: Literal["self_hosted", "remote_hosted", "ondevice"] + + providers: Dict[str, List[str]] + run_configs: Dict[str, RunConfigSettings] + template_path: Path + + # Optional configuration + run_config_env_vars: Optional[Dict[str, Tuple[str, str]]] = None + docker_image: Optional[str] = None + + default_models: Optional[List[ModelInput]] = None + + def build_config(self) -> BuildConfig: + return BuildConfig( + name=self.name, + distribution_spec=DistributionSpec( + description=self.description, + docker_image=self.docker_image, + providers=self.providers, + ), + image_type="conda", # default to conda, can be overridden + ) + + def generate_markdown_docs(self) -> str: + providers_table = "| API | Provider(s) |\n" + providers_table += "|-----|-------------|\n" + + for api, providers in sorted(self.providers.items()): + providers_str = ", ".join(f"`{p}`" for p in providers) + providers_table += f"| {api} | {providers_str} |\n" + + template = self.template_path.read_text() + # Render template with rich-generated table + env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True) + template = env.from_string(template) + return template.render( + name=self.name, + description=self.description, + providers=self.providers, + providers_table=providers_table, + run_config_env_vars=self.run_config_env_vars, + default_models=self.default_models, + ) + + def save_distribution(self, yaml_output_dir: Path, doc_output_dir: Path) -> None: + for output_dir in [yaml_output_dir, doc_output_dir]: + output_dir.mkdir(parents=True, exist_ok=True) + + build_config = self.build_config() + with open(yaml_output_dir / "build.yaml", "w") as f: + yaml.safe_dump(build_config.model_dump(), f, sort_keys=False) + + for yaml_pth, settings in self.run_configs.items(): + run_config = settings.run_config( + self.name, self.providers, self.docker_image + ) + with open(yaml_output_dir / yaml_pth, "w") as f: + yaml.safe_dump(run_config.model_dump(), f, sort_keys=False) + + docs = self.generate_markdown_docs() + with open(doc_output_dir / f"{self.name}.md", "w") as f: + f.write(docs) diff --git a/llama_stack/templates/tgi/__init__.py b/llama_stack/templates/tgi/__init__.py new file mode 100644 index 000000000..fa1932f6a --- /dev/null +++ b/llama_stack/templates/tgi/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
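+
+# TGI template entry point. Unlike the other templates, this one pins a
+# docker_image (llamastack/distribution-tgi:test-0.0.52rc3) in its build.yaml.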
+ +from .tgi import get_distribution_template # noqa: F401 diff --git a/llama_stack/templates/tgi/build.yaml b/llama_stack/templates/tgi/build.yaml index 5500361c4..5f44c2d86 100644 --- a/llama_stack/templates/tgi/build.yaml +++ b/llama_stack/templates/tgi/build.yaml @@ -1,12 +1,19 @@ +version: '2' name: tgi distribution_spec: - description: Use TGI for running LLM inference + description: Use (an external) TGI server for running LLM inference + docker_image: llamastack/distribution-tgi:test-0.0.52rc3 providers: - inference: remote::tgi + inference: + - remote::tgi memory: - inline::faiss - remote::chromadb - remote::pgvector - safety: inline::llama-guard - agents: inline::meta-reference - telemetry: inline::meta-reference + safety: + - inline::llama-guard + agents: + - inline::meta-reference + telemetry: + - inline::meta-reference +image_type: conda diff --git a/llama_stack/templates/tgi/doc_template.md b/llama_stack/templates/tgi/doc_template.md new file mode 100644 index 000000000..0f6001e1a --- /dev/null +++ b/llama_stack/templates/tgi/doc_template.md @@ -0,0 +1,119 @@ +# TGI Distribution + +The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations. + +{{ providers_table }} + +You can use this distribution if you have GPUs and want to run an independent TGI server container for running inference. + +{% if run_config_env_vars %} +### Environment Variables + +The following environment variables can be configured: + +{% for var, (default_value, description) in run_config_env_vars.items() %} +- `{{ var }}`: {{ description }} (default: `{{ default_value }}`) +{% endfor %} +{% endif %} + + +## Setting up TGI server + +Please check the [TGI Getting Started Guide](https://github.com/huggingface/text-generation-inference?tab=readme-ov-file#get-started) to get a TGI endpoint. Here is a sample script to start a TGI server locally via Docker: + +```bash +export INFERENCE_PORT=8080 +export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct +export CUDA_VISIBLE_DEVICES=0 + +docker run --rm -it \ + -v $HOME/.cache/huggingface:/data \ + -p $INFERENCE_PORT:$INFERENCE_PORT \ + --gpus $CUDA_VISIBLE_DEVICES \ + ghcr.io/huggingface/text-generation-inference:2.3.1 \ + --dtype bfloat16 \ + --usage-stats off \ + --sharded false \ + --cuda-memory-fraction 0.7 \ + --model-id $INFERENCE_MODEL \ + --port $INFERENCE_PORT +``` + +If you are using Llama Stack Safety / Shield APIs, then you will need to also run another instance of a TGI with a corresponding safety model like `meta-llama/Llama-Guard-3-1B` using a script like: + +```bash +export SAFETY_PORT=8081 +export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B +export CUDA_VISIBLE_DEVICES=1 + +docker run --rm -it \ + -v $HOME/.cache/huggingface:/data \ + -p $SAFETY_PORT:$SAFETY_PORT \ + --gpus $CUDA_VISIBLE_DEVICES \ + ghcr.io/huggingface/text-generation-inference:2.3.1 \ + --dtype bfloat16 \ + --usage-stats off \ + --sharded false \ + --model-id $SAFETY_MODEL \ + --port $SAFETY_PORT +``` + +## Running Llama Stack + +Now you are ready to run Llama Stack with TGI as the inference provider. You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. 
+
+```bash
+LLAMA_STACK_PORT=5001
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v ./run.yaml:/root/my-run.yaml \
+  llamastack/distribution-{{ name }} \
+  --yaml-config /root/my-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT
+```
+
+If you are using Llama Stack Safety / Shield APIs, use:
+
+```bash
+docker run \
+  -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v ./run-with-safety.yaml:/root/my-run.yaml \
+  llamastack/distribution-{{ name }} \
+  --yaml-config /root/my-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT \
+  --env SAFETY_MODEL=$SAFETY_MODEL \
+  --env TGI_SAFETY_URL=http://host.docker.internal:$SAFETY_PORT
+```
+
+### Via Conda
+
+Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+
+```bash
+llama stack build --template {{ name }} --image-type conda
+llama stack run ./run.yaml \
+  --port 5001 \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env TGI_URL=http://127.0.0.1:$INFERENCE_PORT
+```
+
+If you are using Llama Stack Safety / Shield APIs, use:
+
+```bash
+llama stack run ./run-with-safety.yaml \
+  --port 5001 \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env TGI_URL=http://127.0.0.1:$INFERENCE_PORT \
+  --env SAFETY_MODEL=$SAFETY_MODEL \
+  --env TGI_SAFETY_URL=http://127.0.0.1:$SAFETY_PORT
+```
diff --git a/llama_stack/templates/tgi/run-with-safety.yaml b/llama_stack/templates/tgi/run-with-safety.yaml
new file mode 100644
index 000000000..b988c28e1
--- /dev/null
+++ b/llama_stack/templates/tgi/run-with-safety.yaml
@@ -0,0 +1,66 @@
+version: '2'
+image_name: tgi
+docker_image: llamastack/distribution-tgi:test-0.0.52rc3
+conda_env: tgi
+apis:
+- agents
+- inference
+- memory
+- safety
+- telemetry
+providers:
+  inference:
+  - provider_id: tgi-inference
+    provider_type: remote::tgi
+    config:
+      url: ${env.TGI_URL}
+  - provider_id: tgi-safety
+    provider_type: remote::tgi
+    config:
+      url: ${env.TGI_SAFETY_URL}
+  memory:
+  - provider_id: faiss
+    provider_type: inline::faiss
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/faiss_store.db
+  safety:
+  - provider_id: llama-guard
+    provider_type: inline::llama-guard
+    config: {}
+  agents:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      persistence_store:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/agents_store.db
+  telemetry:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config: {}
+metadata_store:
+  namespace: null
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db
+models:
+- metadata: {}
+  model_id: ${env.INFERENCE_MODEL}
+  provider_id: tgi-inference
+  provider_model_id: null
+- metadata: {}
+  model_id: ${env.SAFETY_MODEL}
+  provider_id: tgi-safety
+  provider_model_id: null
+shields:
+- params: null
+  shield_id: ${env.SAFETY_MODEL}
+  provider_id: null
+  provider_shield_id: null
+memory_banks: []
+datasets: []
+scoring_fns: []
+eval_tasks: []
diff --git a/llama_stack/templates/tgi/run.yaml b/llama_stack/templates/tgi/run.yaml
new file mode 100644
index 000000000..485c02ad8
--- /dev/null
+++ b/llama_stack/templates/tgi/run.yaml
@@ -0,0 +1,54 @@
+version: '2'
+image_name: tgi
+docker_image: llamastack/distribution-tgi:test-0.0.52rc3
+conda_env: tgi
+apis:
+- agents
+- inference
+- 
memory +- safety +- telemetry +providers: + inference: + - provider_id: tgi-inference + provider_type: remote::tgi + config: + url: ${env.TGI_URL} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db +models: +- metadata: {} + model_id: ${env.INFERENCE_MODEL} + provider_id: tgi-inference + provider_model_id: null +shields: [] +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/llama_stack/templates/tgi/tgi.py b/llama_stack/templates/tgi/tgi.py new file mode 100644 index 000000000..79f2ad395 --- /dev/null +++ b/llama_stack/templates/tgi/tgi.py @@ -0,0 +1,97 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from pathlib import Path + +from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput +from llama_stack.providers.remote.inference.tgi import TGIImplConfig +from llama_stack.templates.template import DistributionTemplate, RunConfigSettings + + +def get_distribution_template() -> DistributionTemplate: + providers = { + "inference": ["remote::tgi"], + "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "safety": ["inline::llama-guard"], + "agents": ["inline::meta-reference"], + "telemetry": ["inline::meta-reference"], + } + + inference_provider = Provider( + provider_id="tgi-inference", + provider_type="remote::tgi", + config=TGIImplConfig.sample_run_config( + url="${env.TGI_URL}", + ), + ) + + inference_model = ModelInput( + model_id="${env.INFERENCE_MODEL}", + provider_id="tgi-inference", + ) + safety_model = ModelInput( + model_id="${env.SAFETY_MODEL}", + provider_id="tgi-safety", + ) + + return DistributionTemplate( + name="tgi", + distro_type="self_hosted", + description="Use (an external) TGI server for running LLM inference", + docker_image="llamastack/distribution-tgi:test-0.0.52rc3", + template_path=Path(__file__).parent / "doc_template.md", + providers=providers, + default_models=[inference_model, safety_model], + run_configs={ + "run.yaml": RunConfigSettings( + provider_overrides={ + "inference": [inference_provider], + }, + default_models=[inference_model], + ), + "run-with-safety.yaml": RunConfigSettings( + provider_overrides={ + "inference": [ + inference_provider, + Provider( + provider_id="tgi-safety", + provider_type="remote::tgi", + config=TGIImplConfig.sample_run_config( + url="${env.TGI_SAFETY_URL}", + ), + ), + ], + }, + default_models=[ + inference_model, + safety_model, + ], + default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")], + ), + }, + run_config_env_vars={ + "LLAMASTACK_PORT": ( + "5001", + "Port for the Llama Stack distribution server", + ), + "INFERENCE_MODEL": ( + "meta-llama/Llama-3.2-3B-Instruct", + "Inference model loaded into the TGI server", + ), + 
"TGI_URL": ( + "http://127.0.0.1:8080}/v1", + "URL of the TGI server with the main inference model", + ), + "TGI_SAFETY_URL": ( + "http://127.0.0.1:8081/v1", + "URL of the TGI server with the safety model", + ), + "SAFETY_MODEL": ( + "meta-llama/Llama-Guard-3-1B", + "Name of the safety (Llama-Guard) model to use", + ), + }, + ) diff --git a/llama_stack/templates/together/__init__.py b/llama_stack/templates/together/__init__.py new file mode 100644 index 000000000..757995b6b --- /dev/null +++ b/llama_stack/templates/together/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from .together import get_distribution_template # noqa: F401 diff --git a/llama_stack/templates/together/build.yaml b/llama_stack/templates/together/build.yaml index 5c149272d..a4402ba93 100644 --- a/llama_stack/templates/together/build.yaml +++ b/llama_stack/templates/together/build.yaml @@ -1,11 +1,19 @@ +version: '2' name: together distribution_spec: - description: Use Together.ai for running LLM inference + description: Use Together.AI for running LLM inference + docker_image: null providers: - inference: remote::together + inference: + - remote::together memory: - inline::faiss - - remote::weaviate - safety: inline::llama-guard - agents: inline::meta-reference - telemetry: inline::meta-reference + - remote::chromadb + - remote::pgvector + safety: + - inline::llama-guard + agents: + - inline::meta-reference + telemetry: + - inline::meta-reference +image_type: conda diff --git a/llama_stack/templates/together/doc_template.md b/llama_stack/templates/together/doc_template.md new file mode 100644 index 000000000..5c1580dac --- /dev/null +++ b/llama_stack/templates/together/doc_template.md @@ -0,0 +1,60 @@ +# Fireworks Distribution + +The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations. + +{{ providers_table }} + +{% if run_config_env_vars %} +### Environment Variables + +The following environment variables can be configured: + +{% for var, (default_value, description) in run_config_env_vars.items() %} +- `{{ var }}`: {{ description }} (default: `{{ default_value }}`) +{% endfor %} +{% endif %} + +{% if default_models %} +### Models + +The following models are available by default: + +{% for model in default_models %} +- `{{ model.model_id }}` +{% endfor %} +{% endif %} + + +### Prerequisite: API Keys + +Make sure you have access to a Together API Key. You can get one by visiting [together.xyz](https://together.xyz/). + + +## Running Llama Stack with Together + +You can do this via Conda (build code) or Docker which has a pre-built image. + +### Via Docker + +This method allows you to get started quickly without having to build the distribution code. 
+ +```bash +LLAMA_STACK_PORT=5001 +docker run \ + -it \ + -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ + -v ./run.yaml:/root/my-run.yaml \ + llamastack/distribution-{{ name }} \ + --yaml-config /root/my-run.yaml \ + --port $LLAMA_STACK_PORT \ + --env TOGETHER_API_KEY=$TOGETHER_API_KEY +``` + +### Via Conda + +```bash +llama stack build --template together --image-type conda +llama stack run ./run.yaml \ + --port 5001 \ + --env TOGETHER_API_KEY=$TOGETHER_API_KEY +``` diff --git a/llama_stack/templates/together/run.yaml b/llama_stack/templates/together/run.yaml new file mode 100644 index 000000000..855ba0626 --- /dev/null +++ b/llama_stack/templates/together/run.yaml @@ -0,0 +1,87 @@ +version: '2' +image_name: together +docker_image: null +conda_env: together +apis: +- agents +- inference +- memory +- safety +- telemetry +providers: + inference: + - provider_id: together + provider_type: remote::together + config: + url: https://api.together.xyz/v1 + api_key: ${env.TOGETHER_API_KEY} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db +models: +- metadata: {} + model_id: meta-llama/Llama-3.1-8B-Instruct + provider_id: null + provider_model_id: meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo +- metadata: {} + model_id: meta-llama/Llama-3.1-70B-Instruct + provider_id: null + provider_model_id: meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo +- metadata: {} + model_id: meta-llama/Llama-3.1-405B-Instruct-FP8 + provider_id: null + provider_model_id: meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo +- metadata: {} + model_id: meta-llama/Llama-3.2-3B-Instruct + provider_id: null + provider_model_id: meta-llama/Llama-3.2-3B-Instruct-Turbo +- metadata: {} + model_id: meta-llama/Llama-3.2-11B-Vision-Instruct + provider_id: null + provider_model_id: meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo +- metadata: {} + model_id: meta-llama/Llama-3.2-90B-Vision-Instruct + provider_id: null + provider_model_id: meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo +- metadata: {} + model_id: meta-llama/Llama-Guard-3-8B + provider_id: null + provider_model_id: meta-llama/Meta-Llama-Guard-3-8B +- metadata: {} + model_id: meta-llama/Llama-Guard-3-11B-Vision + provider_id: null + provider_model_id: meta-llama/Llama-Guard-3-11B-Vision-Turbo +shields: +- params: null + shield_id: meta-llama/Llama-Guard-3-8B + provider_id: null + provider_shield_id: null +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/llama_stack/templates/together/together.py b/llama_stack/templates/together/together.py new file mode 100644 index 000000000..16265b04f --- /dev/null +++ b/llama_stack/templates/together/together.py @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
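+
+# This module defines the `together` distribution template. The accompanying
+# doc_template.md is rendered against it (note the Jinja-style {{ name }}
+# placeholders in that file), which is how the generated docs pick up the
+# provider, model, and environment-variable details declared below.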
+
+from pathlib import Path
+
+from llama_models.sku_list import all_registered_models
+
+from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput
+from llama_stack.providers.remote.inference.together import TogetherImplConfig
+from llama_stack.providers.remote.inference.together.together import MODEL_ALIASES
+
+from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
+
+
+def get_distribution_template() -> DistributionTemplate:
+    providers = {
+        "inference": ["remote::together"],
+        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "safety": ["inline::llama-guard"],
+        "agents": ["inline::meta-reference"],
+        "telemetry": ["inline::meta-reference"],
+    }
+
+    inference_provider = Provider(
+        provider_id="together",
+        provider_type="remote::together",
+        config=TogetherImplConfig.sample_run_config(),
+    )
+
+    core_model_to_hf_repo = {
+        m.descriptor(): m.huggingface_repo for m in all_registered_models()
+    }
+    default_models = [
+        ModelInput(
+            model_id=core_model_to_hf_repo[m.llama_model],
+            provider_model_id=m.provider_model_id,
+        )
+        for m in MODEL_ALIASES
+    ]
+
+    return DistributionTemplate(
+        name="together",
+        distro_type="self_hosted",
+        description="Use Together.AI for running LLM inference",
+        docker_image=None,
+        template_path=Path(__file__).parent / "doc_template.md",
+        providers=providers,
+        default_models=default_models,
+        run_configs={
+            "run.yaml": RunConfigSettings(
+                provider_overrides={
+                    "inference": [inference_provider],
+                },
+                default_models=default_models,
+                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
+            ),
+        },
+        run_config_env_vars={
+            "LLAMA_STACK_PORT": (
+                "5001",
+                "Port for the Llama Stack distribution server",
+            ),
+            "TOGETHER_API_KEY": (
+                "",
+                "Together.AI API Key",
+            ),
+        },
+    )
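+
+# A minimal sanity check for this template module (a sketch, assuming
+# `DistributionTemplate` exposes its constructor arguments as attributes):
+#
+#   python -c "from llama_stack.templates.together import get_distribution_template as t; print(t().name)"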