From 74b7a8cb2ed15dada5a15f235d151452c33606af Mon Sep 17 00:00:00 2001 From: Xi Yan Date: Mon, 21 Oct 2024 17:50:18 -0700 Subject: [PATCH] together distro verified --- distributions/ollama/gpu/compose.yaml | 2 +- distributions/together/README.md | 58 +++------------------------ distributions/together/build.yaml | 4 +- distributions/together/compose.yaml | 18 +++++++++ distributions/together/run.yaml | 42 +++++++++++++++++++ 5 files changed, 68 insertions(+), 56 deletions(-) diff --git a/distributions/ollama/gpu/compose.yaml b/distributions/ollama/gpu/compose.yaml index 2e3f85e45..7f9663a8d 100644 --- a/distributions/ollama/gpu/compose.yaml +++ b/distributions/ollama/gpu/compose.yaml @@ -33,7 +33,7 @@ services: volumes: - ~/.llama:/root/.llama # Link to ollama run.yaml file - - ./ollama-run.yaml:/root/llamastack-run-ollama.yaml + - ./run.yaml:/root/llamastack-run-ollama.yaml ports: - "5000:5000" # Hack: wait for ollama server to start before starting docker diff --git a/distributions/together/README.md b/distributions/together/README.md index 481525be2..d94887bab 100644 --- a/distributions/together/README.md +++ b/distributions/together/README.md @@ -3,57 +3,21 @@ ### Connect to a Llama Stack Together Endpoint - You may connect to a hosted endpoint `https://llama-stack.together.ai`, serving a Llama Stack distribution -### Start a Together distribution -``` - -``` - -# TGI Distribution - -The `llamastack/distribution-tgi` distribution consists of the following provider configurations. +The `llamastack/distribution-together` distribution consists of the following provider configurations. 
| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** | |----------------- |--------------- |---------------- |-------------------------------------------------- |---------------- |---------------- | -| **Provider(s)** | remote::tgi | meta-reference | meta-reference, remote::pgvector, remote::chroma | meta-reference | meta-reference | +| **Provider(s)** | remote::together | meta-reference | remote::weaviate | meta-reference | meta-reference | -### Start the Distribution (Single Node GPU) - -> [!NOTE] -> This assumes you have access to GPU to start a TGI server with access to your GPU. - - -``` -$ cd llama_stack/distribution/docker/tgi -$ ls -compose.yaml tgi-run.yaml -$ docker compose up -``` - -The script will first start up TGI server, then start up Llama Stack distribution server hooking up to the remote TGI provider for inference. You should be able to see the following outputs -- -``` -[text-generation-inference] | 2024-10-15T18:56:33.810397Z INFO text_generation_router::server: router/src/server.rs:1813: Using config Some(Llama) -[text-generation-inference] | 2024-10-15T18:56:33.810448Z WARN text_generation_router::server: router/src/server.rs:1960: Invalid hostname, defaulting to 0.0.0.0 -[text-generation-inference] | 2024-10-15T18:56:33.864143Z INFO text_generation_router::server: router/src/server.rs:2353: Connected -INFO: Started server process [1] -INFO: Waiting for application startup. -INFO: Application startup complete. -INFO: Uvicorn running on http://[::]:5000 (Press CTRL+C to quit) -``` - -To kill the server -``` -docker compose down -``` - ### Start the Distribution (Single Node CPU) > [!NOTE] -> This assumes you have an hosted endpoint compatible with TGI server. +> This assumes you have a hosted endpoint at Together with an API key. 
``` -$ cd llama-stack/distribution/tgi/cpu +$ cd llama-stack/distribution/together $ ls compose.yaml run.yaml $ docker compose up @@ -70,20 +34,8 @@ inference: ### (Alternative) TGI server + llama stack run (Single Node GPU) -If you wish to separately spin up a TGI server, and connect with Llama Stack, you may use the following commands. - -#### (optional) Start TGI server locally -- Please check the [TGI Getting Started Guide](https://github.com/huggingface/text-generation-inference?tab=readme-ov-file#get-started) to get a TGI endpoint. - ``` -docker run --rm -it -v $HOME/.cache/huggingface:/data -p 5009:5009 --gpus all ghcr.io/huggingface/text-generation-inference:latest --dtype bfloat16 --usage-stats on --sharded false --model-id meta-llama/Llama-3.1-8B-Instruct --port 5009 -``` - - -#### Start Llama Stack server pointing to TGI server - -``` -docker run --network host -it -p 5000:5000 -v ./run.yaml:/root/my-run.yaml --gpus=all llamastack-local-cpu --yaml_config /root/my-run.yaml +docker run --network host -it -p 5000:5000 -v ./run.yaml:/root/my-run.yaml --gpus=all llamastack/distribution-together --yaml_config /root/my-run.yaml ``` Make sure in you `run.yaml` file, you inference provider is pointing to the correct Together URL server endpoint. E.g. 
diff --git a/distributions/together/build.yaml b/distributions/together/build.yaml index 67ba2eefa..49eab859d 100644 --- a/distributions/together/build.yaml +++ b/distributions/together/build.yaml @@ -3,8 +3,8 @@ distribution_spec: description: Use Together.ai for running LLM inference providers: inference: remote::together - memory: meta-reference + memory: remote::weaviate safety: remote::together agents: meta-reference telemetry: meta-reference -image_type: conda +image_type: docker diff --git a/distributions/together/compose.yaml b/distributions/together/compose.yaml index e69de29bb..75c96b686 100644 --- a/distributions/together/compose.yaml +++ b/distributions/together/compose.yaml @@ -0,0 +1,18 @@ +services: + llamastack: + image: llamastack/distribution-together + network_mode: "host" + volumes: + - ~/.llama:/root/.llama + # Link to the together run.yaml file + - ./run.yaml:/root/llamastack-run-together.yaml + ports: + - "5000:5000" + # Start the Llama Stack server against the together run config + entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-together.yaml" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/together/run.yaml b/distributions/together/run.yaml index e69de29bb..355080f61 100644 --- a/distributions/together/run.yaml +++ b/distributions/together/run.yaml @@ -0,0 +1,42 @@ +version: '2' +built_at: '2024-10-08T17:40:45.325529' +image_name: local +docker_image: null +conda_env: local +apis: +- shields +- agents +- models +- memory +- memory_banks +- inference +- safety +providers: + inference: + - provider_id: together0 + provider_type: remote::together + config: + url: https://api.together.xyz/v1 + safety: + - provider_id: together0 + provider_type: remote::together + config: + url: https://api.together.xyz/v1 + memory: + - provider_id: meta0 + provider_type: remote::weaviate + config: + weaviate_api_key: + 
weaviate_cluster_url: + agents: + - provider_id: meta0 + provider_type: meta-reference + config: + persistence_store: + namespace: null + type: sqlite + db_path: ~/.llama/runtime/kvstore.db + telemetry: + - provider_id: meta0 + provider_type: meta-reference + config: {}