Repository: https://github.com/meta-llama/llama-stack.git

Commit 985ff4d6ce (parent 7b8748c53e): update distributions/readmes
5 changed files with 102 additions and 8 deletions
@@ -4,11 +4,9 @@ services:
     network_mode: "host"
     volumes:
       - ~/.llama:/root/.llama
-      # Link to ollama run.yaml file
       - ./run.yaml:/root/llamastack-run-fireworks.yaml
     ports:
       - "5000:5000"
-    # Hack: wait for ollama server to start before starting docker
     entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-fireworks.yaml"
     deploy:
       restart_policy:
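The entrypoint in the compose file above simply launches the llama-stack server module directly, so the same command can also be run outside docker (for example from a conda environment built for this distribution, as the README below mentions). A minimal sketch; the module path and `--yaml_config` flag come from the diff itself, while the local `./run.yaml` path is an assumption:

```
# Hypothetical dockerless equivalent of the compose entrypoint above.
python -m llama_stack.distribution.server.server --yaml_config ./run.yaml
```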
@@ -10,6 +10,13 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
 
 ### Start the Distribution (Single Node GPU)
 
+```
+$ cd distributions/meta-reference-gpu
+$ ls
+build.yaml compose.yaml README.md run.yaml
+$ docker compose up
+```
+
 > [!NOTE]
 > This assumes you have access to GPU to start a local server with access to your GPU.
 
@@ -18,7 +25,7 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
 > `~/.llama` should be the path containing downloaded weights of Llama models.
 
 
-To download and start running a pre-built docker container, you may use the following commands:
+This will download and start running a pre-built docker container. Alternatively, you may use the following commands:
 
 ```
 docker run -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./run.yaml:/root/my-run.yaml --gpus=all distribution-meta-reference-gpu --yaml_config /root/my-run.yaml
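Before mounting `~/.llama` into the container, it is worth confirming that it actually holds downloaded model weights. A minimal sketch; the `checkpoints/` layout is the usual output of the `llama` download CLI and is an assumption here, not something this README states:

```
# List locally downloaded Llama model weights (hypothetical layout under ~/.llama).
ls ~/.llama/checkpoints
```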
@@ -26,3 +33,54 @@ docker run -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./run.yaml:/root/my-run.
 
 ### Alternative (Build and start distribution locally via conda)
 - You may checkout the [Getting Started](../../docs/getting_started.md) for more details on building locally via conda and starting up a meta-reference distribution.
+
+### Start Distribution With pgvector/chromadb Memory Provider
+##### pgvector
+1. Start running the pgvector server:
+
+```
+docker run --network host --name mypostgres -it -p 5432:5432 -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgres pgvector/pgvector:pg16
+```
+
+2. Edit the `run.yaml` file to point to the pgvector server.
+```
+memory:
+  - provider_id: pgvector
+    provider_type: remote::pgvector
+    config:
+      host: 127.0.0.1
+      port: 5432
+      db: postgres
+      user: postgres
+      password: mysecretpassword
+```
+
+> [!NOTE]
+> If you get a `RuntimeError: Vector extension is not installed.` error, you will need to run `CREATE EXTENSION IF NOT EXISTS vector;` to include the vector extension. E.g.
+
+```
+docker exec -it mypostgres ./bin/psql -U postgres
+postgres=# CREATE EXTENSION IF NOT EXISTS vector;
+postgres=# SELECT extname from pg_extension;
+ extname
+```
+
+3. Run `docker compose up` with the updated `run.yaml` file.
+
+##### chromadb
+1. Start running the chromadb server:
+```
+docker run -it --network host --name chromadb -p 6000:6000 -v ./chroma_vdb:/chroma/chroma -e IS_PERSISTENT=TRUE chromadb/chroma:latest
+```
+
+2. Edit the `run.yaml` file to point to the chromadb server.
+```
+memory:
+  - provider_id: remote::chromadb
+    provider_type: remote::chromadb
+    config:
+      host: localhost
+      port: 6000
+```
+
+3. Run `docker compose up` with the updated `run.yaml` file.
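Before re-running `docker compose up` against the chromadb provider, it can save a debugging round-trip to confirm the chroma container is actually up and listening. A minimal sketch; the heartbeat route is Chroma's standard liveness endpoint rather than anything stated in this README, and the port must match whatever the chroma server is really listening on (the run.yaml snippet above assumes 6000):

```
# Check the container started cleanly.
docker logs chromadb
# Hypothetical liveness probe against Chroma's heartbeat endpoint; adjust the
# port if your server listens somewhere other than 6000.
curl http://localhost:6000/api/v1/heartbeat
```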
distributions/meta-reference-gpu/compose.yaml (new file, 35 lines)

@@ -0,0 +1,35 @@
+services:
+  llamastack:
+    image: llamastack/distribution-meta-reference-gpu
+    network_mode: "host"
+    volumes:
+      - ~/.llama:/root/.llama
+      - ./run.yaml:/root/my-run.yaml
+    ports:
+      - "5000:5000"
+    devices:
+      - nvidia.com/gpu=all
+    environment:
+      - CUDA_VISIBLE_DEVICES=0
+    command: []
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              # that's the closest analogue to --gpus; provide
+              # an integer amount of devices or 'all'
+              count: 1
+              # Devices are reserved using a list of capabilities, making
+              # capabilities the only required field. A device MUST
+              # satisfy all the requested capabilities for a successful
+              # reservation.
+              capabilities: [gpu]
+    runtime: nvidia
+    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
+    deploy:
+      restart_policy:
+        condition: on-failure
+        delay: 3s
+        max_attempts: 5
+        window: 60s
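As a usage sketch for this new compose file (assuming the NVIDIA container toolkit is installed so the `runtime: nvidia` / GPU reservation above can be honored; the service name `llamastack` comes from the file itself):

```
cd distributions/meta-reference-gpu
docker compose up -d
# Confirm the GPU is visible inside the running service container.
docker compose exec llamastack nvidia-smi
```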
@@ -33,9 +33,14 @@ providers:
       prompt_guard_shield:
         model: Prompt-Guard-86M
   memory:
-  - provider_id: meta0
-    provider_type: meta-reference
-    config: {}
+  - provider_id: pgvector
+    provider_type: remote::pgvector
+    config:
+      host: 127.0.0.1
+      port: 5432
+      db: postgres
+      user: postgres
+      password: mysecretpassword
   agents:
   - provider_id: meta0
     provider_type: meta-reference
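Since this run.yaml now points the memory provider at the local pgvector database, a quick sanity check before starting the stack can reuse the same container and psql commands shown in the README above:

```
# Ensure the vector extension exists and list installed extensions
# ("mypostgres" is the container name from the README's pgvector step).
docker exec -it mypostgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS vector;"
docker exec -it mypostgres psql -U postgres -c "SELECT extname FROM pg_extension;"
```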
@@ -4,11 +4,9 @@ services:
     network_mode: "host"
     volumes:
      - ~/.llama:/root/.llama
-      # Link to ollama run.yaml file
       - ./run.yaml:/root/llamastack-run-together.yaml
     ports:
       - "5000:5000"
-    # Hack: wait for ollama server to start before starting docker
     entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-together.yaml"
     deploy:
       restart_policy: