mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-07-31 16:01:46 +00:00

commit 4f367cbf6b
parent 8cd7e406c0

    remove network host

5 changed files with 65 additions and 6 deletions
@@ -3,9 +3,8 @@ services:
     image: chromadb/chroma:latest
     container_name: chromadb
     ports:
-      - "6000:6000"
+      - "8000:8000"
     volumes:
       - ./chroma_vdb:/chroma/chroma
     environment:
       - IS_PERSISTENT=TRUE
-    network_mode: "host"
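With network_mode: "host" removed, the chromadb container joins the compose project's default bridge network, so the published port mapping is now what exposes it to the host, and the mapping is corrected to Chroma's default listen port 8000. Other containers in the same project address it by service name instead of localhost. A minimal sketch of a consumer wired up that way, where the app service, its image, and the CHROMA_URL variable are illustrative assumptions, not part of this commit:

    services:
      chromadb:
        image: chromadb/chroma:latest
        ports:
          - "8000:8000"                       # host:container; Chroma listens on 8000
      app:                                    # hypothetical consumer service
        image: my-app:latest                  # hypothetical image
        environment:
          - CHROMA_URL=http://chromadb:8000   # service name resolves on the bridge network
        depends_on:
          - chromadb

From the host itself, the same server stays reachable at http://localhost:8000.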
@@ -3,14 +3,13 @@ services:
     depends_on:
       - ollama
     image: llamastack/distribution-ollama
-    network_mode: "host"
     volumes:
       - ~/.llama:/root/.llama
-      # Link to ollama run.yaml file
+      # Link to run.yaml file
       - ./run.yaml:/root/my-run.yaml
     ports:
       - "5000:5000"
-    # Hack: wait for ollama server to start before starting docker
+    # Hack: wait for server to start before starting docker
     entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
     deploy:
       restart_policy:
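The sleep 60 in the entrypoint is a startup-ordering workaround: depends_on only waits for the ollama container to be created, not for the server inside it to accept requests. A gentler alternative is a healthcheck plus a service_healthy condition; the sketch below assumes the ollama CLI inside the image can query its own local server:

    services:
      ollama:
        image: ollama/ollama:latest
        healthcheck:
          test: ["CMD", "ollama", "list"]   # assumption: succeeds once the server is up
          interval: 5s
          timeout: 5s
          retries: 12
      llamastack:
        image: llamastack/distribution-ollama
        depends_on:
          ollama:
            condition: service_healthy      # start only after the healthcheck passes
        entrypoint: python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml   # the sleep is no longer needed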
docker/llamastack/together/compose.yaml (new file, +15)
@@ -0,0 +1,15 @@
+services:
+  llamastack:
+    image: llamastack/distribution-together
+    volumes:
+      - ~/.llama:/root/.llama
+      - ./run.yaml:/root/llamastack-run-together.yaml
+    ports:
+      - "5000:5000"
+    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-together.yaml"
+    deploy:
+      restart_policy:
+        condition: on-failure
+        delay: 3s
+        max_attempts: 5
+        window: 60s
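One note on the restart settings: deploy.restart_policy comes from swarm mode and is not honored by every compose runner; for a plain docker compose up, the top-level restart key is the usual stand-in. A minimal sketch of that variant:

    services:
      llamastack:
        image: llamastack/distribution-together
        restart: on-failure:5   # non-swarm analogue: retry up to five times on failure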
docker/llamastack/together/run.yaml (new file, +47)
@@ -0,0 +1,47 @@
+version: '2'
+built_at: '2024-10-08T17:40:45.325529'
+image_name: local
+docker_image: null
+conda_env: local
+apis:
+- shields
+- agents
+- models
+- memory
+- memory_banks
+- inference
+- safety
+providers:
+  inference:
+  - provider_id: together0
+    provider_type: remote::together
+    config:
+      url: https://api.together.xyz/v1
+      # api_key: <ENTER_YOUR_API_KEY>
+  safety:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config:
+      llama_guard_shield:
+        model: Llama-Guard-3-1B
+        excluded_categories: []
+        disable_input_check: false
+        disable_output_check: false
+      prompt_guard_shield:
+        model: Prompt-Guard-86M
+  memory:
+  - provider_id: meta0
+    provider_type: remote::weaviate
+    config: {}
+  agents:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config:
+      persistence_store:
+        namespace: null
+        type: sqlite
+        db_path: ~/.llama/runtime/kvstore.db
+  telemetry:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config: {}
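The api_key line in the inference provider config ships commented out; one straightforward way to supply a credential is to uncomment it with a real Together API key. A minimal sketch of that form, keeping the original placeholder, which must be substituted:

    providers:
      inference:
      - provider_id: together0
        provider_type: remote::together
        config:
          url: https://api.together.xyz/v1
          api_key: <ENTER_YOUR_API_KEY>   # placeholder; substitute an actual key

Note also that db_path ~/.llama/runtime/kvstore.db lands under /root/.llama inside the container, which the compose file bind-mounts from the host, so the agents' sqlite store survives container restarts.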
@@ -1,7 +1,6 @@
 services:
   ollama:
     image: ollama/ollama:latest
-    network_mode: "host"
     volumes:
       - ollama:/root/.ollama # this solution synchronizes with the docker volume and loads the model rocket fast
     ports:
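The ollama service uses a named volume (ollama:/root/.ollama) rather than a bind mount, which is why pulled models persist and reload quickly across container recreations. A named volume also needs a top-level volumes declaration; that part of the file sits outside this hunk, so the sketch below only illustrates the expected shape:

    services:
      ollama:
        image: ollama/ollama:latest
        volumes:
          - ollama:/root/.ollama   # named volume: models persist across recreations

    volumes:
      ollama:                      # top-level declaration the named volume resolves to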