Update default port from 5000 -> 8321

Ashwin Bharambe 2025-01-16 15:26:48 -08:00
parent f1faa9c924
commit 03ac84a829
18 changed files with 27 additions and 27 deletions
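Every file below receives the same one-line change: the published port mapping moves from "5000:5000" to "8321:8321". A minimal sketch of the resulting service block, assuming a hypothetical LLAMA_STACK_PORT variable so the host port can still be overridden (the variable and the service name llamastack are illustrations, not part of this commit):

services:
  llamastack:
    volumes:
      - ~/.llama:/root/.llama
      - ./run.yaml:/root/my-run.yaml
    ports:
      # New default: host port 8321 mapped to container port 8321.
      # LLAMA_STACK_PORT is a hypothetical override, not introduced here.
      - "${LLAMA_STACK_PORT:-8321}:8321"
    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"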

@@ -5,7 +5,7 @@ services:
       - ~/.llama:/root/.llama
       - ./run.yaml:/root/llamastack-run-bedrock.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-bedrock.yaml"
     deploy:
       restart_policy:

@@ -6,7 +6,7 @@ services:
       - ~/.llama:/root/.llama
       - ./run.yaml:/root/llamastack-run-cerebras.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-cerebras.yaml"
     deploy:
       restart_policy:

@@ -40,7 +40,7 @@ services:
       # Link to TGI run.yaml file
       - ./run.yaml:/root/my-run.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     # Hack: wait for TGI server to start before starting docker
     entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
       restart_policy:
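The sleep 60 above is a crude readiness gate. A healthcheck-based alternative is sketched below; the service name text-generation-inference and the curl-based probe are assumptions about this compose file, not something this commit adds (TGI does expose GET /health, but the image must ship curl for this exact test to work):

services:
  text-generation-inference:  # assumed service name
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:80/health"]
      interval: 5s
      retries: 30
  llamastack:
    depends_on:
      text-generation-inference:
        condition: service_healthy  # replaces the fixed sleep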

@@ -6,7 +6,7 @@ services:
       - ~/.llama:/root/.llama
       - ./run.yaml:/root/llamastack-run-fireworks.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-fireworks.yaml"
     deploy:
       restart_policy:

@@ -6,7 +6,7 @@ services:
       - ~/.llama:/root/.llama
       - ./run.yaml:/root/my-run.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     devices:
       - nvidia.com/gpu=all
     environment:

@@ -6,7 +6,7 @@ services:
       - ~/.llama:/root/.llama
       - ./run.yaml:/root/my-run.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     devices:
       - nvidia.com/gpu=all
     environment:

@@ -6,7 +6,7 @@ services:
       - ~/.llama:/root/.llama
       - ./run.yaml:/root/llamastack-run-nvidia.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     environment:
       - INFERENCE_MODEL=${INFERENCE_MODEL:-Llama3.1-8B-Instruct}
       - NVIDIA_API_KEY=${NVIDIA_API_KEY:-}

@@ -6,7 +6,7 @@ services:
       - ~/.llama:/root/.llama
       - ./run.yaml:/root/llamastack-run-together.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-together.yaml"
     deploy:
       restart_policy:

@@ -6,7 +6,7 @@ services:
       - ~/.llama:/root/.llama
       - ./run.yaml:/root/my-run.yaml
     ports:
-      - "5000:5000"
+      - "8321:8321"
     devices:
       - nvidia.com/gpu=all
     environment:
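To confirm the new default end to end, a port-level healthcheck can be attached to the llama-stack service itself. This is a sketch that only probes the TCP port, assuming bash is available in the image (the entrypoints above already invoke bash); it does not assume any particular llama-stack API route:

    healthcheck:
      # Succeeds once something is listening on the new default port 8321.
      test: ["CMD-SHELL", "bash -c '</dev/tcp/localhost/8321' || exit 1"]
      interval: 10s
      retries: 12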