Update docs

This commit is contained in:
Ashwin Bharambe 2024-11-18 23:21:25 -08:00
parent 93abb8e208
commit d463d68e1e
3 changed files with 5 additions and 9 deletions

View file

@@ -60,9 +60,8 @@ docker run \
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-v ~/.llama:/root/.llama \ -v ~/.llama:/root/.llama \
-v ./run.yaml:/root/my-run.yaml \ -v ./run.yaml:/root/my-run.yaml \
--gpus=all \
llamastack/distribution-ollama \ llamastack/distribution-ollama \
/root/my-run.yaml \ --yaml-config /root/my-run.yaml \
--port $LLAMA_STACK_PORT \ --port $LLAMA_STACK_PORT \
--env INFERENCE_MODEL=$INFERENCE_MODEL \ --env INFERENCE_MODEL=$INFERENCE_MODEL \
--env OLLAMA_URL=http://host.docker.internal:11434 --env OLLAMA_URL=http://host.docker.internal:11434
@@ -76,9 +75,8 @@ docker run \
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-v ~/.llama:/root/.llama \ -v ~/.llama:/root/.llama \
-v ./run-with-safety.yaml:/root/my-run.yaml \ -v ./run-with-safety.yaml:/root/my-run.yaml \
--gpus=all \
llamastack/distribution-ollama \ llamastack/distribution-ollama \
/root/my-run.yaml \ --yaml-config /root/my-run.yaml \
--port $LLAMA_STACK_PORT \ --port $LLAMA_STACK_PORT \
--env INFERENCE_MODEL=$INFERENCE_MODEL \ --env INFERENCE_MODEL=$INFERENCE_MODEL \
--env SAFETY_MODEL=$SAFETY_MODEL \ --env SAFETY_MODEL=$SAFETY_MODEL \

View file

@@ -56,9 +56,8 @@ docker run \
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-v ~/.llama:/root/.llama \ -v ~/.llama:/root/.llama \
-v ./run.yaml:/root/my-run.yaml \ -v ./run.yaml:/root/my-run.yaml \
--gpus=all \
llamastack/distribution-{{ name }} \ llamastack/distribution-{{ name }} \
/root/my-run.yaml \ --yaml-config /root/my-run.yaml \
--port $LLAMA_STACK_PORT \ --port $LLAMA_STACK_PORT \
--env INFERENCE_MODEL=$INFERENCE_MODEL \ --env INFERENCE_MODEL=$INFERENCE_MODEL \
--env OLLAMA_URL=http://host.docker.internal:11434 --env OLLAMA_URL=http://host.docker.internal:11434
@@ -72,9 +71,8 @@ docker run \
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-v ~/.llama:/root/.llama \ -v ~/.llama:/root/.llama \
-v ./run-with-safety.yaml:/root/my-run.yaml \ -v ./run-with-safety.yaml:/root/my-run.yaml \
--gpus=all \
llamastack/distribution-{{ name }} \ llamastack/distribution-{{ name }} \
/root/my-run.yaml \ --yaml-config /root/my-run.yaml \
--port $LLAMA_STACK_PORT \ --port $LLAMA_STACK_PORT \
--env INFERENCE_MODEL=$INFERENCE_MODEL \ --env INFERENCE_MODEL=$INFERENCE_MODEL \
--env SAFETY_MODEL=$SAFETY_MODEL \ --env SAFETY_MODEL=$SAFETY_MODEL \

View file

@@ -78,7 +78,7 @@ models:
provider_model_id: meta-llama/Llama-Guard-3-11B-Vision-Turbo provider_model_id: meta-llama/Llama-Guard-3-11B-Vision-Turbo
shields: shields:
- params: null - params: null
shield_id: meta-llama/Llama-Guard-3-1B shield_id: meta-llama/Llama-Guard-3-8B
provider_id: null provider_id: null
provider_shield_id: null provider_shield_id: null
memory_banks: [] memory_banks: []