From 1ae61e8d5fc15c76f72bd2a2d2ec1b722c3b9b9e Mon Sep 17 00:00:00 2001
From: Charlie Doern
Date: Fri, 16 May 2025 17:31:12 -0400
Subject: [PATCH] fix: replace all instances of --yaml-config with --config
 (#2196)

# What does this PR do?

start_stack.sh was still passing --yaml-config, which is deprecated, and a
number of the distro docs still mentioned it. This replaces every remaining
instance of --yaml-config, and the logic around it, with --config.

resolves #2189
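To make the rename concrete, here is an illustrative before/after using the
server invocation from the Kubernetes deployment doc; only the flag spelling
changes, the config file itself is untouched:

```sh
# Deprecated spelling: the server emits a DeprecationWarning for this today
# and will stop accepting it in a future release.
python -m llama_stack.distribution.server.server --yaml-config /app/config.yaml

# Supported spelling: identical behavior otherwise.
python -m llama_stack.distribution.server.server --config /app/config.yaml
```

start_stack.sh now forwards --config in the same way for both the venv/conda
and container paths.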
Signed-off-by: Charlie Doern
---
 docs/source/distributions/kubernetes_deployment.md          | 2 +-
 docs/source/distributions/remote_hosted_distro/watsonx.md   | 2 +-
 docs/source/distributions/self_hosted_distro/cerebras.md    | 2 +-
 docs/source/distributions/self_hosted_distro/dell.md        | 2 +-
 docs/source/distributions/self_hosted_distro/nvidia.md      | 2 +-
 docs/source/distributions/self_hosted_distro/ollama.md      | 2 +-
 docs/source/distributions/self_hosted_distro/remote-vllm.md | 4 ++--
 docs/source/distributions/self_hosted_distro/tgi.md         | 2 +-
 llama_stack/distribution/server/server.py                   | 2 +-
 llama_stack/distribution/start_stack.sh                     | 8 ++++----
 llama_stack/templates/cerebras/doc_template.md              | 2 +-
 llama_stack/templates/dell/doc_template.md                  | 2 +-
 llama_stack/templates/nvidia/doc_template.md                | 2 +-
 llama_stack/templates/ollama/doc_template.md                | 2 +-
 llama_stack/templates/remote-vllm/doc_template.md           | 4 ++--
 llama_stack/templates/tgi/doc_template.md                   | 2 +-
 llama_stack/templates/watsonx/doc_template.md               | 2 +-
 17 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/docs/source/distributions/kubernetes_deployment.md b/docs/source/distributions/kubernetes_deployment.md
index 21ec02012..f43039824 100644
--- a/docs/source/distributions/kubernetes_deployment.md
+++ b/docs/source/distributions/kubernetes_deployment.md
@@ -172,7 +172,7 @@ spec:
       - name: llama-stack
         image: localhost/llama-stack-run-k8s:latest
         imagePullPolicy: IfNotPresent
-        command: ["python", "-m", "llama_stack.distribution.server.server", "--yaml-config", "/app/config.yaml"]
+        command: ["python", "-m", "llama_stack.distribution.server.server", "--config", "/app/config.yaml"]
         ports:
           - containerPort: 5000
         volumeMounts:
diff --git a/docs/source/distributions/remote_hosted_distro/watsonx.md b/docs/source/distributions/remote_hosted_distro/watsonx.md
index d8d327bb5..ec1b98059 100644
--- a/docs/source/distributions/remote_hosted_distro/watsonx.md
+++ b/docs/source/distributions/remote_hosted_distro/watsonx.md
@@ -70,7 +70,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-watsonx \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env WATSONX_API_KEY=$WATSONX_API_KEY \
   --env WATSONX_PROJECT_ID=$WATSONX_PROJECT_ID \
diff --git a/docs/source/distributions/self_hosted_distro/cerebras.md b/docs/source/distributions/self_hosted_distro/cerebras.md
index 329c9b802..3c4db1b75 100644
--- a/docs/source/distributions/self_hosted_distro/cerebras.md
+++ b/docs/source/distributions/self_hosted_distro/cerebras.md
@@ -52,7 +52,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-cerebras \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY
 ```
diff --git a/docs/source/distributions/self_hosted_distro/dell.md b/docs/source/distributions/self_hosted_distro/dell.md
index 2e987985c..eded3bdc4 100644
--- a/docs/source/distributions/self_hosted_distro/dell.md
+++ b/docs/source/distributions/self_hosted_distro/dell.md
@@ -155,7 +155,7 @@ docker run \
   -v $HOME/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-dell \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env DEH_URL=$DEH_URL \
diff --git a/docs/source/distributions/self_hosted_distro/nvidia.md b/docs/source/distributions/self_hosted_distro/nvidia.md
index a5bbbfdee..e84b5c525 100644
--- a/docs/source/distributions/self_hosted_distro/nvidia.md
+++ b/docs/source/distributions/self_hosted_distro/nvidia.md
@@ -143,7 +143,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-nvidia \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env NVIDIA_API_KEY=$NVIDIA_API_KEY
 ```
diff --git a/docs/source/distributions/self_hosted_distro/ollama.md b/docs/source/distributions/self_hosted_distro/ollama.md
index 5d8935fe2..a3d67d4ce 100644
--- a/docs/source/distributions/self_hosted_distro/ollama.md
+++ b/docs/source/distributions/self_hosted_distro/ollama.md
@@ -97,7 +97,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/ollama/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-ollama \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env SAFETY_MODEL=$SAFETY_MODEL \
diff --git a/docs/source/distributions/self_hosted_distro/remote-vllm.md b/docs/source/distributions/self_hosted_distro/remote-vllm.md
index 2ff4bad5b..6e7cf410d 100644
--- a/docs/source/distributions/self_hosted_distro/remote-vllm.md
+++ b/docs/source/distributions/self_hosted_distro/remote-vllm.md
@@ -233,7 +233,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./llama_stack/templates/remote-vllm/run.yaml:/root/my-run.yaml \
   llamastack/distribution-remote-vllm \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1
@@ -255,7 +255,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/remote-vllm/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-remote-vllm \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 \
diff --git a/docs/source/distributions/self_hosted_distro/tgi.md b/docs/source/distributions/self_hosted_distro/tgi.md
index 7a75aa559..24f9d03ec 100644
--- a/docs/source/distributions/self_hosted_distro/tgi.md
+++ b/docs/source/distributions/self_hosted_distro/tgi.md
@@ -117,7 +117,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-tgi \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT \
diff --git a/llama_stack/distribution/server/server.py b/llama_stack/distribution/server/server.py
index ff0775dd6..15a8058ae 100644
--- a/llama_stack/distribution/server/server.py
+++ b/llama_stack/distribution/server/server.py
@@ -391,7 +391,7 @@ def main(args: argparse.Namespace | None = None):
             raise ValueError(f"Template {args.template} does not exist")
         log_line = f"Using template {args.template} config file: {config_file}"
     else:
-        raise ValueError("Either --yaml-config or --template must be provided")
+        raise ValueError("Either --config or --template must be provided")
 
     logger_config = None
     with open(config_file) as fp:
diff --git a/llama_stack/distribution/start_stack.sh b/llama_stack/distribution/start_stack.sh
index bf49e1619..996935a5e 100755
--- a/llama_stack/distribution/start_stack.sh
+++ b/llama_stack/distribution/start_stack.sh
@@ -54,7 +54,7 @@ other_args=""
 # Process remaining arguments
 while [[ $# -gt 0 ]]; do
     case "$1" in
-        --config|--yaml-config)
+        --config)
             if [[ -n "$2" ]]; then
                 yaml_config="$2"
                 shift 2
@@ -121,7 +121,7 @@ if [[ "$env_type" == "venv" || "$env_type" == "conda" ]]; then
     set -x
 
     if [ -n "$yaml_config" ]; then
-        yaml_config_arg="--yaml-config $yaml_config"
+        yaml_config_arg="--config $yaml_config"
     else
         yaml_config_arg=""
     fi
@@ -181,9 +181,9 @@ elif [[ "$env_type" == "container" ]]; then
 
     # Add yaml config if provided, otherwise use default
     if [ -n "$yaml_config" ]; then
-        cmd="$cmd -v $yaml_config:/app/run.yaml --yaml-config /app/run.yaml"
+        cmd="$cmd -v $yaml_config:/app/run.yaml --config /app/run.yaml"
     else
-        cmd="$cmd --yaml-config /app/run.yaml"
+        cmd="$cmd --config /app/run.yaml"
     fi
 
     # Add any other args
diff --git a/llama_stack/templates/cerebras/doc_template.md b/llama_stack/templates/cerebras/doc_template.md
index 76f8c34ad..5cae2b2da 100644
--- a/llama_stack/templates/cerebras/doc_template.md
+++ b/llama_stack/templates/cerebras/doc_template.md
@@ -46,7 +46,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY
 ```
diff --git a/llama_stack/templates/dell/doc_template.md b/llama_stack/templates/dell/doc_template.md
index 26f07130b..6bdd7f81c 100644
--- a/llama_stack/templates/dell/doc_template.md
+++ b/llama_stack/templates/dell/doc_template.md
@@ -143,7 +143,7 @@ docker run \
   -v $HOME/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env DEH_URL=$DEH_URL \
diff --git a/llama_stack/templates/nvidia/doc_template.md b/llama_stack/templates/nvidia/doc_template.md
index 068dd7ac3..50c96802f 100644
--- a/llama_stack/templates/nvidia/doc_template.md
+++ b/llama_stack/templates/nvidia/doc_template.md
@@ -116,7 +116,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env NVIDIA_API_KEY=$NVIDIA_API_KEY
 ```
diff --git a/llama_stack/templates/ollama/doc_template.md b/llama_stack/templates/ollama/doc_template.md
index f961ab7ed..aaa65bab2 100644
--- a/llama_stack/templates/ollama/doc_template.md
+++ b/llama_stack/templates/ollama/doc_template.md
@@ -86,7 +86,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/ollama/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env SAFETY_MODEL=$SAFETY_MODEL \
diff --git a/llama_stack/templates/remote-vllm/doc_template.md b/llama_stack/templates/remote-vllm/doc_template.md
index 3cede6080..5684888da 100644
--- a/llama_stack/templates/remote-vllm/doc_template.md
+++ b/llama_stack/templates/remote-vllm/doc_template.md
@@ -220,7 +220,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./llama_stack/templates/remote-vllm/run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1
@@ -242,7 +242,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/remote-vllm/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 \
diff --git a/llama_stack/templates/tgi/doc_template.md b/llama_stack/templates/tgi/doc_template.md
index b69ccaa56..68b475893 100644
--- a/llama_stack/templates/tgi/doc_template.md
+++ b/llama_stack/templates/tgi/doc_template.md
@@ -105,7 +105,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT \
diff --git a/llama_stack/templates/watsonx/doc_template.md b/llama_stack/templates/watsonx/doc_template.md
index af0ae15a8..f28dbf0bf 100644
--- a/llama_stack/templates/watsonx/doc_template.md
+++ b/llama_stack/templates/watsonx/doc_template.md
@@ -56,7 +56,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env WATSONX_API_KEY=$WATSONX_API_KEY \
   --env WATSONX_PROJECT_ID=$WATSONX_PROJECT_ID \