fix: replace all instances of --yaml-config with --config (#2196)

# What does this PR do?

start_stack.sh was using `--yaml-config`, which is deprecated.

A number of the distribution docs also referenced `--yaml-config`. This PR replaces all
instances of `--yaml-config`, and the logic around it, with `--config`.

resolves #2189
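
The change is only a flag rename at the call sites; a minimal before/after sketch of the server invocation, assuming a local `run.yaml` (the config path here is illustrative, not taken from this diff):

```bash
# deprecated flag being phased out:
python -m llama_stack.distribution.server.server --yaml-config ./run.yaml

# replacement used throughout this PR:
python -m llama_stack.distribution.server.server --config ./run.yaml
```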

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Charlie Doern 2025-05-16 17:31:12 -04:00 committed by GitHub
parent 65cf076f13
commit 1ae61e8d5f
17 changed files with 24 additions and 24 deletions


@@ -172,7 +172,7 @@ spec:
       - name: llama-stack
         image: localhost/llama-stack-run-k8s:latest
         imagePullPolicy: IfNotPresent
-        command: ["python", "-m", "llama_stack.distribution.server.server", "--yaml-config", "/app/config.yaml"]
+        command: ["python", "-m", "llama_stack.distribution.server.server", "--config", "/app/config.yaml"]
         ports:
           - containerPort: 5000
         volumeMounts:


@@ -70,7 +70,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-watsonx \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env WATSONX_API_KEY=$WATSONX_API_KEY \
   --env WATSONX_PROJECT_ID=$WATSONX_PROJECT_ID \


@@ -52,7 +52,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-cerebras \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY
 ```


@@ -155,7 +155,7 @@ docker run \
   -v $HOME/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-dell \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env DEH_URL=$DEH_URL \


@@ -143,7 +143,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-nvidia \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env NVIDIA_API_KEY=$NVIDIA_API_KEY
 ```


@@ -97,7 +97,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/ollama/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-ollama \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env SAFETY_MODEL=$SAFETY_MODEL \


@@ -233,7 +233,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./llama_stack/templates/remote-vllm/run.yaml:/root/my-run.yaml \
   llamastack/distribution-remote-vllm \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1
@@ -255,7 +255,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/remote-vllm/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-remote-vllm \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 \


@@ -117,7 +117,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-tgi \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT \


@@ -371,9 +371,9 @@ def main(args: argparse.Namespace | None = None):
         args = parser.parse_args()

     # Check for deprecated argument usage
-    if "--yaml-config" in sys.argv:
+    if "--config" in sys.argv:
         warnings.warn(
-            "The '--yaml-config' argument is deprecated and will be removed in a future version. Use '--config' instead.",
+            "The '--config' argument is deprecated and will be removed in a future version. Use '--config' instead.",
             DeprecationWarning,
             stacklevel=2,
         )
@@ -391,7 +391,7 @@ def main(args: argparse.Namespace | None = None):
             raise ValueError(f"Template {args.template} does not exist")
         log_line = f"Using template {args.template} config file: {config_file}"
     else:
-        raise ValueError("Either --yaml-config or --template must be provided")
+        raise ValueError("Either --config or --template must be provided")

     logger_config = None
     with open(config_file) as fp:


@@ -54,7 +54,7 @@ other_args=""
 # Process remaining arguments
 while [[ $# -gt 0 ]]; do
   case "$1" in
-    --config|--yaml-config)
+    --config)
       if [[ -n "$2" ]]; then
         yaml_config="$2"
         shift 2
@@ -121,7 +121,7 @@ if [[ "$env_type" == "venv" || "$env_type" == "conda" ]]; then

   set -x
   if [ -n "$yaml_config" ]; then
-    yaml_config_arg="--yaml-config $yaml_config"
+    yaml_config_arg="--config $yaml_config"
   else
     yaml_config_arg=""
   fi
@@ -181,9 +181,9 @@ elif [[ "$env_type" == "container" ]]; then

   # Add yaml config if provided, otherwise use default
   if [ -n "$yaml_config" ]; then
-    cmd="$cmd -v $yaml_config:/app/run.yaml --yaml-config /app/run.yaml"
+    cmd="$cmd -v $yaml_config:/app/run.yaml --config /app/run.yaml"
   else
-    cmd="$cmd --yaml-config /app/run.yaml"
+    cmd="$cmd --config /app/run.yaml"
   fi

   # Add any other args

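For readability, here is a condensed, self-contained sketch of the flag handling in start_stack.sh after this change. The variable names (`yaml_config`, `yaml_config_arg`, `other_args`) come from the hunks above; the catch-all branch and the missing-value error handling are assumptions added for illustration, not copied from the script.

```bash
#!/usr/bin/env bash
# Sketch of the updated argument parsing: --config captures the next
# argument as the run config path; everything else is passed through.
yaml_config=""
other_args=""
while [[ $# -gt 0 ]]; do
  case "$1" in
    --config)                      # formerly also matched --yaml-config
      if [[ -n "$2" ]]; then
        yaml_config="$2"
        shift 2
      else
        echo "--config requires a value" >&2   # assumed error handling
        exit 1
      fi
      ;;
    *)                             # assumed passthrough for other args
      other_args="$other_args $1"
      shift
      ;;
  esac
done

# The flag forwarded to the server is now --config, not --yaml-config.
if [ -n "$yaml_config" ]; then
  yaml_config_arg="--config $yaml_config"
else
  yaml_config_arg=""
fi
echo "server flag: $yaml_config_arg (passthrough:$other_args)"
```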

@@ -46,7 +46,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY
 ```


@@ -143,7 +143,7 @@ docker run \
   -v $HOME/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env DEH_URL=$DEH_URL \


@@ -116,7 +116,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env NVIDIA_API_KEY=$NVIDIA_API_KEY
 ```


@@ -86,7 +86,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/ollama/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env SAFETY_MODEL=$SAFETY_MODEL \


@@ -220,7 +220,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./llama_stack/templates/remote-vllm/run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1
@@ -242,7 +242,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/remote-vllm/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 \


@@ -105,7 +105,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT \


@@ -56,7 +56,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env WATSONX_API_KEY=$WATSONX_API_KEY \
   --env WATSONX_PROJECT_ID=$WATSONX_PROJECT_ID \