refactor: display defaults in help text (#1480)

# What does this PR do?

Using `formatter_class=argparse.ArgumentDefaultsHelpFormatter` makes argparse append `(default: DEFAULT_VALUE)` to each flag's help text. This PR adds that formatter class to `llama stack build` and `llama stack run` so users can see default values such as `conda`, `8321`, etc.
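
For illustration, here is a minimal standalone sketch (a toy `demo` parser, not the actual CLI wiring) of what the formatter class does:

```python
import argparse

# ArgumentDefaultsHelpFormatter appends "(default: ...)" to the help text of
# every argument that defines a help string, using whatever default value the
# parser knows about (including None).
parser = argparse.ArgumentParser(
    prog="demo",
    description="Toy parser demonstrating default values in --help output",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--port", type=int, default=8321, help="Port to run the server on.")
parser.add_argument(
    "--image-type",
    choices=["conda", "container", "venv"],
    default="conda",
    help="Image type used during the build.",
)

# Prints, among other lines:
#   --port PORT           Port to run the server on. (default: 8321)
#   --image-type {conda,container,venv}
#                         Image type used during the build. (default: conda)
parser.print_help()
```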

## Test Plan

Ran locally with the following output:

Before:
```
llama stack run --help
usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME] [--disable-ipv6] [--env KEY=VALUE] [--tls-keyfile TLS_KEYFILE] [--tls-certfile TLS_CERTFILE]
                       [--image-type {conda,container,venv}]
                       config

Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.

positional arguments:
  config                Path to config file to use for the run

options:
  -h, --help            show this help message and exit
  --port PORT           Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. Defaults to 8321
  --image-name IMAGE_NAME
                        Name of the image to run. Defaults to the current conda environment
  --disable-ipv6        Disable IPv6 support
  --env KEY=VALUE       Environment variables to pass to the server in KEY=VALUE format. Can be specified multiple times.
  --tls-keyfile TLS_KEYFILE
                        Path to TLS key file for HTTPS
  --tls-certfile TLS_CERTFILE
                        Path to TLS certificate file for HTTPS
  --image-type {conda,container,venv}
                        Image Type used during the build. This can be either conda or container or venv.
```

After:
```
llama stack run --help
usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME] [--disable-ipv6] [--env KEY=VALUE] [--tls-keyfile TLS_KEYFILE] [--tls-certfile TLS_CERTFILE]
                       [--image-type {conda,container,venv}]
                       config

Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.

positional arguments:
  config                Path to config file to use for the run

options:
  -h, --help            show this help message and exit
  --port PORT           Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. (default: 8321)
  --image-name IMAGE_NAME
                        Name of the image to run. Defaults to the current conda environment (default: None)
  --disable-ipv6        Disable IPv6 support (default: False)
  --env KEY=VALUE       Environment variables to pass to the server in KEY=VALUE format. Can be specified multiple times. (default: [])
  --tls-keyfile TLS_KEYFILE
                        Path to TLS key file for HTTPS (default: None)
  --tls-certfile TLS_CERTFILE
                        Path to TLS certificate file for HTTPS (default: None)
  --image-type {conda,container,venv}
                        Image Type used during the build. This can be either conda or container or venv. (default: conda)
```

[//]: # (## Documentation)

Signed-off-by: Charlie Doern <cdoern@redhat.com>
3 changed files with 25 additions and 24 deletions

@@ -51,25 +51,25 @@ The main points to consider are:
 ```
 llama stack build -h
-usage: llama stack build [-h] [--config CONFIG] [--template TEMPLATE] [--list-templates]
-                         [--image-type {conda,container,venv}] [--image-name IMAGE_NAME] [--print-deps-only]
+usage: llama stack build [-h] [--config CONFIG] [--template TEMPLATE] [--list-templates] [--image-type {conda,container,venv}] [--image-name IMAGE_NAME] [--print-deps-only] [--run]
 
 Build a Llama stack container
 
 options:
   -h, --help            show this help message and exit
-  --config CONFIG       Path to a config file to use for the build. You can find example configs in llama_stack/distributions/**/build.yaml.
-                        If this argument is not provided, you will be prompted to enter information interactively
-  --template TEMPLATE   Name of the example template config to use for build. You may use `llama stack build --list-templates` to check out the available templates
-  --list-templates      Show the available templates for building a Llama Stack distribution
+  --config CONFIG       Path to a config file to use for the build. You can find example configs in llama_stack/distributions/**/build.yaml. If this argument is not provided, you will
+                        be prompted to enter information interactively (default: None)
+  --template TEMPLATE   Name of the example template config to use for build. You may use `llama stack build --list-templates` to check out the available templates (default: None)
+  --list-templates      Show the available templates for building a Llama Stack distribution (default: False)
   --image-type {conda,container,venv}
-                        Image Type to use for the build. This can be either conda or container or venv. If not specified, will use the image type from the template config.
+                        Image Type to use for the build. This can be either conda or container or venv. If not specified, will use the image type from the template config. (default:
+                        conda)
   --image-name IMAGE_NAME
-                        [for image-type=conda] Name of the conda environment to use for the build. If
-                        not specified, currently active Conda environment will be used. If no Conda
-                        environment is active, you must specify a name.
-  --print-deps-only     Print the dependencies for the stack only, without building the stack
+                        [for image-type=conda|venv] Name of the conda or virtual environment to use for the build. If not specified, currently active Conda environment will be used if
+                        found. (default: None)
+  --print-deps-only     Print the dependencies for the stack only, without building the stack (default: False)
+  --run                 Run the stack after building using the same image type, name, and other applicable arguments (default: False)
 ```
 
 After this step is complete, a file named `<name>-build.yaml` and template file `<name>-run.yaml` will be generated and saved at the output file path specified at the end of the command.
@@ -212,8 +212,8 @@ Now, let's start the Llama Stack Distribution Server. You will need the YAML con
 ```
 llama stack run -h
-usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME] [--disable-ipv6] [--env KEY=VALUE] [--tls-keyfile TLS_KEYFILE]
-                       [--tls-certfile TLS_CERTFILE] [--image-type {conda,container,venv}]
+usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME] [--disable-ipv6] [--env KEY=VALUE] [--tls-keyfile TLS_KEYFILE] [--tls-certfile TLS_CERTFILE]
+                       [--image-type {conda,container,venv}]
                        config
 
 Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.
@@ -223,17 +223,17 @@ positional arguments:
 options:
   -h, --help            show this help message and exit
-  --port PORT           Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. Defaults to 8321
+  --port PORT           Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. (default: 8321)
   --image-name IMAGE_NAME
-                        Name of the image to run. Defaults to the current conda environment
-  --disable-ipv6        Disable IPv6 support
-  --env KEY=VALUE       Environment variables to pass to the server in KEY=VALUE format. Can be specified multiple times.
+                        Name of the image to run. Defaults to the current conda environment (default: None)
+  --disable-ipv6        Disable IPv6 support (default: False)
+  --env KEY=VALUE       Environment variables to pass to the server in KEY=VALUE format. Can be specified multiple times. (default: [])
   --tls-keyfile TLS_KEYFILE
-                        Path to TLS key file for HTTPS
+                        Path to TLS key file for HTTPS (default: None)
   --tls-certfile TLS_CERTFILE
-                        Path to TLS certificate file for HTTPS
+                        Path to TLS certificate file for HTTPS (default: None)
   --image-type {conda,container,venv}
-                        Image Type used during the build. This can be either conda or container or venv.
+                        Image Type used during the build. This can be either conda or container or venv. (default: conda)
 ```

@@ -16,7 +16,7 @@ class StackBuild(Subcommand):
             "build",
             prog="llama stack build",
             description="Build a Llama stack container",
-            formatter_class=argparse.RawTextHelpFormatter,
+            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         )
         self._add_arguments()
         self.parser.set_defaults(func=self._run_stack_build_command)

@@ -23,7 +23,7 @@ class StackRun(Subcommand):
             "run",
             prog="llama stack run",
             description="""Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.""",
-            formatter_class=argparse.RawTextHelpFormatter,
+            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         )
         self._add_arguments()
         self.parser.set_defaults(func=self._run_stack_run_cmd)
@@ -37,12 +37,13 @@ class StackRun(Subcommand):
         self.parser.add_argument(
             "--port",
             type=int,
-            help="Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. Defaults to 8321",
+            help="Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT.",
             default=int(os.getenv("LLAMA_STACK_PORT", 8321)),
         )
         self.parser.add_argument(
             "--image-name",
             type=str,
+            default=os.environ.get("CONDA_DEFAULT_ENV"),
             help="Name of the image to run. Defaults to the current conda environment",
         )
         self.parser.add_argument(
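
A side note on the `--image-name` change above: `ArgumentDefaultsHelpFormatter` can only display defaults the parser itself knows about, which is presumably why the conda-environment fallback is now supplied via `default=` at registration time. Below is a hedged, standalone sketch (a toy parser, not the real `StackRun` wiring) of that behavior:

```python
import argparse
import os

parser = argparse.ArgumentParser(
    prog="demo-run",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# The default is resolved once, when the argument is registered, so --help shows
# the name of the currently active conda environment, or "None" when no
# environment is active (matching the "(default: None)" seen in the docs above).
parser.add_argument(
    "--image-name",
    type=str,
    default=os.environ.get("CONDA_DEFAULT_ENV"),
    help="Name of the image to run. Defaults to the current conda environment",
)
parser.print_help()
```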