fix: resolve template name to config path in llama stack run (#2361)

# What does this PR do?
<!-- Provide a short summary of what this PR does and why. Link to
relevant issues if applicable. -->
This PR fixes a bug where running a known template by name, e.g.
`llama stack run ollama`,
failed with the following error:
`ValueError: Config file ollama does not exist`
The positional `config` argument now accepts either a path to a config file or the name of a known template; a template name is resolved to its config file before the server is started.

<!-- If resolving an issue, uncomment and update the line below -->
Closes #2291 

## Test Plan
<!-- Describe the tests you ran to verify your changes with result
summaries. *Provide clear instructions so the plan can be easily
re-executed.* -->
Run `llama stack run ollama` and verify the server starts from the resolved template config instead of failing with `ValueError: Config file ollama does not exist`. Passing an explicit config file path should continue to work as before; see the sketch below.
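A quick manual check, as a sketch (the explicit config path in the second command is illustrative, not taken from this PR):

```sh
# Template name: previously raised ValueError, should now resolve to the
# template's config and start the server.
llama stack run ollama

# Explicit config file path: pre-existing behavior, should be unchanged.
llama stack run /path/to/run.yaml
```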
Authored by Ignas Baranauskas on 2025-06-03 22:39:12 +01:00, committed by GitHub
parent cba55808ab
commit c70ca8344f

```diff
@@ -35,7 +35,8 @@ class StackRun(Subcommand):
             "config",
             type=str,
             nargs="?",  # Make it optional
-            help="Path to config file to use for the run. Required for venv and conda environments.",
+            metavar="config | template",
+            help="Path to config file to use for the run or name of known template (`llama stack list` for a list).",
         )
         self.parser.add_argument(
             "--port",
```
```diff
@@ -154,7 +155,10 @@ class StackRun(Subcommand):
             # func=<bound method StackRun._run_stack_run_cmd of <llama_stack.cli.stack.run.StackRun object at 0x10484b010>>
             if callable(getattr(args, arg)):
                 continue
-            setattr(server_args, arg, getattr(args, arg))
+            if arg == "config" and template_name:
+                server_args.config = str(config_file)
+            else:
+                setattr(server_args, arg, getattr(args, arg))
 
         # Run the server
         server_main(server_args)
```
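The hunk above references `template_name` and `config_file`, which are resolved earlier in the command (not shown in this diff). A minimal sketch of what that resolution could look like; the `TEMPLATES_DIR` layout and `run.yaml` naming are assumptions for illustration, not llama-stack's actual structure:

```python
from pathlib import Path

# Assumed template directory layout (hypothetical, for illustration only).
TEMPLATES_DIR = Path("llama_stack/templates")

def resolve_config(config_arg: str) -> tuple[Path, str | None]:
    """Return (config_file, template_name); template_name is None for paths."""
    candidate = Path(config_arg)
    if candidate.exists():
        # Argument is an actual config file path: use it directly.
        return candidate, None
    template_config = TEMPLATES_DIR / config_arg / "run.yaml"
    if template_config.exists():
        # Argument names a known template: resolve to its bundled config.
        return template_config, config_arg
    raise ValueError(f"Config file {config_arg} does not exist")
```

With a resolution step like this in place, the loop in the hunk substitutes the resolved `config_file` for the raw template name when building the server's arguments, while all other parsed arguments are copied through unchanged.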