Update broken references

Ashwin Bharambe 2024-10-28 22:24:19 -07:00
parent bd424f9a26
commit 4349d5b1c4
8 changed files with 11 additions and 30 deletions


@@ -58,11 +58,11 @@ llama stack build --list-templates
 You may then pick a template to build your distribution with providers fitted to your liking.
 ```
-llama stack build --template tgi --name tgi
+llama stack build --template tgi
 ```
 ```
-$ llama stack build --template tgi --name tgi
+$ llama stack build --template tgi
 ...
 ...
 Build spec configuration saved at ~/.conda/envs/llamastack-tgi/tgi-build.yaml
@@ -77,7 +77,7 @@ You may now run `llama stack configure tgi` or `llama stack configure ~/.conda/e
 ```
 $ cat llama_stack/templates/ollama/build.yaml
-name: local-ollama
+name: ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
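
The `build.yaml` shown in this hunk is what `llama stack build` consumes. As a minimal sketch of how such a file maps onto a typed config object (mirroring the `BuildConfig(**yaml.safe_load(f))` call in the `StackConfigure` hunks at the end of this commit) — the dataclasses below are illustrative stand-ins, not the real llama_stack models:

```python
# Illustrative stand-ins for the real llama_stack config models; only the
# `name`, `distribution_spec`, `description`, and `providers` keys are taken
# from the build.yaml visible in this diff.
from dataclasses import dataclass, field

import yaml


@dataclass
class DistributionSpec:
    description: str = ""
    providers: dict = field(default_factory=dict)


@dataclass
class BuildConfig:
    name: str
    distribution_spec: DistributionSpec
    image_type: str = "conda"


def load_build_config(path: str) -> BuildConfig:
    # Same pattern as the CLI: parse the YAML, then validate it into a typed object.
    with open(path, "r") as f:
        raw = yaml.safe_load(f)
    return BuildConfig(
        name=raw["name"],
        distribution_spec=DistributionSpec(**raw["distribution_spec"]),
        image_type=raw.get("image_type", "conda"),
    )
```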


@@ -298,7 +298,7 @@ You may now run `llama stack configure tgi` or `llama stack configure ~/.conda/e
 ```
 $ cat build.yaml
-name: local-ollama
+name: ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:


@@ -68,7 +68,6 @@
     "```\n",
     "$ llama stack configure llamastack-local-gpu\n",
     "\n",
-    "Could not find llamastack-local-gpu. Trying conda build name instead...\n",
     "Could not find /home/hjshah/.conda/envs/llamastack-llamastack-local-gpu/llamastack-local-gpu-build.yaml. Trying docker image name instead...\n",
     "+ podman run --network host -it -v /home/hjshah/.llama/builds/docker:/app/builds llamastack-local-gpu llama stack configure ./llamastack-build.yaml --output-dir /app/builds\n",
     "\n",

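The notebook output above shows the docker fallback in action: the builds directory is bind-mounted into the container and `llama stack configure` runs inside it. A sketch of constructing that same invocation from Python, assuming only that a `podman` (or `docker`) binary is on PATH; `configure_in_container` is a hypothetical helper:

```python
# Rebuild the container invocation logged above: bind-mount the builds
# directory and run `llama stack configure` inside the image. The flags
# mirror the logged podman command; the helper itself is hypothetical.
import subprocess


def configure_in_container(image: str, builds_dir: str, engine: str = "podman") -> int:
    cmd = [
        engine, "run", "--network", "host", "-it",
        "-v", f"{builds_dir}:/app/builds",
        image,
        "llama", "stack", "configure", "./llamastack-build.yaml",
        "--output-dir", "/app/builds",
    ]
    return subprocess.call(cmd)


# Example, matching the log line:
# configure_in_container("llamastack-local-gpu", "/home/hjshah/.llama/builds/docker")
```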

@@ -97,7 +97,6 @@ docker run -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./run.yaml:/root/my-run.
 ```
 $ llama stack configure my-local-stack
-Could not find my-local-stack. Trying conda build name instead...
 Configuring API `inference`...
 === Configuring provider `meta-reference` for API inference...
 Enter value for model (default: Llama3.1-8B-Instruct) (required):
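
The `Enter value for ... (default: ...) (required):` lines above come from an interactive prompt loop. A rough sketch of that prompt-with-default pattern; `prompt_for_value` is a hypothetical helper, not the actual llama_stack implementation:

```python
# Hypothetical prompt-with-default helper behind output like
# "Enter value for model (default: Llama3.1-8B-Instruct) (required):".
from typing import Optional


def prompt_for_value(name: str, default: Optional[str] = None, required: bool = False) -> str:
    suffix = f" (default: {default})" if default is not None else ""
    suffix += " (required)" if required else ""
    while True:
        value = input(f"Enter value for {name}{suffix}: ").strip()
        if value:
            return value
        if default is not None:
            # Empty input accepts the advertised default.
            return default
        if not required:
            return ""
        print(f"A value for {name} is required.")


# Example: prompt_for_value("model", default="Llama3.1-8B-Instruct", required=True)
```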


@@ -296,9 +296,9 @@ You may now run `llama stack configure tgi` or `llama stack configure ~/.conda/e
 - The config file will be of contents like the ones in `llama_stack/distributions/templates/`.
 ```
-$ cat llama_stack/distribution/templates/local-ollama-build.yaml
-name: local-ollama
+$ cat llama_stack/templates/ollama/build.yaml
+name: ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
@@ -311,7 +311,7 @@ image_type: conda
 ```
 ```
-llama stack build --config llama_stack/distribution/templates/local-ollama-build.yaml
+llama stack build --config llama_stack/templates/ollama/build.yaml
 ```
 #### How to build distribution with Docker image
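
These hunks retarget the docs from `llama_stack/distribution/templates/local-ollama-build.yaml` to the per-template `llama_stack/templates/<name>/build.yaml` layout. A hedged sketch of resolving a template name to its build config under that layout, using `importlib.resources`; the real CLI may locate templates differently:

```python
# Map a template name to llama_stack/templates/<name>/build.yaml. This only
# illustrates the path convention referenced in the docs above; it is not
# the actual resolution code.
from importlib import resources
from pathlib import Path


def template_build_config_path(template: str) -> Path:
    candidate = Path(str(resources.files("llama_stack"))) / "templates" / template / "build.yaml"
    if not candidate.exists():
        raise FileNotFoundError(f"No build.yaml for template '{template}' at {candidate}")
    return candidate


# Example: template_build_config_path("ollama")
# -> .../llama_stack/templates/ollama/build.yaml
```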


@@ -86,7 +86,6 @@ llama stack configure <name>
 ```
 $ llama stack configure my-local-stack
-Could not find my-local-stack. Trying conda build name instead...
 Configuring API `inference`...
 === Configuring provider `meta-reference` for API inference...
 Enter value for model (default: Llama3.1-8B-Instruct) (required):
@@ -235,9 +234,9 @@ You may now run `llama stack configure tgi` or `llama stack configure ~/.conda/e
 - The config file will be of contents like the ones in `llama_stack/distributions/templates/`.
 ```
-$ cat llama_stack/distribution/templates/local-ollama-build.yaml
-name: local-ollama
+$ cat llama_stack/templates/ollama/build.yaml
+name: ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:


@@ -67,6 +67,7 @@ class StackBuild(Subcommand):
             type=str,
             help="Image Type to use for the build. This can be either conda or docker. If not specified, will use the image type from the template config.",
             choices=["conda", "docker"],
+            default="conda",
         )

     def _run_stack_build_command(self, args: argparse.Namespace) -> None:
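
The one-line addition gives `--image-type` a default. A standalone reproduction of the changed argument setup; note the side effect that with `default="conda"`, `args.image_type` is never `None`, so the template-config fallback the unchanged help text still mentions can only apply if the code special-cases the default:

```python
# Standalone reproduction of the changed argparse argument: with
# default="conda", the value is "conda" even when the flag is omitted,
# so a fallback keyed on `args.image_type is None` can no longer fire.
import argparse

parser = argparse.ArgumentParser(prog="llama stack build")
parser.add_argument(
    "--image-type",
    type=str,
    choices=["conda", "docker"],
    default="conda",
)

assert parser.parse_args([]).image_type == "conda"
assert parser.parse_args(["--image-type", "docker"]).image_type == "docker"
```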


@@ -55,19 +55,12 @@ class StackConfigure(Subcommand):
         docker_image = None
         build_config_file = Path(args.config)
         if build_config_file.exists():
             with open(build_config_file, "r") as f:
                 build_config = BuildConfig(**yaml.safe_load(f))
             self._configure_llama_distribution(build_config, args.output_dir)
             return

-        # if we get here, we need to try to find the conda build config file
-        cprint(
-            f"Could not find {build_config_file}. Trying conda build name instead...",
-            color="green",
-        )
         conda_dir = (
             Path(os.path.expanduser("~/.conda/envs")) / f"llamastack-{args.config}"
         )
@@ -80,19 +73,14 @@ class StackConfigure(Subcommand):
                 break

         build_config_file = Path(conda_dir) / f"{args.config}-build.yaml"
         if build_config_file.exists():
             with open(build_config_file, "r") as f:
                 build_config = BuildConfig(**yaml.safe_load(f))
             cprint(f"Using {build_config_file}...", "green")
             self._configure_llama_distribution(build_config, args.output_dir)
             return

-        # if we get here, we need to try to find the docker image
-        cprint(
-            f"Could not find {build_config_file}. Trying docker image name instead...",
-            color="green",
-        )
         docker_image = args.config
         builds_dir = BUILDS_BASE_DIR / ImageType.docker.value
         if args.output_dir:
@@ -105,15 +93,10 @@ class StackConfigure(Subcommand):
         script_args = [script, docker_image, str(builds_dir)]
         return_code = run_with_pty(script_args)

-        # we have regenerated the build config file with script, now check if it exists
         if return_code != 0:
             self.parser.error(
                 f"Failed to configure container {docker_image} with return code {return_code}. Please run `llama stack build` first. "
             )
             return
-        return

     def _configure_llama_distribution(
         self,
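
Read together, the `StackConfigure` hunks leave a three-step fallback for the `config` argument, now without the intermediate "Trying ... instead" messages: an explicit build-config path, then a conda env's `<name>-build.yaml`, then a docker image name. A condensed, illustrative sketch of that resolution order; `resolve_build_target` is a hypothetical distillation, and the real command also validates `BuildConfig` and runs the configure scripts:

```python
# Condensed sketch of the fallback order visible in the hunks above:
# explicit config file -> conda env build yaml -> docker image name.
import os
from pathlib import Path
from typing import Optional, Tuple


def resolve_build_target(config: str) -> Tuple[Optional[Path], Optional[str]]:
    """Return (build_config_file, docker_image); exactly one is non-None."""
    path = Path(config)
    if path.exists():
        return path, None

    conda_yaml = (
        Path(os.path.expanduser("~/.conda/envs"))
        / f"llamastack-{config}"
        / f"{config}-build.yaml"
    )
    if conda_yaml.exists():
        return conda_yaml, None

    # Last resort: treat the argument as a docker image name.
    return None, config
```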