From 3c99f08267bca8eabaaa0b8092ca82c92291cf3f Mon Sep 17 00:00:00 2001
From: Mark Sze <66362098+marklysze@users.noreply.github.com>
Date: Fri, 27 Sep 2024 02:48:23 +1000
Subject: [PATCH] minor typo and HuggingFace -> Hugging Face (#113)

---
 docs/cli_reference.md             | 8 ++++----
 llama_stack/cli/model/describe.py | 2 +-
 llama_stack/cli/model/list.py     | 2 +-
 tests/test_inference.py           | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/cli_reference.md b/docs/cli_reference.md
index 1c62188ef..feded6bac 100644
--- a/docs/cli_reference.md
+++ b/docs/cli_reference.md
@@ -3,7 +3,7 @@ The `llama` CLI tool helps you setup and use the Llama toolchain & agentic systems. It should be available on your path after installing the `llama-stack` package.
 ### Subcommands
-1. `download`: `llama` cli tools supports downloading the model from Meta or HuggingFace.
+1. `download`: `llama` cli tools supports downloading the model from Meta or Hugging Face.
 2. `model`: Lists available models and their properties.
 3. `stack`: Allows you to build and run a Llama Stack server. You can read more about this [here](/docs/cli_reference.md#step-3-building-configuring-and-running-llama-stack-servers).
@@ -38,7 +38,7 @@ You should see a table like this:
 +----------------------------------+------------------------------------------+----------------+
-| Model Descriptor                 | HuggingFace Repo                         | Context Length |
+| Model Descriptor                 | Hugging Face Repo                        | Context Length |
 +----------------------------------+------------------------------------------+----------------+
 | Llama3.1-8B                      | meta-llama/Llama-3.1-8B                  | 128K           |
 +----------------------------------+------------------------------------------+----------------+
@@ -112,7 +112,7 @@ llama download --source meta --model-id Prompt-Guard-86M --meta-url META_URL
 llama download --source meta --model-id Llama-Guard-3-8B --meta-url META_URL
 ```
-#### Downloading from [Huggingface](https://huggingface.co/meta-llama)
+#### Downloading from [Hugging Face](https://huggingface.co/meta-llama)
 Essentially, the same commands above work, just replace `--source meta` with `--source huggingface`.
@@ -180,7 +180,7 @@ llama model describe -m Llama3.2-3B-Instruct
 +-----------------------------+----------------------------------+
 | Model                       | Llama3.2-3B-Instruct             |
 +-----------------------------+----------------------------------+
-| HuggingFace ID              | meta-llama/Llama-3.2-3B-Instruct |
+| Hugging Face ID             | meta-llama/Llama-3.2-3B-Instruct |
 +-----------------------------+----------------------------------+
 | Description                 | Llama 3.2 3b instruct model      |
 +-----------------------------+----------------------------------+
diff --git a/llama_stack/cli/model/describe.py b/llama_stack/cli/model/describe.py
index 70bd28a83..6b5325a03 100644
--- a/llama_stack/cli/model/describe.py
+++ b/llama_stack/cli/model/describe.py
@@ -51,7 +51,7 @@ class ModelDescribe(Subcommand):
                 colored("Model", "white", attrs=["bold"]),
                 colored(model.descriptor(), "white", attrs=["bold"]),
             ),
-            ("HuggingFace ID", model.huggingface_repo or ""),
+            ("Hugging Face ID", model.huggingface_repo or " "),
             ("Description", model.description),
             ("Context Length", f"{model.max_seq_length // 1024}K tokens"),
             ("Weights format", model.quantization_format.value),
diff --git a/llama_stack/cli/model/list.py b/llama_stack/cli/model/list.py
index 977590d7a..dbb00d589 100644
--- a/llama_stack/cli/model/list.py
+++ b/llama_stack/cli/model/list.py
@@ -36,7 +36,7 @@ class ModelList(Subcommand):
     def _run_model_list_cmd(self, args: argparse.Namespace) -> None:
         headers = [
             "Model Descriptor",
-            "HuggingFace Repo",
+            "Hugging Face Repo",
             "Context Length",
         ]
diff --git a/tests/test_inference.py b/tests/test_inference.py
index 1bb3200a3..44a171750 100644
--- a/tests/test_inference.py
+++ b/tests/test_inference.py
@@ -20,7 +20,7 @@ from llama_stack.inference.meta_reference.inference import get_provider_impl
 MODEL = "Llama3.1-8B-Instruct"
 HELPER_MSG = """
 This test needs llama-3.1-8b-instruct models.
-Please donwload using the llama cli
+Please download using the llama cli
 llama download --source huggingface --model-id llama3_1_8b_instruct --hf-token
 """
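
For context on what the renamed labels touch, below is a minimal standalone sketch of how a `llama model list` style table could be assembled from a headers list like the one changed in `llama_stack/cli/model/list.py`. It uses only the standard library, the sample rows are made up for illustration, and the plain-text renderer is an assumption, not the project's actual table helper; only the header strings come from the patch.

```python
# Standalone illustration: mirrors the headers list touched by this patch,
# with hypothetical sample rows and a simple renderer (not llama-stack code).
headers = [
    "Model Descriptor",
    "Hugging Face Repo",  # renamed from "HuggingFace Repo" by this patch
    "Context Length",
]

rows = [
    ("Llama3.1-8B", "meta-llama/Llama-3.1-8B", "128K"),                       # sample row
    ("Llama3.2-3B-Instruct", "meta-llama/Llama-3.2-3B-Instruct", "128K"),     # sample row
]

# One column width per header so the output lines up like the tables
# shown in docs/cli_reference.md.
widths = [max(len(h), *(len(row[i]) for row in rows)) for i, h in enumerate(headers)]

def fmt(values):
    # Pad each cell to its column width and join with pipes.
    return "| " + " | ".join(v.ljust(w) for v, w in zip(values, widths)) + " |"

sep = "+" + "+".join("-" * (w + 2) for w in widths) + "+"

print(sep)
print(fmt(headers))
print(sep)
for row in rows:
    print(fmt(row))
    print(sep)
```

Running the sketch prints a table whose header row reads "Hugging Face Repo", matching the updated examples in docs/cli_reference.md; the real CLI builds its rows from the model registry and renders them with its own helper, which is not reproduced here.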