From 01d93be948cfa613ba06431d1fadc0856a6ec672 Mon Sep 17 00:00:00 2001
From: Adrian Cole <64215+codefromthecrypt@users.noreply.github.com>
Date: Thu, 3 Oct 2024 05:26:20 +0800
Subject: [PATCH] Adds markdown-link-check and fixes a broken link (#165)

Signed-off-by: Adrian Cole
Co-authored-by: Ashwin Bharambe
---
 .pre-commit-config.yaml                                   | 6 ++++++
 docs/cli_reference.md                                     | 2 +-
 llama_stack/providers/utils/inference/augment_messages.py | 3 ++-
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c00ea3040..555a475b2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -51,3 +51,9 @@ repos:
 #   hooks:
 #     - id: pydoclint
 #       args: [--config=pyproject.toml]
+
+- repo: https://github.com/tcort/markdown-link-check
+  rev: v3.11.2
+  hooks:
+    - id: markdown-link-check
+      args: ['--quiet']
diff --git a/docs/cli_reference.md b/docs/cli_reference.md
index 28874641f..3541d0b4e 100644
--- a/docs/cli_reference.md
+++ b/docs/cli_reference.md
@@ -5,7 +5,7 @@ The `llama` CLI tool helps you setup and use the Llama toolchain & agentic syste
 ### Subcommands
 1. `download`: `llama` cli tools supports downloading the model from Meta or Hugging Face.
 2. `model`: Lists available models and their properties.
-3. `stack`: Allows you to build and run a Llama Stack server. You can read more about this [here](/docs/cli_reference.md#step-3-building-configuring-and-running-llama-stack-servers).
+3. `stack`: Allows you to build and run a Llama Stack server. You can read more about this [here](cli_reference.md#step-3-building-and-configuring-llama-stack-distributions).

 ### Sample Usage
diff --git a/llama_stack/providers/utils/inference/augment_messages.py b/llama_stack/providers/utils/inference/augment_messages.py
index 10375cf0e..613a39525 100644
--- a/llama_stack/providers/utils/inference/augment_messages.py
+++ b/llama_stack/providers/utils/inference/augment_messages.py
@@ -34,7 +34,8 @@ def augment_messages_for_tools(request: ChatCompletionRequest) -> List[Message]:
         return request.messages

     if model.model_family == ModelFamily.llama3_1 or (
-        model.model_family == ModelFamily.llama3_2 and is_multimodal(model.core_model_id)
+        model.model_family == ModelFamily.llama3_2
+        and is_multimodal(model.core_model_id)
     ):
         # llama3.1 and llama3.2 multimodal models follow the same tool prompt format
         return augment_messages_for_tools_llama_3_1(request)
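
For reference, a minimal self-contained sketch of the dispatch predicate the last hunk rewraps, with hypothetical stand-ins for `ModelFamily` and `is_multimodal` (the real definitions live in the Llama model SKU metadata; the multimodal model IDs below are illustrative only):

    from enum import Enum

    # Hypothetical stub for the real ModelFamily enum.
    class ModelFamily(Enum):
        llama3_1 = "llama3_1"
        llama3_2 = "llama3_2"

    # Illustrative stand-in for the real multimodal SKU check.
    MULTIMODAL_CORE_IDS = {"llama3_2_11b_vision", "llama3_2_90b_vision"}

    def is_multimodal(core_model_id: str) -> bool:
        return core_model_id in MULTIMODAL_CORE_IDS

    def uses_llama3_1_tool_format(family: ModelFamily, core_model_id: str) -> bool:
        # All llama3.1 models, but only the multimodal llama3.2 models,
        # share the llama3.1 tool prompt format.
        return family == ModelFamily.llama3_1 or (
            family == ModelFamily.llama3_2 and is_multimodal(core_model_id)
        )

    assert uses_llama3_1_tool_format(ModelFamily.llama3_1, "llama3_1_8b")
    assert uses_llama3_1_tool_format(ModelFamily.llama3_2, "llama3_2_11b_vision")
    assert not uses_llama3_1_tool_format(ModelFamily.llama3_2, "llama3_2_1b")

With the new hook registered, the link check runs on commits that touch Markdown files; `pre-commit run markdown-link-check --all-files` is the standard pre-commit invocation to exercise it across the whole repository.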