Adds markdown-link-check and fixes a broken link
Signed-off-by: Adrian Cole <adrian.cole@elastic.co>
parent eb2d8a31a5
commit de8fdd8db8
4 changed files with 10 additions and 5 deletions
@@ -51,3 +51,9 @@ repos:
 #  hooks:
 #    - id: pydoclint
 #      args: [--config=pyproject.toml]
+
+- repo: https://github.com/tcort/markdown-link-check
+  rev: v3.11.2
+  hooks:
+    - id: markdown-link-check
+      args: ['--quiet']
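With the hook registered, link checking runs alongside the other hooks on each commit; it can also be exercised directly with `pre-commit run markdown-link-check --all-files`. Per the tool's options, `--quiet` restricts output to dead links only.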
@@ -5,7 +5,7 @@ The `llama` CLI tool helps you set up and use the Llama toolchain & agentic systems
 ### Subcommands
 1. `download`: The `llama` CLI tool supports downloading the model from Meta or Hugging Face.
 2. `model`: Lists available models and their properties.
-3. `stack`: Allows you to build and run a Llama Stack server. You can read more about this [here](/docs/cli_reference.md#step-3-building-configuring-and-running-llama-stack-servers).
+3. `stack`: Allows you to build and run a Llama Stack server. You can read more about this [here](cli_reference.md#step-3-building-and-configuring-llama-stack-distributions).

 ### Sample Usage
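Presumably this is the broken link named in the commit title: the old target combined a repo-absolute path (`/docs/cli_reference.md`) with an anchor for a heading that no longer exists, while the replacement is a relative link to the current section name; exactly the kind of rot markdown-link-check now flags.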
@@ -98,9 +98,7 @@ def available_providers() -> List[ProviderSpec]:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_id="bedrock",
-            pip_packages=[
-                "boto3"
-            ],
+            pip_packages=["boto3"],
             module="llama_stack.providers.adapters.inference.bedrock",
             config_class="llama_stack.providers.adapters.inference.bedrock.BedrockConfig",
         ),
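This hunk is formatting only: the single-element `pip_packages` list for the Bedrock adapter collapses onto one line, and the declared dependency (`boto3`) is unchanged.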
@@ -34,7 +34,8 @@ def augment_messages_for_tools(request: ChatCompletionRequest) -> List[Message]:
         return request.messages

     if model.model_family == ModelFamily.llama3_1 or (
-        model.model_family == ModelFamily.llama3_2 and is_multimodal(model.core_model_id)
+        model.model_family == ModelFamily.llama3_2
+        and is_multimodal(model.core_model_id)
     ):
         # llama3.1 and llama3.2 multimodal models follow the same tool prompt format
         return augment_messages_for_tools_llama_3_1(request)
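The split condition reads more clearly and encodes the rule stated in the comment beneath it: Llama 3.1 models, plus multimodal Llama 3.2 models, share one tool-prompt format. A standalone sketch of that predicate, with stand-in types (the real ModelFamily enum, is_multimodal helper, and multimodal model IDs live in llama-stack's model utilities and are only mirrored here hypothetically):

from enum import Enum

class ModelFamily(Enum):  # stand-in for the real enum
    llama3_1 = "llama3_1"
    llama3_2 = "llama3_2"

# Hypothetical multimodal core-model IDs, for illustration only.
_MULTIMODAL_IDS = {"llama3_2_11b_vision", "llama3_2_90b_vision"}

def is_multimodal(core_model_id: str) -> bool:
    # Stand-in for llama-stack's helper of the same name.
    return core_model_id in _MULTIMODAL_IDS

def uses_llama_3_1_tool_format(family: ModelFamily, core_model_id: str) -> bool:
    # Llama 3.1, and multimodal Llama 3.2, share one tool prompt format.
    return family == ModelFamily.llama3_1 or (
        family == ModelFamily.llama3_2 and is_multimodal(core_model_id)
    )

For example, uses_llama_3_1_tool_format(ModelFamily.llama3_2, "llama3_2_11b_vision") returns True under these stand-in IDs, while a text-only 3.2 model falls through to the other branches of augment_messages_for_tools.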