Update docs

commit 3538477070 (parent a0e6e82c1e)
7 changed files with 6 additions and 137 deletions
@@ -18,6 +18,6 @@ Llama Stack Inference API for generating completions, chat completions, and embe
 This API provides the raw interface to the underlying models. Three kinds of models are supported:
 - LLM models: these models generate "raw" and "chat" (conversational) completions.
 - Embedding models: these models generate embeddings to be used for semantic search.
-- Rerank models: these models reorder the documents by relevance.
+- Rerank models: these models reorder the documents based on their relevance to a query.

 This section contains documentation for all available providers for the **inference** API.
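To make the reworded rerank description concrete, here is a minimal, self-contained toy sketch of what "reorder the documents based on their relevance to a query" means. The term-overlap scorer is a stand-in for a real rerank model, and the function name and signature are illustrative only (max_num_results is borrowed from the router signature further down this diff).

# Toy reranker: order candidate documents by a relevance score (highest first).
# The term-overlap score below is a placeholder for a real rerank model.
def toy_rerank(query: str, documents: list[str], max_num_results: int | None = None) -> list[tuple[float, str]]:
    query_terms = set(query.lower().split())
    scored = [
        (len(query_terms & set(doc.lower().split())) / max(len(query_terms), 1), doc)
        for doc in documents
    ]
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return scored[:max_num_results] if max_num_results is not None else scored

# The most query-relevant document comes back first.
print(toy_rerank("llama stack inference", ["stacking firewood", "llama stack inference API"]))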
docs/static/llama-stack-spec.html (vendored, 2 lines changed)

@@ -17875,7 +17875,7 @@
         },
         {
             "name": "Inference",
-            "description": "This API provides the raw interface to the underlying models. Three kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.\n- Rerank models: these models reorder the documents by relevance.",
+            "description": "This API provides the raw interface to the underlying models. Three kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.\n- Rerank models: these models reorder the documents based on their relevance to a query.",
             "x-displayName": "Llama Stack Inference API for generating completions, chat completions, and embeddings."
         },
         {
docs/static/llama-stack-spec.yaml (vendored, 3 lines changed)

@@ -13460,7 +13460,8 @@ tags:
       - Embedding models: these models generate embeddings to be used for semantic
       search.

-      - Rerank models: these models reorder the documents by relevance.
+      - Rerank models: these models reorder the documents based on their relevance
+      to a query.
     x-displayName: >-
       Llama Stack Inference API for generating completions, chat completions, and
       embeddings.
@@ -1162,7 +1162,7 @@ class Inference(InferenceProvider):
     This API provides the raw interface to the underlying models. Three kinds of models are supported:
     - LLM models: these models generate "raw" and "chat" (conversational) completions.
     - Embedding models: these models generate embeddings to be used for semantic search.
-    - Rerank models: these models reorder the documents by relevance.
+    - Rerank models: these models reorder the documents based on their relevance to a query.
     """

     @webmethod(route="/openai/v1/chat/completions", method="GET", level=LLAMA_STACK_API_V1, deprecated=True)
@@ -27,7 +27,7 @@ class ModelType(StrEnum):
     """Enumeration of supported model types in Llama Stack.
     :cvar llm: Large language model for text generation and completion
     :cvar embedding: Embedding model for converting text to vector representations
-    :cvar rerank: Reranking model for reordering documents by relevance
+    :cvar rerank: Reranking model for reordering documents based on their relevance to a query
     """

     llm = "llm"
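For context, a sketch of the full enum this hunk touches, reconstructed from the visible lines; the string values for embedding and rerank are assumptions that follow the llm = "llm" pattern shown in the hunk.

from enum import StrEnum  # Python 3.11+

class ModelType(StrEnum):
    """Enumeration of supported model types in Llama Stack.

    :cvar llm: Large language model for text generation and completion
    :cvar embedding: Embedding model for converting text to vector representations
    :cvar rerank: Reranking model for reordering documents based on their relevance to a query
    """

    llm = "llm"
    embedding = "embedding"  # assumed value, following the llm = "llm" pattern
    rerank = "rerank"  # assumed value, following the llm = "llm" pattern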
@@ -191,7 +191,6 @@ class InferenceRouter(Inference):
         items: list[str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam],
         max_num_results: int | None = None,
     ) -> RerankResponse:
-        """Route rerank requests to the appropriate provider based on the model."""
         logger.debug(f"InferenceRouter.rerank: {model}")
         model_obj = await self._get_model(model, ModelType.rerank)
         provider = await self.routing_table.get_provider_impl(model_obj.identifier)
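The pattern visible in this hunk is the standard router flow: resolve the registered model record, look up the provider implementation for it, then delegate. A hedged sketch of the whole method follows; the query parameter and the final delegation to provider.rerank are assumptions, since the hunk cuts off before them.

# Hedged sketch of InferenceRouter.rerank, expanded from the context lines in
# the hunk above. _get_model and routing_table.get_provider_impl appear
# verbatim there; the query parameter and the trailing delegation are assumed.
async def rerank(
    self,
    model: str,
    query: str,  # assumed: reranking scores items against a query
    items: list[str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam],
    max_num_results: int | None = None,
) -> RerankResponse:
    logger.debug(f"InferenceRouter.rerank: {model}")
    # Resolve the registered model record, enforcing the rerank model type.
    model_obj = await self._get_model(model, ModelType.rerank)
    # Look up the provider implementation registered for that model...
    provider = await self.routing_table.get_provider_impl(model_obj.identifier)
    # ...and delegate (assumed; not shown in the hunk).
    return await provider.rerank(
        model=model_obj.identifier,
        query=query,
        items=items,
        max_num_results=max_num_results,
    )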
@@ -1,131 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from llama_stack.apis.models import ModelType
-from llama_stack.models.llama.sku_types import CoreModelId
-from llama_stack.providers.utils.inference.model_registry import (
-    ProviderModelEntry,
-    build_hf_repo_model_entry,
-)
-
-SAFETY_MODELS_ENTRIES = []
-
-# https://docs.nvidia.com/nim/large-language-models/latest/supported-llm-agnostic-architectures.html
-MODEL_ENTRIES = [
-    build_hf_repo_model_entry(
-        "meta/llama3-8b-instruct",
-        CoreModelId.llama3_8b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama3-70b-instruct",
-        CoreModelId.llama3_70b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama-3.1-8b-instruct",
-        CoreModelId.llama3_1_8b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama-3.1-70b-instruct",
-        CoreModelId.llama3_1_70b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama-3.1-405b-instruct",
-        CoreModelId.llama3_1_405b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama-3.2-1b-instruct",
-        CoreModelId.llama3_2_1b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama-3.2-3b-instruct",
-        CoreModelId.llama3_2_3b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama-3.2-11b-vision-instruct",
-        CoreModelId.llama3_2_11b_vision_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama-3.2-90b-vision-instruct",
-        CoreModelId.llama3_2_90b_vision_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta/llama-3.3-70b-instruct",
-        CoreModelId.llama3_3_70b_instruct.value,
-    ),
-    ProviderModelEntry(
-        provider_model_id="nvidia/vila",
-        model_type=ModelType.llm,
-    ),
-    # NeMo Retriever Text Embedding models -
-    #
-    # https://docs.nvidia.com/nim/nemo-retriever/text-embedding/latest/support-matrix.html
-    #
-    # +-----------------------------------+--------+-----------+-----------+------------+
-    # | Model ID                          | Max    | Publisher | Embedding | Dynamic    |
-    # |                                   | Tokens |           | Dimension | Embeddings |
-    # +-----------------------------------+--------+-----------+-----------+------------+
-    # | nvidia/llama-3.2-nv-embedqa-1b-v2 | 8192   | NVIDIA    | 2048      | Yes        |
-    # | nvidia/nv-embedqa-e5-v5           | 512    | NVIDIA    | 1024      | No         |
-    # | nvidia/nv-embedqa-mistral-7b-v2   | 512    | NVIDIA    | 4096      | No         |
-    # | snowflake/arctic-embed-l          | 512    | Snowflake | 1024      | No         |
-    # +-----------------------------------+--------+-----------+-----------+------------+
-    ProviderModelEntry(
-        provider_model_id="nvidia/llama-3.2-nv-embedqa-1b-v2",
-        model_type=ModelType.embedding,
-        metadata={
-            "embedding_dimension": 2048,
-            "context_length": 8192,
-        },
-    ),
-    ProviderModelEntry(
-        provider_model_id="nvidia/nv-embedqa-e5-v5",
-        model_type=ModelType.embedding,
-        metadata={
-            "embedding_dimension": 1024,
-            "context_length": 512,
-        },
-    ),
-    ProviderModelEntry(
-        provider_model_id="nvidia/nv-embedqa-mistral-7b-v2",
-        model_type=ModelType.embedding,
-        metadata={
-            "embedding_dimension": 4096,
-            "context_length": 512,
-        },
-    ),
-    ProviderModelEntry(
-        provider_model_id="snowflake/arctic-embed-l",
-        model_type=ModelType.embedding,
-        metadata={
-            "embedding_dimension": 1024,
-            "context_length": 512,
-        },
-    ),
-    # NVIDIA Reranking models
-    ProviderModelEntry(
-        provider_model_id="nv-rerank-qa-mistral-4b:1",
-        model_type=ModelType.rerank,
-        metadata={
-            "endpoint": "https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking",
-        },
-    ),
-    ProviderModelEntry(
-        provider_model_id="nvidia/nv-rerankqa-mistral-4b-v3",
-        model_type=ModelType.rerank,
-        metadata={
-            "endpoint": "https://ai.api.nvidia.com/v1/retrieval/nvidia/nv-rerankqa-mistral-4b-v3/reranking",
-        },
-    ),
-    ProviderModelEntry(
-        provider_model_id="nvidia/llama-3.2-nv-rerankqa-1b-v2",
-        model_type=ModelType.rerank,
-        metadata={
-            "endpoint": "https://ai.api.nvidia.com/v1/retrieval/nvidia/llama-3_2-nv-rerankqa-1b-v2/reranking",
-        },
-    ),
-    # TODO(mf): how do we handle Nemotron models?
-    # "Llama3.1-Nemotron-51B-Instruct" -> "meta/llama-3.1-nemotron-51b-instruct",
-] + SAFETY_MODELS_ENTRIES
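A usage note on the registry pattern the deleted file implemented: each entry maps a provider-specific model ID to a model type plus metadata, and such lists are typically consumed by indexing on provider_model_id. A minimal sketch follows, reusing two entries verbatim from the file above; the index and filter at the end are assumed consumption patterns, not code from this commit.

from llama_stack.apis.models import ModelType
from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry

# Two entries copied verbatim from the deleted file above.
ENTRIES = [
    ProviderModelEntry(
        provider_model_id="nvidia/nv-embedqa-e5-v5",
        model_type=ModelType.embedding,
        metadata={"embedding_dimension": 1024, "context_length": 512},
    ),
    ProviderModelEntry(
        provider_model_id="nv-rerank-qa-mistral-4b:1",
        model_type=ModelType.rerank,
        metadata={"endpoint": "https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking"},
    ),
]

# Assumed consumption pattern: index by provider model ID so a request naming
# a model resolves in O(1); filtering by ModelType picks out rerank models.
BY_ID = {entry.provider_model_id: entry for entry in ENTRIES}
RERANK_IDS = [e.provider_model_id for e in ENTRIES if e.model_type == ModelType.rerank]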