Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-28 02:53:30 +00:00
* Since our API packages use import * in __init__.py, we can import directly from llama_stack.apis.models instead of llama_stack.apis.models.models (a minimal sketch follows below). The choice to use import * is debatable, though, and may need to be reconsidered in the future.
* Remove the unnecessary Ruff F401 suppression.
* Consolidate the Ruff F403 rule configuration in pyproject.toml.

Signed-off-by: Sébastien Han <seb@redhat.com>
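For context, here is a minimal sketch of the equivalence the first bullet relies on. It assumes llama_stack is installed; the re-export shape of the package __init__.py and the pyproject.toml snippet are plausible reconstructions, not copied from the repository.

# Both imports bind the same class, because llama_stack/apis/models/__init__.py
# re-exports the submodule's names with `from .models import *`:
from llama_stack.apis.models import ModelType as via_package
from llama_stack.apis.models.models import ModelType as via_submodule

assert via_package is via_submodule  # same object; the shorter path is now preferred

# The consolidated F403 configuration would sit in pyproject.toml; one
# plausible shape (the glob is an assumption, not the repo's actual entry):
#
#   [tool.ruff.lint.per-file-ignores]
#   "**/__init__.py" = ["F403"]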
27 lines · 798 B · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.apis.models import ModelType
from llama_stack.providers.utils.inference.model_registry import (
    ProviderModelEntry,
)

LLM_MODEL_IDS = [
    "gemini/gemini-1.5-flash",
    "gemini/gemini-1.5-pro",
    "gemini/gemini-2.0-flash",
    "gemini/gemini-2.5-flash",
    "gemini/gemini-2.5-pro",
]


MODEL_ENTRIES = [ProviderModelEntry(provider_model_id=m) for m in LLM_MODEL_IDS] + [
    ProviderModelEntry(
        provider_model_id="gemini/text-embedding-004",
        model_type=ModelType.embedding,
        metadata={"embedding_dimension": 768, "context_length": 2048},
    ),
]
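As a quick usage note (not part of the file): a registry consumer might iterate these entries as below. This is a sketch meant to run in the same module, and it assumes ProviderModelEntry.model_type defaults to ModelType.llm for the plain entries, which the explicit embedding-only override above suggests.

# Sketch: label each Gemini entry as "llm" or "embedding".
# Assumes model_type defaults to ModelType.llm when not set explicitly.
for entry in MODEL_ENTRIES:
    kind = "embedding" if entry.model_type == ModelType.embedding else "llm"
    print(f"{entry.provider_model_id}: {kind}")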