# What does this PR do?

* Given that our API packages use `import *` in `__init__.py`, we don't need `from llama_stack.apis.models.models import ...` but simply `from llama_stack.apis.models import ...`. The decision to use `import *` is debatable and should probably be revisited at some point.
* Remove the unneeded Ruff F401 rule.
* Consolidate the Ruff F403 rule in the pyproject.

Signed-off-by: Sébastien Han <seb@redhat.com>
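To make the first bullet concrete, here is a minimal sketch (assuming the `from .models import *` re-export in `llama_stack/apis/models/__init__.py` that this PR relies on): both import paths resolve to the same object, so the shorter package-level import suffices.

```python
# Sketch: `llama_stack/apis/models/__init__.py` re-exports the submodule's
# names via `from .models import *`, so both spellings name the same class.
from llama_stack.apis.models.models import ModelType as DeepModelType  # submodule path
from llama_stack.apis.models import ModelType  # package path, preferred after this PR

assert ModelType is DeepModelType  # same object, imported two ways
```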
177 lines · 6.2 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pathlib import Path

from llama_stack.apis.models import ModelType
from llama_stack.distribution.datatypes import (
    ModelInput,
    Provider,
    ShieldInput,
    ToolGroupInput,
)
from llama_stack.providers.inline.files.localfs.config import LocalfsFilesImplConfig
from llama_stack.providers.inline.inference.sentence_transformers import (
    SentenceTransformersInferenceConfig,
)
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.remote.inference.fireworks.config import FireworksImplConfig
from llama_stack.providers.remote.inference.fireworks.models import MODEL_ENTRIES
from llama_stack.templates.template import (
    DistributionTemplate,
    RunConfigSettings,
    get_model_registry,
)


def get_distribution_template() -> DistributionTemplate:
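    # Provider types available for each API in this distribution.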
    providers = {
        "inference": ["remote::fireworks", "inline::sentence-transformers"],
        "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
        "safety": ["inline::llama-guard"],
        "agents": ["inline::meta-reference"],
        "telemetry": ["inline::meta-reference"],
        "eval": ["inline::meta-reference"],
        "datasetio": ["remote::huggingface", "inline::localfs"],
        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
        "files": ["inline::localfs"],
        "tool_runtime": [
            "remote::brave-search",
            "remote::tavily-search",
            "remote::wolfram-alpha",
            "inline::rag-runtime",
            "remote::model-context-protocol",
        ],
    }

    name = "fireworks"

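    # Concrete provider instances with sample configurations; the run configs
    # below use these as overrides for the defaults.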
    inference_provider = Provider(
        provider_id="fireworks",
        provider_type="remote::fireworks",
        config=FireworksImplConfig.sample_run_config(),
    )
    embedding_provider = Provider(
        provider_id="sentence-transformers",
        provider_type="inline::sentence-transformers",
        config=SentenceTransformersInferenceConfig.sample_run_config(),
    )
    vector_io_provider = Provider(
        provider_id="faiss",
        provider_type="inline::faiss",
        config=FaissVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
    )
    files_provider = Provider(
        provider_id="meta-reference-files",
        provider_type="inline::localfs",
        config=LocalfsFilesImplConfig.sample_run_config(f"~/.llama/distributions/{name}"),
    )

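    # Build the default model registry from the Fireworks model entries.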
    available_models = {
        "fireworks": MODEL_ENTRIES,
    }
    default_models = get_model_registry(available_models)

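    # Local embedding model served by sentence-transformers
    # (384-dimensional embeddings).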
    embedding_model = ModelInput(
        model_id="all-MiniLM-L6-v2",
        provider_id="sentence-transformers",
        model_type=ModelType.embedding,
        metadata={
            "embedding_dimension": 384,
        },
    )
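    # Built-in tool groups, each mapped to the provider that implements it.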
    default_tool_groups = [
        ToolGroupInput(
            toolgroup_id="builtin::websearch",
            provider_id="tavily-search",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::wolfram_alpha",
            provider_id="wolfram-alpha",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::rag",
            provider_id="rag-runtime",
        ),
    ]

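    # Assemble the template: a base run config plus a "run-with-safety"
    # variant that adds Llama Guard (text and vision) and a code scanner.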
    return DistributionTemplate(
        name=name,
        distro_type="self_hosted",
        description="Use Fireworks.AI for running LLM inference",
        container_image=None,
        template_path=Path(__file__).parent / "doc_template.md",
        providers=providers,
        available_models_by_provider=available_models,
        run_configs={
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider, embedding_provider],
                    "vector_io": [vector_io_provider],
                    "files": [files_provider],
                },
                default_models=default_models + [embedding_model],
                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
                default_tool_groups=default_tool_groups,
            ),
            "run-with-safety.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [
                        inference_provider,
                        embedding_provider,
                    ],
                    "vector_io": [vector_io_provider],
                    "files": [files_provider],
                    "safety": [
                        Provider(
                            provider_id="llama-guard",
                            provider_type="inline::llama-guard",
                            config={},
                        ),
                        Provider(
                            provider_id="llama-guard-vision",
                            provider_type="inline::llama-guard",
                            config={},
                        ),
                        Provider(
                            provider_id="code-scanner",
                            provider_type="inline::code-scanner",
                            config={},
                        ),
                    ],
                },
                default_models=[
                    *default_models,
                    embedding_model,
                ],
                default_shields=[
                    ShieldInput(
                        shield_id="meta-llama/Llama-Guard-3-8B",
                        provider_id="llama-guard",
                    ),
                    ShieldInput(
                        shield_id="meta-llama/Llama-Guard-3-11B-Vision",
                        provider_id="llama-guard-vision",
                    ),
                    ShieldInput(
                        shield_id="CodeScanner",
                        provider_id="code-scanner",
                    ),
                ],
                default_tool_groups=default_tool_groups,
            ),
        },
        run_config_env_vars={
            "LLAMA_STACK_PORT": (
                "8321",
                "Port for the Llama Stack distribution server",
            ),
            "FIREWORKS_API_KEY": (
                "",
                "Fireworks.AI API Key",
            ),
        },
    )