llama-stack-mirror/llama_toolchain/core/distribution_registry.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from functools import lru_cache
from typing import List, Optional

from .datatypes import *  # noqa: F403


@lru_cache()
def available_distribution_specs() -> List[DistributionSpec]:
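    """Return the built-in DistributionSpec entries known to llama_toolchain.

    The list is static, so the result is memoized via ``lru_cache``.
    """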
    return [
        DistributionSpec(
            distribution_type="local",
            description="Use code from `llama_toolchain` itself to serve all llama stack APIs",
            providers={
                "inference": "meta-reference",
                "memory": "meta-reference-faiss",
                "safety": "meta-reference",
                "agentic_system": "meta-reference",
                "telemetry": "console",
            },
        ),
        DistributionSpec(
            distribution_type="local-ollama",
            description="Like local, but use ollama for running LLM inference",
            providers={
                "inference": remote_provider_type("ollama"),
                "safety": "meta-reference",
                "agentic_system": "meta-reference",
                "memory": "meta-reference-faiss",
                "telemetry": "console",
            },
        ),
        DistributionSpec(
            distribution_type="local-plus-fireworks-inference",
            description="Use Fireworks.ai for running LLM inference",
            providers={
                "inference": remote_provider_type("fireworks"),
                "safety": "meta-reference",
                "agentic_system": "meta-reference",
                "memory": "meta-reference-faiss",
                "telemetry": "console",
            },
        ),
        DistributionSpec(
            distribution_type="local-plus-tgi-inference",
            description="Use TGI for running LLM inference",
            providers={
                "inference": remote_provider_type("tgi"),
                "safety": "meta-reference",
                "agentic_system": "meta-reference",
                "memory": "meta-reference-faiss",
            },
        ),
    ]
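

# Illustrative usage sketch (not part of this module): a caller can select a
# spec from the cached list by its `distribution_type`; the variable names
# below are hypothetical.
#
#   specs = available_distribution_specs()
#   ollama_spec = next(
#       (s for s in specs if s.distribution_type == "local-ollama"), None
#   )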