llama-stack/llama_toolchain/inference/providers.py
Ashwin Bharambe 191cd28831
Simplified Telemetry API and tying it to logger (#57)
* Simplified Telemetry API and tying it to logger

* small update which adds a METRIC type

* move span events one level down into structured log events

---------

Co-authored-by: Ashwin Bharambe <ashwin@meta.com>
2024-09-11 14:25:37 -07:00

68 lines
2.2 KiB
Python

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import List
from llama_toolchain.core.datatypes import * # noqa: F403
def available_providers() -> List[ProviderSpec]:
    """Return the inference provider specs this toolchain knows about.

    The list contains one inline (locally-hosted) meta-reference
    implementation plus a set of remote adapters. Each spec declares
    the pip packages it needs and the module that implements it.
    """
    # Keyword arguments for each remote adapter. Entries that omit
    # ``config_class`` rely on AdapterSpec's default for that field.
    remote_adapter_kwargs = [
        dict(
            adapter_id="ollama",
            pip_packages=["ollama"],
            module="llama_toolchain.inference.adapters.ollama",
        ),
        dict(
            adapter_id="tgi",
            pip_packages=["text-generation"],
            module="llama_toolchain.inference.adapters.tgi",
        ),
        dict(
            adapter_id="fireworks",
            pip_packages=[
                "fireworks-ai",
            ],
            module="llama_toolchain.inference.adapters.fireworks",
            config_class="llama_toolchain.inference.adapters.fireworks.FireworksImplConfig",
        ),
        dict(
            adapter_id="together",
            pip_packages=[
                "together",
            ],
            module="llama_toolchain.inference.adapters.together",
            config_class="llama_toolchain.inference.adapters.together.TogetherImplConfig",
        ),
    ]

    # The single inline provider: runs the model in-process and thus
    # pulls in the heavyweight ML dependencies directly.
    inline_spec = InlineProviderSpec(
        api=Api.inference,
        provider_type="meta-reference",
        pip_packages=[
            "accelerate",
            "blobfile",
            "codeshield",
            "fairscale",
            "fbgemm-gpu==0.8.0",
            "torch",
            "transformers",
            "zmq",
        ],
        module="llama_toolchain.inference.meta_reference",
        config_class="llama_toolchain.inference.meta_reference.MetaReferenceImplConfig",
    )

    return [inline_spec] + [
        remote_provider_spec(api=Api.inference, adapter=AdapterSpec(**kwargs))
        for kwargs in remote_adapter_kwargs
    ]