Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 04:04:14 +00:00
Currently, providers have a `pip_package` list. Rather than maintain our own form of Python dependency management, we should use a `pyproject.toml` file in each provider, declaring its dependencies in a more trackable manner. Each provider can then be installed via the `module` field already in place on the `ProviderSpec`: since it points to the directory the provider lives in, we can simply `uv pip install` that directory instead of installing the dependencies one by one.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
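As a sketch of the proposal, each provider directory would carry a small `pyproject.toml` along these lines; the project name, version, and dependency entries below are hypothetical placeholders, not taken from the repository:

# Hypothetical pyproject.toml living alongside a provider's module;
# all names and pins below are illustrative placeholders.
[project]
name = "llama-stack-provider-eval-meta-reference"  # hypothetical package name
version = "0.1.0"
description = "Dependencies for the inline meta-reference eval provider"
requires-python = ">=3.10"
dependencies = [
    # previously declared via the provider's `pip_package` list
    "placeholder-eval-dependency>=1.0",
]

With this in place, `uv pip install <provider directory>` resolves and installs everything the provider needs in one step, instead of iterating over a Python list of package names.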
42 lines · 1.6 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec


def available_providers() -> list[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.eval,
            provider_type="inline::meta-reference",
            module="llama_stack.providers.inline.eval.meta_reference",
            config_class="llama_stack.providers.inline.eval.meta_reference.MetaReferenceEvalConfig",
            api_dependencies=[
                Api.datasetio,
                Api.datasets,
                Api.scoring,
                Api.inference,
                Api.agents,
            ],
            description="Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics.",
        ),
        RemoteProviderSpec(
            api=Api.eval,
            adapter_type="nvidia",
            provider_type="remote::nvidia",
            module="llama_stack.providers.remote.eval.nvidia",
            config_class="llama_stack.providers.remote.eval.nvidia.NVIDIAEvalConfig",
            description="NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform.",
            api_dependencies=[
                Api.datasetio,
                Api.datasets,
                Api.scoring,
                Api.inference,
                Api.agents,
            ],
        ),
    ]
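The `module` field on each spec above is an importable package path, so its on-disk directory can be recovered at runtime and handed straight to `uv`. Below is a minimal sketch of that install flow, assuming `uv` is on the PATH; `install_provider` is a hypothetical helper for illustration, not part of this file:

# Hypothetical helper sketching the flow described in the commit message:
# resolve a ProviderSpec's `module` to its source directory, then install
# that directory (and the dependencies in its pyproject.toml) with uv.
import importlib.util
import subprocess


def install_provider(module: str) -> None:
    spec = importlib.util.find_spec(module)
    if spec is None or not spec.submodule_search_locations:
        raise ValueError(f"cannot locate a package directory for {module!r}")
    provider_dir = next(iter(spec.submodule_search_locations))
    # Equivalent to running: uv pip install <provider_dir>
    subprocess.run(["uv", "pip", "install", provider_dir], check=True)


# e.g. install_provider("llama_stack.providers.inline.eval.meta_reference")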