Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 12:07:34 +00:00
# What does this PR do?

The openai package is already a dependency of the llama-stack project itself, so let the project dictate which openai version we need and avoid potential breakage from unsatisfiable dependency resolution.

Signed-off-by: Sébastien Han <seb@redhat.com>
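In effect (a sketch only; the exact previous declaration is not shown in this excerpt), the batches provider no longer lists openai in its own pip_packages and instead relies on the project-level requirement:

    # before (assumed): the provider declared the openai package itself
    pip_packages=["openai"],
    # after: empty, since the llama-stack project already depends on openai
    pip_packages=[],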
26 lines
901 B
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec


def available_providers() -> list[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.batches,
            provider_type="inline::reference",
            pip_packages=[],
            module="llama_stack.providers.inline.batches.reference",
            config_class="llama_stack.providers.inline.batches.reference.config.ReferenceBatchesImplConfig",
            api_dependencies=[
                Api.inference,
                Api.files,
                Api.models,
            ],
            description="Reference implementation of batches API with KVStore persistence.",
        ),
    ]
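For illustration only, a minimal sketch of how this registry function could be consumed; the import path llama_stack.providers.registry.batches is an assumption inferred from the module strings above and is not confirmed by this excerpt:

    # Sketch: list the registered batches providers and their declared pip
    # requirements. The import path below is assumed, not shown in this file.
    from llama_stack.providers.registry.batches import available_providers

    for spec in available_providers():
        # pip_packages is empty here because openai is already a dependency of
        # the llama-stack project itself (see the PR description above).
        print(spec.provider_type, spec.module, spec.pip_packages)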