# What does this PR do?

- To make the migration easier, delete the existing `eval/scoring/scoring_function` APIs. A number of implementations will be broken as a result; the follow-up sequence is:
  1. Migrate the benchmark graders.
  2. Clean up the existing scoring functions.
- Add a skeleton evaluation impl to make tests pass (see the sketch below).

## Test Plan

Tested in follow-up PRs.

[//]: # (## Documentation)
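For illustration, the "skeleton evaluation impl" is a stub that satisfies the new `evaluation` API surface without any real logic. The block below is a minimal sketch with hypothetical class and method names, not the actual contents of the new module:

```python
# Hypothetical sketch only: the real skeleton lives under
# llama_stack/providers/inline/evaluation/meta_reference/ and its method
# signatures are defined by the new evaluation API, not by this example.
class MetaReferenceEvaluationImpl:
    def __init__(self, config) -> None:
        self.config = config

    async def initialize(self) -> None:
        # Nothing to set up until the benchmark graders are migrated.
        pass

    async def run_benchmark(self, *args, **kwargs):
        raise NotImplementedError("evaluation is a skeleton impl for now")

    async def grade(self, *args, **kwargs):
        raise NotImplementedError("evaluation is a skeleton impl for now")
```

The provider registry entry below is what wires this skeleton into the build: it declares the `evaluation` API, the inline module path, and the extra pip dependencies.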
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import List

from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec
from llama_stack.providers.utils.kvstore import kvstore_dependencies


def available_providers() -> List[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.evaluation,
            provider_type="inline::meta-reference",
            pip_packages=[
                "matplotlib",
                "pillow",
                "pandas",
                "scikit-learn",
            ]
            + kvstore_dependencies(),
            module="llama_stack.providers.inline.evaluation.meta_reference",
            config_class="llama_stack.providers.inline.evaluation.meta_reference.MetaReferenceEvaluationImplConfig",
            api_dependencies=[
                Api.inference,
                Api.safety,
                Api.vector_io,
                Api.vector_dbs,
                Api.tool_runtime,
                Api.tool_groups,
            ],
        ),
    ]
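For context, the stack resolves an inline provider by importing `module` and calling its entry point with an instance of `config_class`. Below is a minimal sketch of what that module-level wiring might look like, assuming the usual `get_provider_impl` convention and a Pydantic config; the field names and module layout are illustrative assumptions, not the real package contents:

```python
# Hypothetical sketch of an __init__.py for the meta_reference evaluation
# package; the real MetaReferenceEvaluationImplConfig and entry point are
# defined in that package, not here.
from typing import Any, Dict

from pydantic import BaseModel


class MetaReferenceEvaluationImplConfig(BaseModel):
    # Illustrative field: kvstore_dependencies() in the registry suggests the
    # impl persists evaluation state in a key-value store.
    kvstore: Dict[str, Any] = {}


async def get_provider_impl(config: MetaReferenceEvaluationImplConfig, deps: Dict[Any, Any]):
    # Deferred import keeps heavy pip dependencies (pandas, scikit-learn, ...)
    # out of registry loading; the import path below is a hypothetical layout.
    from .evaluation import MetaReferenceEvaluationImpl

    impl = MetaReferenceEvaluationImpl(config)
    await impl.initialize()
    return impl
```

The `api_dependencies` listed in the registry entry (inference, safety, vector_io, and so on) are what the stack is expected to hand to the provider through `deps` when the distribution is assembled.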