# What does this PR do?

- To make this easier, delete the existing `eval/scoring/scoring_function` APIs. A number of implementations will be broken as a result; the sequence for repairing them is:
  1. migrate benchmark graders
  2. clean up the existing scoring functions
- Add a skeleton evaluation impl to make tests pass.

## Test Plan

Tested in follow-up PRs.

[//]: # (## Documentation)
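For context, a "skeleton evaluation impl" here means a provider whose methods are stubbed out so the stack can register the provider and tests can import it, while the real behavior lands in later PRs. The sketch below is purely illustrative: the class and method names (`run`, `grade`, `initialize`) are hypothetical placeholders, not the actual Evaluation API surface.

```python
from typing import Any, Dict, List


class SkeletonEvaluationImpl:
    """Illustrative stub provider: satisfies wiring/registration but does no work yet.

    Method names here are hypothetical placeholders, not the real Evaluation API.
    """

    def __init__(self, config: Any) -> None:
        self.config = config

    async def initialize(self) -> None:
        # Nothing to set up yet; kvstore wiring arrives with the real implementation.
        pass

    async def run(self, benchmark_id: str, candidate: Dict[str, Any]) -> Dict[str, Any]:
        raise NotImplementedError("Evaluation runs are not implemented yet")

    async def grade(self, rows: List[Dict[str, Any]]) -> Dict[str, Any]:
        raise NotImplementedError("Grading is not implemented yet")
```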
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict

from pydantic import BaseModel

from llama_stack.providers.utils.kvstore.config import (
    KVStoreConfig,
    SqliteKVStoreConfig,
)


class MetaReferenceEvaluationConfig(BaseModel):
    kvstore: KVStoreConfig

    @classmethod
    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
        return {
            "kvstore": SqliteKVStoreConfig.sample_run_config(
                __distro_dir__=__distro_dir__,
                db_name="meta_reference_evaluation.db",
            )
        }
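As a quick illustration of how this config is meant to be used, the sketch below calls `sample_run_config` and also constructs the model directly. It assumes (hedged) that `SqliteKVStoreConfig` exposes a `db_path` field and that its `sample_run_config` returns a dict parseable as a `KVStoreConfig`, i.e. a SQLite entry whose database path is derived from the distro directory and the `meta_reference_evaluation.db` name; the distro path shown is a made-up example.

```python
# Illustrative only; assumes the SqliteKVStoreConfig behavior described above.
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig

# Generate the sample run config: a dict with a "kvstore" entry pointing at
# meta_reference_evaluation.db under the given (hypothetical) distro directory.
sample = MetaReferenceEvaluationConfig.sample_run_config(
    __distro_dir__="~/.llama/distributions/my-distro"
)
print(sample["kvstore"])

# Or construct the config directly with an explicit SQLite kvstore backend.
config = MetaReferenceEvaluationConfig(
    kvstore=SqliteKVStoreConfig(db_path="/tmp/meta_reference_evaluation.db")
)
```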