diff --git a/llama_stack/distribution/routers/routing_tables.py b/llama_stack/distribution/routers/routing_tables.py
index 69834868e..55c4ed85f 100644
--- a/llama_stack/distribution/routers/routing_tables.py
+++ b/llama_stack/distribution/routers/routing_tables.py
@@ -106,7 +106,7 @@ class CommonRoutingTableImpl(RoutingTable):
                 await self.dist_registry.register(obj)
 
         # Register all objects from providers
-        for pid, p in self.impls_by_provider_id.items():
+        for _pid, p in self.impls_by_provider_id.items():
            api = get_impl_api(p)
            if api == Api.inference:
                p.model_store = self
diff --git a/llama_stack/providers/inline/eval/meta_reference/eval.py b/llama_stack/providers/inline/eval/meta_reference/eval.py
index 3630d4c03..64a4c0946 100644
--- a/llama_stack/providers/inline/eval/meta_reference/eval.py
+++ b/llama_stack/providers/inline/eval/meta_reference/eval.py
@@ -13,7 +13,6 @@ from llama_stack.apis.benchmarks import Benchmark
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.inference import Inference, SystemMessage, UserMessage
-from llama_stack.apis.scoring import Scoring
 from llama_stack.providers.datatypes import BenchmarksProtocolPrivate
 from llama_stack.providers.inline.agents.meta_reference.agent_instance import (
     MEMORY_QUERY_TOOL,
@@ -37,14 +36,14 @@ class MetaReferenceEvalImpl(
         config: MetaReferenceEvalConfig,
         datasetio_api: DatasetIO,
         datasets_api: Datasets,
-        scoring_api: Scoring,
         inference_api: Inference,
         agents_api: Agents,
     ) -> None:
         self.config = config
         self.datasetio_api = datasetio_api
         self.datasets_api = datasets_api
-        self.scoring_api = scoring_api
+        # TODO(xiyan): this implementation will be refactored
+        self.scoring_api = None
         self.inference_api = inference_api
         self.agents_api = agents_api
 
diff --git a/llama_stack/templates/open-benchmark/open_benchmark.py b/llama_stack/templates/open-benchmark/open_benchmark.py
index aa53f85a1..ff5601467 100644
--- a/llama_stack/templates/open-benchmark/open_benchmark.py
+++ b/llama_stack/templates/open-benchmark/open_benchmark.py
@@ -30,14 +30,12 @@ from llama_stack.providers.remote.vector_io.pgvector.config import (
 from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
 from llama_stack.templates.template import (
     DistributionTemplate,
-    get_model_registry,
     RunConfigSettings,
+    get_model_registry,
 )
 
 
-def get_inference_providers() -> (
-    Tuple[List[Provider], Dict[str, List[ProviderModelEntry]]]
-):
+def get_inference_providers() -> Tuple[List[Provider], Dict[str, List[ProviderModelEntry]]]:
     # in this template, we allow each API key to be optional
     providers = [
         (
@@ -118,9 +116,7 @@ def get_distribution_template() -> DistributionTemplate:
             Provider(
                 provider_id="sqlite-vec",
                 provider_type="inline::sqlite-vec",
-                config=SQLiteVectorIOConfig.sample_run_config(
-                    f"~/.llama/distributions/{name}"
-                ),
+                config=SQLiteVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
             ),
             Provider(
                 provider_id="${env.ENABLE_CHROMADB+chromadb}",