diff --git a/llama_stack/providers/inline/eval/meta_reference/eval.py b/llama_stack/providers/inline/eval/meta_reference/eval.py
index 20c7cca16..e58975ff8 100644
--- a/llama_stack/providers/inline/eval/meta_reference/eval.py
+++ b/llama_stack/providers/inline/eval/meta_reference/eval.py
@@ -14,7 +14,6 @@ from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.inference import Inference, SystemMessage, UserMessage
 from llama_stack.apis.scoring import Scoring
-from llama_stack.distribution.datatypes import Api
 from llama_stack.providers.datatypes import BenchmarksProtocolPrivate
 from llama_stack.providers.inline.agents.meta_reference.agent_instance import (
     MEMORY_QUERY_TOOL,
@@ -88,11 +87,7 @@ class MetaReferenceEvalImpl(
         # TODO: validate dataset schema
         all_rows = await self.datasetio_api.iterrows(
             dataset_id=dataset_id,
-            limit=(
-                -1
-                if benchmark_config.num_examples is None
-                else benchmark_config.num_examples
-            ),
+            limit=(-1 if benchmark_config.num_examples is None else benchmark_config.num_examples),
         )
         res = await self.evaluate_rows(
             benchmark_id=benchmark_id,
@@ -118,14 +113,10 @@ class MetaReferenceEvalImpl(
         for i, x in tqdm(enumerate(input_rows)):
             assert ColumnName.chat_completion_input.value in x, "Invalid input row"
             input_messages = json.loads(x[ColumnName.chat_completion_input.value])
-            input_messages = [
-                UserMessage(**x) for x in input_messages if x["role"] == "user"
-            ]
+            input_messages = [UserMessage(**x) for x in input_messages if x["role"] == "user"]
 
             # NOTE: only single-turn agent generation is supported. Create a new session for each input row
-            session_create_response = await self.agents_api.create_agent_session(
-                agent_id, f"session-{i}"
-            )
+            session_create_response = await self.agents_api.create_agent_session(agent_id, f"session-{i}")
             session_id = session_create_response.session_id
 
             turn_request = dict(
@@ -134,12 +125,7 @@ class MetaReferenceEvalImpl(
                 messages=input_messages,
                 stream=True,
             )
-            turn_response = [
-                chunk
-                async for chunk in await self.agents_api.create_agent_turn(
-                    **turn_request
-                )
-            ]
+            turn_response = [chunk async for chunk in await self.agents_api.create_agent_turn(**turn_request)]
             final_event = turn_response[-1].event.payload
 
             # check if there's a memory retrieval step and extract the context
@@ -148,14 +134,10 @@ class MetaReferenceEvalImpl(
                 if step.step_type == StepType.tool_execution.value:
                     for tool_response in step.tool_responses:
                         if tool_response.tool_name == MEMORY_QUERY_TOOL:
-                            memory_rag_context = " ".join(
-                                x.text for x in tool_response.content
-                            )
+                            memory_rag_context = " ".join(x.text for x in tool_response.content)
 
             agent_generation = {}
-            agent_generation[ColumnName.generated_answer.value] = (
-                final_event.turn.output_message.content
-            )
+            agent_generation[ColumnName.generated_answer.value] = final_event.turn.output_message.content
             if memory_rag_context:
                 agent_generation[ColumnName.context.value] = memory_rag_context
 
@@ -167,9 +149,7 @@ class MetaReferenceEvalImpl(
         self, input_rows: List[Dict[str, Any]], benchmark_config: BenchmarkConfig
     ) -> List[Dict[str, Any]]:
         candidate = benchmark_config.eval_candidate
-        assert (
-            candidate.sampling_params.max_tokens is not None
-        ), "SamplingParams.max_tokens must be provided"
+        assert candidate.sampling_params.max_tokens is not None, "SamplingParams.max_tokens must be provided"
 
         generations = []
         for x in tqdm(input_rows):
@@ -180,39 +160,21 @@ class MetaReferenceEvalImpl(
                     content=input_content,
                     sampling_params=candidate.sampling_params,
                 )
-                generations.append(
-                    {
-                        ColumnName.generated_answer.value: response.completion_message.content
-                    }
-                )
+                generations.append({ColumnName.generated_answer.value: response.completion_message.content})
             elif ColumnName.chat_completion_input.value in x:
-                chat_completion_input_json = json.loads(
-                    x[ColumnName.chat_completion_input.value]
-                )
-                input_messages = [
-                    UserMessage(**x)
-                    for x in chat_completion_input_json
-                    if x["role"] == "user"
-                ]
+                chat_completion_input_json = json.loads(x[ColumnName.chat_completion_input.value])
+                input_messages = [UserMessage(**x) for x in chat_completion_input_json if x["role"] == "user"]
                 messages = []
                 if candidate.system_message:
                     messages.append(candidate.system_message)
-                messages += [
-                    SystemMessage(**x)
-                    for x in chat_completion_input_json
-                    if x["role"] == "system"
-                ]
+                messages += [SystemMessage(**x) for x in chat_completion_input_json if x["role"] == "system"]
                 messages += input_messages
                 response = await self.inference_api.chat_completion(
                     model_id=candidate.model,
                     messages=messages,
                     sampling_params=candidate.sampling_params,
                 )
-                generations.append(
-                    {
-                        ColumnName.generated_answer.value: response.completion_message.content
-                    }
-                )
+                generations.append({ColumnName.generated_answer.value: response.completion_message.content})
             else:
                 raise ValueError("Invalid input row")
 
@@ -235,8 +197,7 @@ class MetaReferenceEvalImpl(
 
         # scoring with generated_answer
         score_input_rows = [
-            input_r | generated_r
-            for input_r, generated_r in zip(input_rows, generations, strict=False)
+            input_r | generated_r for input_r, generated_r in zip(input_rows, generations, strict=False)
         ]
 
         if benchmark_config.scoring_params is not None:
@@ -245,9 +206,7 @@ class MetaReferenceEvalImpl(
                 for scoring_fn_id in scoring_functions
             }
         else:
-            scoring_functions_dict = {
-                scoring_fn_id: None for scoring_fn_id in scoring_functions
-            }
+            scoring_functions_dict = {scoring_fn_id: None for scoring_fn_id in scoring_functions}
 
         score_response = await self.scoring_api.score(
             input_rows=score_input_rows, scoring_functions=scoring_functions_dict
diff --git a/tests/integration/eval/test_eval.py b/tests/integration/eval/test_eval.py
index ee276e72b..c4aa0fa1b 100644
--- a/tests/integration/eval/test_eval.py
+++ b/tests/integration/eval/test_eval.py
@@ -3,7 +3,6 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-import os
 import uuid
 
 from pathlib import Path
@@ -22,9 +21,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
         purpose="eval/messages-answer",
         source={
             "type": "uri",
-            "uri": data_url_from_file(
-                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
-            ),
+            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
         },
     )
     response = llama_stack_client.datasets.list()
@@ -73,9 +70,7 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         purpose="eval/messages-answer",
         source={
             "type": "uri",
-            "uri": data_url_from_file(
-                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
-            ),
+            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
         },
     )
     benchmark_id = str(uuid.uuid4())
@@ -98,14 +93,10 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         },
     )
     assert response.job_id == "0"
-    job_status = llama_stack_client.eval.jobs.status(
-        job_id=response.job_id, benchmark_id=benchmark_id
-    )
+    job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
    assert job_status and job_status == "completed"
 
-    eval_response = llama_stack_client.eval.jobs.retrieve(
-        job_id=response.job_id, benchmark_id=benchmark_id
-    )
+    eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
     assert eval_response is not None
     assert len(eval_response.generations) == 5
     assert scoring_fn_id in eval_response.scores