From 87ec4243eeda73cb9d2f528eb791d40d80796821 Mon Sep 17 00:00:00 2001 From: Xi Yan Date: Thu, 2 Jan 2025 11:18:43 -0800 Subject: [PATCH] [rag evals][3/n] add ability to eval retrieval + generation in agentic eval pipeline (#668) # What does this PR do? - This PR adds the ability for users to evaluate both retrieval and generation, separately and as a whole, by passing an AgentConfig to the /eval API - The memory_retrieval context will be stored in the "context" column used for scoring functions that can evaluate the retrieved context. ## Test Plan - E2E Test RAG Agent Notebook: https://gist.github.com/yanxi0830/0377594d29958f9b6f9317ab049fa836 image image ## Sources Please link relevant resources if necessary. ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Ran pre-commit to handle lint / formatting issues. - [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section? - [ ] Updated relevant documentation. - [ ] Wrote necessary unit or integration tests. 
--- .../inline/eval/meta_reference/eval.py | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/llama_stack/providers/inline/eval/meta_reference/eval.py b/llama_stack/providers/inline/eval/meta_reference/eval.py index f9af2a0b0..b555c9f2a 100644 --- a/llama_stack/providers/inline/eval/meta_reference/eval.py +++ b/llama_stack/providers/inline/eval/meta_reference/eval.py @@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional from tqdm import tqdm -from llama_stack.apis.agents import Agents +from llama_stack.apis.agents import Agents, StepType from llama_stack.apis.datasetio import DatasetIO from llama_stack.apis.datasets import Datasets from llama_stack.apis.eval_tasks import EvalTask @@ -139,11 +139,21 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate, DataSchemaValidatorM ) ] final_event = turn_response[-1].event.payload - generations.append( - { - ColumnName.generated_answer.value: final_event.turn.output_message.content - } + + # check if there's a memory retrieval step and extract the context + memory_rag_context = None + for step in final_event.turn.steps: + if step.step_type == StepType.memory_retrieval.value: + memory_rag_context = " ".join(x.text for x in step.inserted_context) + + agent_generation = {} + agent_generation[ColumnName.generated_answer.value] = ( + final_event.turn.output_message.content ) + if memory_rag_context: + agent_generation[ColumnName.context.value] = memory_rag_context + + generations.append(agent_generation) return generations