[rag evals][3/n] add ability to eval retrieval + generation in agentic eval pipeline (#668)
# What does this PR do?

- This PR adds the ability for users to evaluate retrieval and generation, both separately and as a whole, by passing an `AgentConfig` to the `/eval` API.
- The memory retrieval context is stored in the "context" column, so scoring functions can evaluate the retrieved context.

## Test Plan

- E2E RAG agent eval notebook: https://gist.github.com/yanxi0830/0377594d29958f9b6f9317ab049fa836

<img width="758" alt="image" src="https://github.com/user-attachments/assets/58ed9db7-f07b-400a-931b-923b0d612902" />
<img width="682" alt="image" src="https://github.com/user-attachments/assets/9ebd7fbd-2a6d-4c93-92fa-a9456fae2378" />

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
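For context, a rough sketch of how this is meant to be used from the client side is shown below. This is a minimal sketch assuming the `llama-stack-client` Python SDK; the base URL, model name, eval task id, dataset rows, and scoring function ids are illustrative placeholders, not values taken from this PR.

```python
# Sketch only: assumes the llama-stack-client SDK surface; all ids, the model
# name, and the scoring functions below are illustrative placeholders.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

# Agent candidate: the eval loop answers each row with a full agent turn
# (retrieval + generation) instead of a single model call.
agent_config = {
    "model": "Llama3.1-8B-Instruct",
    "instructions": "Answer the question using the retrieved documents.",
    "tools": [{"type": "memory"}],  # placeholder memory/RAG tool config
    "enable_session_persistence": False,
}

# Illustrative rows; the exact dataset schema depends on the registered eval task.
rows = [
    {
        "input_query": "What is the capital of France?",
        "expected_answer": "Paris",
    }
]

response = client.eval.evaluate_rows(
    task_id="meta-reference::rag-eval",     # placeholder eval task id
    input_rows=rows,
    scoring_functions=[
        "braintrust::answer-correctness",   # scores the generated answer (generation)
        "braintrust::context-relevancy",    # placeholder: scores the retrieved "context" column
    ],
    task_config={
        "type": "app",
        "eval_candidate": {"type": "agent", "config": agent_config},
    },
)
print(response.scores)
```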
This commit is contained in:
parent
41cff917ca
commit
87ec4243ee
1 changed file with 15 additions and 5 deletions
```diff
@@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional
 
 from tqdm import tqdm
 
-from llama_stack.apis.agents import Agents
+from llama_stack.apis.agents import Agents, StepType
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.eval_tasks import EvalTask
@@ -139,11 +139,21 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate, DataSchemaValidatorM
             )
         ]
         final_event = turn_response[-1].event.payload
-        generations.append(
-            {
-                ColumnName.generated_answer.value: final_event.turn.output_message.content
-            }
-        )
+
+        # check if there's a memory retrieval step and extract the context
+        memory_rag_context = None
+        for step in final_event.turn.steps:
+            if step.step_type == StepType.memory_retrieval.value:
+                memory_rag_context = " ".join(x.text for x in step.inserted_context)
+
+        agent_generation = {}
+        agent_generation[ColumnName.generated_answer.value] = (
+            final_event.turn.output_message.content
+        )
+        if memory_rag_context:
+            agent_generation[ColumnName.context.value] = memory_rag_context
+
+        generations.append(agent_generation)
 
     return generations
 
```
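For reference, after this change each entry appended to `generations` looks roughly like the sketch below. The values are made up; the column names follow `ColumnName.generated_answer` and `ColumnName.context` from the diff above.

```python
# Illustrative shape of one generations entry after this change (values made up).
# When the turn includes a memory_retrieval step, the joined inserted_context
# text is carried in the "context" column next to the generated answer:
generation_with_retrieval = {
    "generated_answer": "Paris is the capital of France.",
    "context": "France is a country in Western Europe. Its capital is Paris.",
}

# Without a memory_retrieval step, memory_rag_context stays None, so the row
# only carries the generated answer and generation-only evals keep working:
generation_without_retrieval = {
    "generated_answer": "Paris is the capital of France.",
}
```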