From 0560d1f4a24e325d3b0c22d8a0a93bdc838fa2d1 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Thu, 27 Feb 2025 14:14:05 -0800
Subject: [PATCH] better checking

---
 .../inline/agents/meta_reference/agent_instance.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
index de2c59101..3502c21f2 100644
--- a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
+++ b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
@@ -125,6 +125,13 @@ class ChatAgent(ShieldRunnerMixin):
 
     def turn_to_messages(self, turn: Turn) -> List[Message]:
         messages = []
+        # NOTE: if a toolcall response is in a step, we do not add it when processing the input messages
+        tool_call_ids = set()
+        for step in turn.steps:
+            if step.step_type == StepType.tool_execution.value:
+                for response in step.tool_responses:
+                    tool_call_ids.add(response.call_id)
+
         for m in turn.input_messages:
             msg = m.model_copy()
             # We do not want to keep adding RAG context to the input messages
@@ -133,8 +140,9 @@
             if isinstance(msg, UserMessage):
                 msg.context = None
             if isinstance(msg, ToolResponseMessage):
-                # NOTE: do not add ToolResponseMessage here, we'll add them in tool_execution steps
-                continue
+                if msg.call_id in tool_call_ids:
+                    # NOTE: do not add ToolResponseMessage here, we'll add them in tool_execution steps
+                    continue
 
             messages.append(msg)
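
The patched logic keeps a ToolResponseMessage in the reconstructed history only when no tool_execution step in the same turn already carries a response with the same call_id; previously every ToolResponseMessage was dropped unconditionally. Below is a minimal, self-contained sketch of that behavior. The dataclasses (UserMessage, ToolResponseMessage, ToolResponse, ToolExecutionStep, Turn) are simplified stand-ins for illustration only, not the actual llama_stack types.

# Sketch of the post-patch turn_to_messages filtering, using simplified stand-in types.
from dataclasses import dataclass, field
from typing import List, Optional, Union


@dataclass
class UserMessage:
    content: str
    context: Optional[str] = None


@dataclass
class ToolResponseMessage:
    call_id: str
    content: str


@dataclass
class ToolResponse:
    call_id: str
    content: str


@dataclass
class ToolExecutionStep:
    tool_responses: List[ToolResponse] = field(default_factory=list)
    step_type: str = "tool_execution"


Message = Union[UserMessage, ToolResponseMessage]


@dataclass
class Turn:
    input_messages: List[Message]
    steps: List[ToolExecutionStep] = field(default_factory=list)


def turn_to_messages(turn: Turn) -> List[Message]:
    messages: List[Message] = []

    # Collect call_ids that a tool_execution step has already answered.
    tool_call_ids = set()
    for step in turn.steps:
        if step.step_type == "tool_execution":
            for response in step.tool_responses:
                tool_call_ids.add(response.call_id)

    for m in turn.input_messages:
        if isinstance(m, UserMessage):
            # Drop RAG context, mirroring the existing behavior in the method.
            m = UserMessage(content=m.content, context=None)
        if isinstance(m, ToolResponseMessage) and m.call_id in tool_call_ids:
            # Already covered by a tool_execution step; skip to avoid duplication.
            continue
        messages.append(m)
    return messages


if __name__ == "__main__":
    turn = Turn(
        input_messages=[
            ToolResponseMessage(call_id="call-1", content="result A"),  # duplicated by a step below
            ToolResponseMessage(call_id="call-2", content="result B"),  # no matching step, so it is kept
            UserMessage(content="next question", context="retrieved docs"),
        ],
        steps=[ToolExecutionStep(tool_responses=[ToolResponse(call_id="call-1", content="result A")])],
    )
    out = turn_to_messages(turn)
    # call-1 is skipped, call-2 survives, and the user message loses its RAG context.
    assert [getattr(m, "call_id", None) for m in out] == ["call-2", None]
    print(out)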