diff --git a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
index 8c10c6baf..4a1421245 100644
--- a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
+++ b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
@@ -909,7 +909,19 @@ class ChatAgent(ShieldRunnerMixin):
                 if tool_def_map.get(built_in_type, None):
                     raise ValueError(f"Tool {built_in_type} already exists")
 
-                tool_def_map[built_in_type] = ToolDefinition(tool_name=built_in_type)
+                tool_def_map[built_in_type] = ToolDefinition(
+                    tool_name=built_in_type,
+                    description=tool_def.description,
+                    parameters={
+                        param.name: ToolParamDefinition(
+                            param_type=param.parameter_type,
+                            description=param.description,
+                            required=param.required,
+                            default=param.default,
+                        )
+                        for param in tool_def.parameters
+                    },
+                )
                 tool_to_group[built_in_type] = tool_def.toolgroup_id
                 continue
 
diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index b9422d85d..967a3e44d 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -169,7 +169,7 @@ async def _process_vllm_chat_completion_stream_response(
                 args = {} if not args_str else json.loads(args_str)
             except Exception as e:
                 log.warning(f"Failed to parse tool call buffer arguments: {args_str} \nError: {e}")
-            if args is not None:
+            if args:
                 yield ChatCompletionResponseStreamChunk(
                     event=ChatCompletionResponseEvent(
                         event_type=event_type,
@@ -183,7 +183,7 @@
                             ),
                         ),
                     )
                 )
-            else:
+            elif args_str:
                 yield ChatCompletionResponseStreamChunk(
                     event=ChatCompletionResponseEvent(
                         event_type=ChatCompletionResponseEventType.progress,
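
Note on the agent_instance.py change: a built-in tool was previously registered as a bare ToolDefinition(tool_name=built_in_type), dropping the description and parameter schema that the tool group advertises, so the model saw built-in tools with no argument documentation. The sketch below illustrates the enriched mapping; it uses stand-in dataclasses rather than the real llama_stack models, and the names ListedParam, ListedToolDef, and to_tool_definition are illustrative only:

    # Minimal sketch, not the llama_stack types themselves.
    from dataclasses import dataclass, field
    from typing import Any

    @dataclass
    class ToolParamDefinition:  # stand-in for llama_stack's model
        param_type: str
        description: str | None = None
        required: bool = True
        default: Any = None

    @dataclass
    class ToolDefinition:  # stand-in for llama_stack's model
        tool_name: str
        description: str | None = None
        parameters: dict[str, ToolParamDefinition] = field(default_factory=dict)

    @dataclass
    class ListedParam:  # stand-in: a parameter as advertised by the tool group
        name: str
        parameter_type: str
        description: str
        required: bool = True
        default: Any = None

    @dataclass
    class ListedToolDef:  # stand-in: tool_def as returned when listing tools
        description: str
        parameters: list[ListedParam]

    def to_tool_definition(built_in_type: str, tool_def: ListedToolDef) -> ToolDefinition:
        # Same shape as the patched code: carry the advertised description and
        # per-parameter schema through instead of registering a name-only stub.
        return ToolDefinition(
            tool_name=built_in_type,
            description=tool_def.description,
            parameters={
                param.name: ToolParamDefinition(
                    param_type=param.parameter_type,
                    description=param.description,
                    required=param.required,
                    default=param.default,
                )
                for param in tool_def.parameters
            },
        )

    listed = ListedToolDef(
        description="Execute the given Python code and return the result",
        parameters=[ListedParam(name="code", parameter_type="string", description="Code to execute")],
    )
    td = to_tool_definition("code_interpreter", listed)
    assert td.parameters["code"].param_type == "string"
    assert td.description == listed.description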
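
Note on the vllm.py change: with the old test, an empty arguments buffer parsed to {} and still passed `if args is not None`, so a spurious "succeeded" tool call with empty arguments was emitted. After the change, a parsed non-empty dict emits the succeeded chunk, a non-empty buffer that failed to parse emits the failed progress chunk, and an empty buffer emits nothing. A minimal sketch of the resulting control flow, with return values standing in for the yielded stream chunks (classify_tool_args is a hypothetical helper, not provider code):

    # Sketch only -- mirrors the patched branch structure in
    # _process_vllm_chat_completion_stream_response.
    import json

    def classify_tool_args(args_str: str | None) -> str:
        args = None
        try:
            args = {} if not args_str else json.loads(args_str)
        except Exception:
            pass  # the real code logs the parse failure here
        if args:          # parsed to a non-empty dict -> succeeded tool-call chunk
            return "succeeded"
        elif args_str:    # buffer had content but no usable dict -> failed progress chunk
            return "failed"
        return "nothing"  # empty buffer -> no chunk (old code emitted 'succeeded' with {})

    assert classify_tool_args('{"city": "Tokyo"}') == "succeeded"
    assert classify_tool_args('{"city": ') == "failed"  # truncated JSON
    assert classify_tool_args("") == "nothing"
    assert classify_tool_args(None) == "nothing"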