feat: add more logs to agent_instance.py

Ashwin Bharambe 2025-03-03 16:15:47 -08:00
parent ee5e9b935a
commit 0a76ece249
3 changed files with 12 additions and 12 deletions
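
Note: the pattern adopted across these files replaces module-level stdlib loggers with the project's logcat helper, which takes a category string ("core", "server", "agents") before the message. Below is a minimal sketch of the before/after call shape, based only on the calls visible in this diff; how logcat is configured and how categories are filtered is not shown by this commit.

# Old style removed by this commit: per-module stdlib logger.
import logging

log = logging.getLogger(__name__)
log.info("Out of token budget, exiting.")

# New style added by this commit: category-first logcat calls.
# Only the (category, message) call shape is taken from the diff.
from llama_stack import logcat

logcat.info("agents", "out of token budget, exiting.")
logcat.debug("core", "Resolved 2 providers")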

View file

@@ -247,7 +247,6 @@ def sort_providers_by_deps(
     logcat.debug("core", f"Resolved {len(sorted_providers)} providers")
     for api_str, provider in sorted_providers:
         logcat.debug("core", f" {api_str} => {provider.provider_id}")
-    logcat.debug("core", "")
     return sorted_providers

View file

@@ -436,7 +436,7 @@ def main():
         )
     )
-    logcat.debug("server", f"Serving API {api_str}")
+    logcat.debug("server", f"serving APIs: {apis_to_serve}")

     app.exception_handler(RequestValidationError)(global_exception_handler)
     app.exception_handler(Exception)(global_exception_handler)

View file

@@ -6,7 +6,6 @@
 import copy
 import json
-import logging
 import os
 import re
 import secrets
@@ -18,6 +17,7 @@ from urllib.parse import urlparse
 import httpx

+from llama_stack import logcat
 from llama_stack.apis.agents import (
     AgentConfig,
     AgentToolGroup,
@@ -79,8 +79,6 @@ from llama_stack.providers.utils.telemetry import tracing
 from .persistence import AgentPersistence
 from .safety import SafetyException, ShieldRunnerMixin

-log = logging.getLogger(__name__)
-

 def make_random_string(length: int = 8):
     return "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(length))
@@ -219,8 +217,9 @@ class ChatAgent(ShieldRunnerMixin):
             toolgroups_for_turn=request.toolgroups,
         ):
             if isinstance(chunk, CompletionMessage):
-                log.info(
-                    f"{chunk.role.capitalize()}: {chunk.content}",
+                logcat.info(
+                    "agents",
+                    f"returning result from the agent turn: {chunk}",
                 )
                 output_message = chunk
                 continue
@@ -665,7 +664,7 @@ class ChatAgent(ShieldRunnerMixin):
                 )

                 if n_iter >= self.agent_config.max_infer_iters:
-                    log.info("Done with MAX iterations, exiting.")
+                    logcat.info("agents", f"done with MAX iterations ({n_iter}), exiting.")
                     # NOTE: mark end_of_turn to indicate to client that we are done with the turn
                     # Do not continue the tool call loop after this point
                     message.stop_reason = StopReason.end_of_turn
@@ -673,7 +672,7 @@ class ChatAgent(ShieldRunnerMixin):
                     break

                 if stop_reason == StopReason.out_of_tokens:
-                    log.info("Out of token budget, exiting.")
+                    logcat.info("agents", "out of token budget, exiting.")
                     yield message
                     break
@@ -687,10 +686,10 @@ class ChatAgent(ShieldRunnerMixin):
                         message.content = [message.content] + output_attachments
                     yield message
                 else:
-                    log.info(f"Partial message: {str(message)}")
+                    logcat.debug("agents", f"completion message with EOM (iter: {n_iter}): {str(message)}")
                     input_messages = input_messages + [message]
             else:
-                log.info(f"{str(message)}")
+                logcat.debug("agents", f"completion message (iter: {n_iter}) from the model: {str(message)}")
                 # 1. Start the tool execution step and progress
                 step_id = str(uuid.uuid4())
                 yield AgentTurnResponseStreamChunk(
@@ -1042,7 +1041,7 @@ async def attachment_message(tempdir: str, urls: List[URL]) -> ToolResponseMessa
         path = urlparse(uri).path
         basename = os.path.basename(path)
         filepath = f"{tempdir}/{make_random_string() + basename}"
-        log.info(f"Downloading {url} -> {filepath}")
+        logcat.info("agents", f"Downloading {url} -> {filepath}")

         async with httpx.AsyncClient() as client:
             r = await client.get(uri)
@@ -1082,6 +1081,7 @@ async def execute_tool_call_maybe(
     else:
         name = name.value

+    logcat.info("agents", f"executing tool call: {name} with args: {tool_call.arguments}")
     result = await tool_runtime_api.invoke_tool(
         tool_name=name,
         kwargs={
@@ -1091,6 +1091,7 @@ async def execute_tool_call_maybe(
             **toolgroup_args.get(group_name, {}),
         },
     )
+    logcat.debug("agents", f"tool call {name} completed with result: {result}")
     return result
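
For readers tracing these calls outside the repository, here is a hypothetical stand-in for the logcat module. It assumes only the debug(category, message) / info(category, message) interface used in this diff and maps it onto stdlib logging; it is a sketch, not the actual llama_stack.logcat implementation.

# Hypothetical shim, NOT llama_stack.logcat: it only mirrors the
# (category, message) call shape seen in this diff on top of stdlib logging.
import logging


def _logger(category: str) -> logging.Logger:
    # One stdlib logger per category, e.g. "llama_stack.agents".
    return logging.getLogger(f"llama_stack.{category}")


def debug(category: str, msg: str) -> None:
    _logger(category).debug(msg)


def info(category: str, msg: str) -> None:
    _logger(category).info(msg)


if __name__ == "__main__":
    # Example: enable all levels, then emit logs shaped like the ones above.
    logging.basicConfig(level=logging.DEBUG)
    info("agents", "executing tool call: example with args: {}")
    debug("core", "Resolved 2 providers")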