revert print

This commit is contained in:
Xi Yan 2025-02-04 10:30:19 -08:00
parent 5595f5b9b8
commit b1492ecb4e
2 changed files with 0 additions and 17 deletions

View file

@@ -168,17 +168,10 @@ class ChatAgent(ShieldRunnerMixin):
if self.agent_config.instructions != "":
messages.append(SystemMessage(content=self.agent_config.instructions))
from rich.pretty import pprint
print("create_and_execute_turn")
pprint(request)
for i, turn in enumerate(turns):
messages.extend(self.turn_to_messages(turn))
messages.extend(request.messages)
print("create_and_execute_turn turn to messages")
pprint(messages)
turn_id = str(uuid.uuid4())
span.set_attribute("turn_id", turn_id)
@@ -367,7 +360,6 @@ class ChatAgent(ShieldRunnerMixin):
documents: Optional[List[Document]] = None,
toolgroups_for_turn: Optional[List[AgentToolGroup]] = None,
) -> AsyncGenerator:
print("_run messages", input_messages)
# TODO: simplify all of this code, it can be simpler
toolgroup_args = {}
toolgroups = set()
@@ -498,7 +490,6 @@ class ChatAgent(ShieldRunnerMixin):
stop_reason = None
with tracing.span("inference") as span:
print("just before chat completion", input_messages)
async for chunk in await self.inference_api.chat_completion(
self.agent_config.model,
input_messages,

View file

@@ -196,8 +196,6 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
logprobs: Optional[LogProbConfig] = None,
) -> AsyncGenerator:
model = await self.model_store.get_model(model_id)
print("inside together chat completion messages", messages)
breakpoint()
request = ChatCompletionRequest(
model=model.provider_resource_id,
messages=messages,
@@ -225,11 +223,6 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
async def _stream_chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator:
params = await self._get_params(request)
from rich.pretty import pprint
print("together stream completion")
pprint(request)
pprint(params)
# if we shift to TogetherAsyncClient, we won't need this wrapper
async def _to_async_generator():
@@ -247,7 +240,6 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict:
input_dict = {}
media_present = request_has_media(request)
breakpoint()
if isinstance(request, ChatCompletionRequest):
if media_present:
input_dict["messages"] = [await convert_message_to_openai_dict(m) for m in request.messages]