Rename ipython to tool (#756)

See https://github.com/meta-llama/llama-models/pull/261 for the
corresponding PR in llama-models.

Once these PRs land, you need to work off `main` from llama-models (vs. from
pypi).
This commit is contained in:
Ashwin Bharambe 2025-01-13 19:11:51 -08:00 committed by GitHub
parent fdcc74fda2
commit ee4e04804f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 3 additions and 3 deletions

View file

@@ -82,7 +82,7 @@ class SystemMessage(BaseModel):
@json_schema_type @json_schema_type
class ToolResponseMessage(BaseModel): class ToolResponseMessage(BaseModel):
role: Literal["ipython"] = "ipython" role: Literal["tool"] = "tool"
# it was nice to re-use the ToolResponse type, but having all messages # it was nice to re-use the ToolResponse type, but having all messages
# have a `content` type makes things nicer too # have a `content` type makes things nicer too
call_id: str call_id: str

View file

@@ -144,7 +144,7 @@ def _convert_message(message: Message | Dict) -> OpenAIChatCompletionMessage:
message = UserMessage(**message) message = UserMessage(**message)
elif message["role"] == "assistant": elif message["role"] == "assistant":
message = CompletionMessage(**message) message = CompletionMessage(**message)
elif message["role"] == "ipython": elif message["role"] == "tool":
message = ToolResponseMessage(**message) message = ToolResponseMessage(**message)
elif message["role"] == "system": elif message["role"] == "system":
message = SystemMessage(**message) message = SystemMessage(**message)

View file

@@ -40,10 +40,10 @@ class TestClientTool(ClientTool):
response_str = f"Error when running tool: {e}" response_str = f"Error when running tool: {e}"
message = ToolResponseMessage( message = ToolResponseMessage(
role="tool",
call_id=tool_call.call_id, call_id=tool_call.call_id,
tool_name=tool_call.tool_name, tool_name=tool_call.tool_name,
content=response_str, content=response_str,
role="ipython",
) )
return [message] return [message]