Rename ipython to tool (#756)
See https://github.com/meta-llama/llama-models/pull/261 for the corresponding PR in llama-models. Once these PRs land, you will need to work off `main` of llama-models (rather than the pypi release).
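For reference, a minimal sketch of what the rename means in practice, using a hypothetical simplified stand-in for ToolResponseMessage (field names taken from the diffs below; the real Pydantic model in llama-stack carries more structure):

from typing import Literal

from pydantic import BaseModel, ValidationError


# Hypothetical simplified stand-in for the real ToolResponseMessage.
class ToolResponseMessage(BaseModel):
    role: Literal["tool"] = "tool"  # was Literal["ipython"] before this commit
    call_id: str
    tool_name: str
    content: str


# The new role is the default, so it does not need to be passed explicitly.
msg = ToolResponseMessage(call_id="call-1", tool_name="get_weather", content="72F")
assert msg.role == "tool"

# The old role is now rejected by the Literal annotation.
try:
    ToolResponseMessage(role="ipython", call_id="call-1", tool_name="t", content="x")
except ValidationError:
    print("role='ipython' no longer validates")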
Parent: fdcc74fda2
Commit: ee4e04804f
3 changed files with 3 additions and 3 deletions
@@ -82,7 +82,7 @@ class SystemMessage(BaseModel):
 @json_schema_type
 class ToolResponseMessage(BaseModel):
-    role: Literal["ipython"] = "ipython"
+    role: Literal["tool"] = "tool"
     # it was nice to re-use the ToolResponse type, but having all messages
     # have a `content` type makes things nicer too
     call_id: str
@@ -144,7 +144,7 @@ def _convert_message(message: Message | Dict) -> OpenAIChatCompletionMessage:
         message = UserMessage(**message)
     elif message["role"] == "assistant":
         message = CompletionMessage(**message)
-    elif message["role"] == "ipython":
+    elif message["role"] == "tool":
         message = ToolResponseMessage(**message)
     elif message["role"] == "system":
         message = SystemMessage(**message)
@@ -40,10 +40,10 @@ class TestClientTool(ClientTool):
             response_str = f"Error when running tool: {e}"

         message = ToolResponseMessage(
+            role="tool",
             call_id=tool_call.call_id,
             tool_name=tool_call.tool_name,
             content=response_str,
-            role="ipython",
         )
         return [message]
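One consequence worth noting: because role now defaults to "tool" (first hunk), the explicit role="tool" in the test above is optional. A quick check against the simplified stand-in model from the sketch near the top, assuming Pydantic v2's model_dump:

# Reusing the simplified ToolResponseMessage from the sketch near the top.
msg = ToolResponseMessage(call_id="call-1", tool_name="get_weather", content="72F")
print(msg.model_dump())
# -> {'role': 'tool', 'call_id': 'call-1', 'tool_name': 'get_weather', 'content': '72F'}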