From 3ed2e816fa3d6ff7b3ac49e7cbec2976cd3ef8ef Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Thu, 21 Nov 2024 10:04:26 -0500
Subject: [PATCH] use pydantic v2's model_dump() instead of dict()

---
 llama_stack/providers/remote/inference/nvidia/_openai_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/providers/remote/inference/nvidia/_openai_utils.py b/llama_stack/providers/remote/inference/nvidia/_openai_utils.py
index edd6edda4..595cf0c93 100644
--- a/llama_stack/providers/remote/inference/nvidia/_openai_utils.py
+++ b/llama_stack/providers/remote/inference/nvidia/_openai_utils.py
@@ -118,7 +118,7 @@ def _convert_message(message: Message) -> Dict:
     """
     Convert a Message to an OpenAI API-compatible dictionary.
     """
-    out_dict = message.dict()
+    out_dict = message.model_dump()
     # Llama Stack uses role="ipython" for tool call messages, OpenAI uses "tool"
     if out_dict["role"] == "ipython":
         out_dict.update(role="tool")