diff --git a/litellm/integrations/llmonitor.py b/litellm/integrations/llmonitor.py
index 761b12c9e1..ff4c3990f8 100644
--- a/litellm/integrations/llmonitor.py
+++ b/litellm/integrations/llmonitor.py
@@ -28,10 +28,13 @@ def parse_messages(input):
 
         if "message" in message:
             return clean_message(message["message"])
 
+        # Function-call responses carry no text content: fall back to the
+        # "function_call" payload so the logged message is not empty.
+        # .get() also tolerates messages that omit the "content" key entirely.
+        text = message.get("content")
+        if text is None:
+            text = message.get("function_call")
         return {
             "role": message["role"],
-            "text": message["content"],
+            "text": text,
         }
 
     if isinstance(input, list):
diff --git a/litellm/tests/test_llmonitor_integration.py b/litellm/tests/test_llmonitor_integration.py
index 9a13d949ac..006e13b528 100644
--- a/litellm/tests/test_llmonitor_integration.py
+++ b/litellm/tests/test_llmonitor_integration.py
@@ -38,5 +38,39 @@ def test_embedding_openai():
         print(e)
 
 
-test_chat_openai()
-test_embedding_openai()
+# test_chat_openai()
+# test_embedding_openai()
+
+
+def test_llmonitor_logging_function_calling():
+    function1 = [
+        {
+            "name": "get_current_weather",
+            "description": "Get the current weather in a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The city and state, e.g. San Francisco, CA",
+                    },
+                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                },
+                "required": ["location"],
+            },
+        }
+    ]
+    try:
+        response = completion(model="gpt-3.5-turbo",
+                              messages=[{
+                                  "role": "user",
+                                  "content": "what's the weather in boston"
+                              }],
+                              temperature=0.1,
+                              functions=function1,
+                              )
+        print(response)
+    except Exception as e:
+        print(e)
+
+# test_llmonitor_logging_function_calling()