Handle fireworks ai tool calling response (#10130)

* feat(fireworks_ai/chat): handle tool calling with fireworks ai correctly

Fixes https://github.com/BerriAI/litellm/issues/7209

* fix(utils.py): handle none type in message

* fix: fix model name in test

* fix(utils.py): fix validate check for openai messages

* fix: fix model returned

* fix(main.py): fix text completion routing

* test: update testing

* test: skip test - Cohere is having RBAC issues
This commit is contained in:
Krish Dholakia 2025-04-19 09:37:45 -07:00 committed by GitHub
parent 4663a66b47
commit e122f2df56
9 changed files with 242 additions and 74 deletions

View file

@ -896,6 +896,13 @@ class BaseLLMChatTest(ABC):
assert response is not None
# if the provider did not return any tool calls do not make a subsequent llm api call
if response.choices[0].message.content is not None:
try:
json.loads(response.choices[0].message.content)
pytest.fail(f"Tool call returned in content instead of tool_calls")
except Exception as e:
print(f"Error: {e}")
pass
if response.choices[0].message.tool_calls is None:
return
# Add any assertions here to check the response