Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
Handle fireworks ai tool calling response (#10130)
* feat(fireworks_ai/chat): handle tool calling with fireworks ai correctly. Fixes https://github.com/BerriAI/litellm/issues/7209
* fix(utils.py): handle none type in message
* fix: fix model name in test
* fix(utils.py): fix validate check for openai messages
* fix: fix model returned
* fix(main.py): fix text completion routing
* test: update testing
* test: skip test - cohere having RBAC issues
This commit is contained in:
parent 4663a66b47
commit e122f2df56
9 changed files with 242 additions and 74 deletions
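The feat(fireworks_ai/chat) change in the commit message addresses responses where the provider returns a tool call as a JSON string in the message content instead of populating tool_calls (see issue 7209 linked above). The sketch below is a minimal illustration of that kind of normalization, not litellm's actual fireworks_ai transformation: the helper name extract_tool_calls_from_content and the expected {"name": ..., "arguments": ...} content shape are assumptions made for this example; only the OpenAI-style message fields (content, tool_calls) come from the surrounding context.

import json
import uuid
from typing import Any, Dict, Optional


def extract_tool_calls_from_content(message: Dict[str, Any]) -> Dict[str, Any]:
    """Hypothetical helper: if a tool call was returned as a JSON blob in
    message["content"] instead of message["tool_calls"], move it over so the
    message matches the OpenAI chat-completion shape."""
    content: Optional[str] = message.get("content")
    if message.get("tool_calls") or not content:
        return message  # already has tool_calls, or nothing to inspect

    try:
        parsed = json.loads(content)
    except (TypeError, ValueError):
        return message  # ordinary text content, leave it untouched

    # Assumed content shape: {"name": "...", "arguments": {...}}
    if not isinstance(parsed, dict) or "name" not in parsed:
        return message

    message["tool_calls"] = [
        {
            "id": f"call_{uuid.uuid4().hex[:8]}",
            "type": "function",
            "function": {
                "name": parsed["name"],
                "arguments": json.dumps(parsed.get("arguments", {})),
            },
        }
    ]
    message["content"] = None  # the content held the tool call, not user-facing text
    return message


if __name__ == "__main__":
    msg = {"role": "assistant", "content": '{"name": "get_weather", "arguments": {"city": "Paris"}}'}
    print(extract_tool_calls_from_content(msg))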
@@ -896,6 +896,13 @@ class BaseLLMChatTest(ABC):
        assert response is not None

        # if the provider did not return any tool calls do not make a subsequent llm api call
        if response.choices[0].message.content is not None:
            try:
                json.loads(response.choices[0].message.content)
                pytest.fail(f"Tool call returned in content instead of tool_calls")
            except Exception as e:
                print(f"Error: {e}")
                pass
        if response.choices[0].message.tool_calls is None:
            return

        # Add any assertions here to check the response
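One note on the guard in the hunk above: pytest.fail() raises Failed, which derives from BaseException rather than Exception, so the except Exception handler only swallows the json.loads error raised for ordinary text content; when the content does parse as JSON, the failure still propagates. The standalone sketch below restates that check assuming a hypothetical message object with a content attribute; it is not part of the actual test suite.

import json

import pytest


def assert_tool_call_not_in_content(message) -> None:
    """Hypothetical helper mirroring the guard in the diff above."""
    if message.content is not None:
        try:
            json.loads(message.content)
            # Content parsed as JSON, i.e. the provider leaked the tool call
            # into content. pytest.fail raises Failed (a BaseException
            # subclass), so the except clause below does not swallow it.
            pytest.fail("Tool call returned in content instead of tool_calls")
        except ValueError:
            # Plain text content does not parse as JSON; nothing to flag.
            pass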