Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00.
LiteLLM Minor Fixes & Improvements (10/02/2024) (#6023)
* feat(together_ai/completion): handle together ai completion calls * fix: handle list of int / list of list of int for text completion calls * fix(utils.py): check if base model in bedrock converse model list Fixes https://github.com/BerriAI/litellm/issues/6003 * test(test_optional_params.py): add unit tests for bedrock optional param mapping Fixes https://github.com/BerriAI/litellm/issues/6003 * feat(utils.py): enable passing dummy tool call for anthropic/bedrock calls if tool_use blocks exist Fixes https://github.com/BerriAI/litellm/issues/5388 * fixed an issue with tool use of claude models with anthropic and bedrock (#6013) * fix(utils.py): handle empty schema for anthropic/bedrock Fixes https://github.com/BerriAI/litellm/issues/6012 * fix: fix linting errors * fix: fix linting errors * fix: fix linting errors * fix(proxy_cli.py): fix import route for app + health checks path (#6026) * (testing): Enable testing us.anthropic.claude-3-haiku-20240307-v1:0. (#6018) * fix(proxy_cli.py): fix import route for app + health checks gettsburg.wav Fixes https://github.com/BerriAI/litellm/issues/5999 --------- Co-authored-by: David Manouchehri <david.manouchehri@ai.moda> --------- Co-authored-by: Ved Patwardhan <54766411+vedpatwardhan@users.noreply.github.com> Co-authored-by: David Manouchehri <david.manouchehri@ai.moda>
This commit is contained in:
parent
8995ff49ae
commit
14165d3648
20 changed files with 443 additions and 125 deletions
|
@ -47,16 +47,17 @@ def get_current_weather(location, unit="fahrenheit"):
|
|||
[
|
||||
"gpt-3.5-turbo-1106",
|
||||
# "mistral/mistral-large-latest",
|
||||
# "claude-3-haiku-20240307",
|
||||
# "gemini/gemini-1.5-pro",
|
||||
"claude-3-haiku-20240307",
|
||||
"gemini/gemini-1.5-pro",
|
||||
"anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
"groq/llama3-8b-8192",
|
||||
# "groq/llama3-8b-8192",
|
||||
],
|
||||
)
|
||||
@pytest.mark.flaky(retries=3, delay=1)
|
||||
def test_aaparallel_function_call(model):
|
||||
try:
|
||||
litellm.set_verbose = True
|
||||
litellm.modify_params = True
|
||||
# Step 1: send the conversation and available functions to the model
|
||||
messages = [
|
||||
{
|
||||
|
@ -97,7 +98,6 @@ def test_aaparallel_function_call(model):
|
|||
response_message = response.choices[0].message
|
||||
tool_calls = response_message.tool_calls
|
||||
|
||||
print("length of tool calls", len(tool_calls))
|
||||
print("Expecting there to be 3 tool calls")
|
||||
assert (
|
||||
len(tool_calls) > 0
|
||||
|
@ -141,7 +141,7 @@ def test_aaparallel_function_call(model):
|
|||
messages=messages,
|
||||
temperature=0.2,
|
||||
seed=22,
|
||||
tools=tools,
|
||||
# tools=tools,
|
||||
drop_params=True,
|
||||
) # get a new response from the model where it can see the function response
|
||||
print("second response\n", second_response)
|
||||
|
@ -445,3 +445,29 @@ def test_groq_parallel_function_call():
|
|||
print("second response\n", second_response)
|
||||
except Exception as e:
|
||||
pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "model",
    [
        "anthropic.claude-3-sonnet-20240229-v1:0",
        "claude-3-haiku-20240307",
    ],
)
def test_anthropic_function_call_with_no_schema(model):
    """
    Regression test: a tool definition with no parameter schema must not break
    Anthropic / Bedrock tool calls.

    Relevant Issue: https://github.com/BerriAI/litellm/issues/6012
    """
    # A single tool whose "function" entry deliberately omits a "parameters"
    # schema — this is the empty-schema case the linked issue covers.
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in New York",
        },
    }
    user_turn = {
        "role": "user",
        "content": "What is the current temperature in New York?",
    }
    # The call itself is the assertion: it must complete without raising even
    # though the tool carries no schema.
    completion(
        model=model,
        messages=[user_turn],
        tools=[weather_tool],
        tool_choice="auto",
    )
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue