refactor: add black formatting

Krrish Dholakia 2023-12-25 14:10:38 +05:30
parent b87d630b0a
commit 4905929de3
156 changed files with 19723 additions and 10869 deletions
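The reformatting below is black's default style: an 88-character line limit, splitting of over-long calls and literals (with a magic trailing comma), and two spaces before inline comments. As a minimal sketch of the same transformation driven programmatically (the snippet is illustrative; the commit itself would normally come from running black over the repo):

    import black

    # One of the over-long pre-commit lines from the diff below.
    src = (
        'messages = [{"role": "user", "content": '
        '"What\'s the weather like in San Francisco, Tokyo, and Paris?"}]\n'
    )

    # format_str() applies the same default style as the `black` CLI;
    # FileMode() carries the default 88-character line length.
    print(black.format_str(src, mode=black.FileMode()))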


@@ -13,6 +13,7 @@ import litellm
 from litellm import embedding, completion, completion_cost, Timeout
 from litellm import RateLimitError
 import pytest
+litellm.num_retries = 0
 litellm.cache = None
 # litellm.set_verbose=True
@@ -20,23 +21,32 @@ import json
 # litellm.success_callback = ["langfuse"]
 def get_current_weather(location, unit="fahrenheit"):
     """Get the current weather in a given location"""
     if "tokyo" in location.lower():
         return json.dumps({"location": "Tokyo", "temperature": "10", "unit": "celsius"})
     elif "san francisco" in location.lower():
-        return json.dumps({"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"})
+        return json.dumps(
+            {"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"}
+        )
     elif "paris" in location.lower():
         return json.dumps({"location": "Paris", "temperature": "22", "unit": "celsius"})
     else:
         return json.dumps({"location": location, "temperature": "unknown"})
 
 
 # Example dummy function hard coded to return the same weather
 # In production, this could be your backend API or an external API
 def test_parallel_function_call():
     try:
         # Step 1: send the conversation and available functions to the model
-        messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+        messages = [
+            {
+                "role": "user",
+                "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+            }
+        ]
         tools = [
             {
                 "type": "function",
@@ -50,7 +60,10 @@ def test_parallel_function_call():
                                 "type": "string",
                                 "description": "The city and state, e.g. San Francisco, CA",
                             },
-                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                            "unit": {
+                                "type": "string",
+                                "enum": ["celsius", "fahrenheit"],
+                            },
                         },
                         "required": ["location"],
                     },
@@ -69,7 +82,9 @@ def test_parallel_function_call():
         print("length of tool calls", len(tool_calls))
         print("Expecting there to be 3 tool calls")
-        assert len(tool_calls) > 1 # the model should call the function for SF, Tokyo and Paris
+        assert (
+            len(tool_calls) > 1
+        )  # the model should call the function for SF, Tokyo and Paris
 
         # Step 2: check if the model wanted to call a function
         if tool_calls:
@@ -78,7 +93,9 @@ def test_parallel_function_call():
             available_functions = {
                 "get_current_weather": get_current_weather,
             }  # only one function in this example, but you can have multiple
-            messages.append(response_message) # extend conversation with assistant's reply
+            messages.append(
+                response_message
+            )  # extend conversation with assistant's reply
             print("Response message\n", response_message)
             # Step 4: send the info for each function call and function response to the model
             for tool_call in tool_calls:
@@ -99,25 +116,26 @@ def test_parallel_function_call():
                 )  # extend conversation with function response
             print(f"messages: {messages}")
             second_response = litellm.completion(
-                model="gpt-3.5-turbo-1106",
-                messages=messages,
-                temperature=0.2,
-                seed=22
+                model="gpt-3.5-turbo-1106", messages=messages, temperature=0.2, seed=22
             )  # get a new response from the model where it can see the function response
             print("second response\n", second_response)
             return second_response
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
 
 test_parallel_function_call()
 
 
 def test_parallel_function_call_stream():
     try:
         # Step 1: send the conversation and available functions to the model
-        messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
+        messages = [
+            {
+                "role": "user",
+                "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+            }
+        ]
         tools = [
             {
                 "type": "function",
@@ -131,7 +149,10 @@ def test_parallel_function_call_stream():
                                 "type": "string",
                                 "description": "The city and state, e.g. San Francisco, CA",
                             },
-                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                            "unit": {
+                                "type": "string",
+                                "enum": ["celsius", "fahrenheit"],
+                            },
                         },
                         "required": ["location"],
                     },
@@ -144,7 +165,7 @@ def test_parallel_function_call_stream():
             tools=tools,
             stream=True,
             tool_choice="auto",  # auto is default, but we'll be explicit
-            complete_response = True
+            complete_response=True,
         )
         print("Response\n", response)
         # for chunk in response:
@@ -154,7 +175,9 @@ def test_parallel_function_call_stream():
         print("length of tool calls", len(tool_calls))
         print("Expecting there to be 3 tool calls")
-        assert len(tool_calls) > 1 # the model should call the function for SF, Tokyo and Paris
+        assert (
+            len(tool_calls) > 1
+        )  # the model should call the function for SF, Tokyo and Paris
 
         # Step 2: check if the model wanted to call a function
         if tool_calls:
@@ -163,7 +186,9 @@ def test_parallel_function_call_stream():
             available_functions = {
                 "get_current_weather": get_current_weather,
             }  # only one function in this example, but you can have multiple
-            messages.append(response_message) # extend conversation with assistant's reply
+            messages.append(
+                response_message
+            )  # extend conversation with assistant's reply
             print("Response message\n", response_message)
             # Step 4: send the info for each function call and function response to the model
             for tool_call in tool_calls:
@@ -184,14 +209,12 @@ def test_parallel_function_call_stream():
                 )  # extend conversation with function response
             print(f"messages: {messages}")
             second_response = litellm.completion(
-                model="gpt-3.5-turbo-1106",
-                messages=messages,
-                temperature=0.2,
-                seed=22
+                model="gpt-3.5-turbo-1106", messages=messages, temperature=0.2, seed=22
             )  # get a new response from the model where it can see the function response
             print("second response\n", second_response)
             return second_response
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
 
-test_parallel_function_call_stream()
+test_parallel_function_call_stream()