(test) tool/function calling + streaming

ishaan-jaff 2023-11-18 16:23:28 -08:00
parent 70fc5afb5d
commit 4a364bcbc0


@@ -108,4 +108,87 @@ def test_parallel_function_call():
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
test_parallel_function_call()
# test_parallel_function_call()
def test_parallel_function_call_stream():
    try:
        # Step 1: send the conversation and available functions to the model
        messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA",
                            },
                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                        },
                        "required": ["location"],
                    },
                },
            }
        ]
        response = litellm.completion(
            model="gpt-3.5-turbo-1106",
            messages=messages,
            tools=tools,
            stream=True,
            tool_choice="auto",  # auto is default, but we'll be explicit
        )
        print("Response\n", response)
        for chunk in response:
            print(chunk)
        # response_message = response.choices[0].message
        # tool_calls = response_message.tool_calls
        # print("length of tool calls", len(tool_calls))
        # print("Expecting there to be 3 tool calls")
        # assert len(tool_calls) > 1  # this has to call the function for SF, Tokyo and Paris
        # # Step 2: check if the model wanted to call a function
        # if tool_calls:
        #     # Step 3: call the function
        #     # Note: the JSON response may not always be valid; be sure to handle errors
        #     available_functions = {
        #         "get_current_weather": get_current_weather,
        #     }  # only one function in this example, but you can have multiple
        #     messages.append(response_message)  # extend conversation with assistant's reply
        #     print("Response message\n", response_message)
        #     # Step 4: send the info for each function call and function response to the model
        #     for tool_call in tool_calls:
        #         function_name = tool_call.function.name
        #         function_to_call = available_functions[function_name]
        #         function_args = json.loads(tool_call.function.arguments)
        #         function_response = function_to_call(
        #             location=function_args.get("location"),
        #             unit=function_args.get("unit"),
        #         )
        #         messages.append(
        #             {
        #                 "tool_call_id": tool_call.id,
        #                 "role": "tool",
        #                 "name": function_name,
        #                 "content": function_response,
        #             }
        #         )  # extend conversation with function response
        #     second_response = litellm.completion(
        #         model="gpt-3.5-turbo-1106",
        #         messages=messages,
        #         temperature=0.2,
        #         seed=22
        #     )  # get a new response from the model where it can see the function response
        #     print("second response\n", second_response)
        #     return second_response
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
test_parallel_function_call_stream()
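
Note (not part of the commit): the streaming test above only prints raw chunks, and the Step 2-4 logic stays commented out because streamed tool calls arrive as partial deltas rather than a complete response_message. A minimal sketch of how those deltas could be stitched back together, assuming OpenAI-style streaming chunks as surfaced by litellm; the helper name and the returned dict shape are illustrative only.

def assemble_tool_calls(stream):
    # Accumulate partial tool_call deltas by index; each chunk may carry a
    # fragment of the call id, the function name, or the JSON arguments string.
    calls = {}
    for chunk in stream:
        delta = chunk.choices[0].delta
        for tc in (getattr(delta, "tool_calls", None) or []):
            entry = calls.setdefault(tc.index, {"id": None, "name": None, "arguments": ""})
            if tc.id:
                entry["id"] = tc.id
            if tc.function and tc.function.name:
                entry["name"] = tc.function.name
            if tc.function and tc.function.arguments:
                entry["arguments"] += tc.function.arguments
    return [calls[i] for i in sorted(calls)]

Each assembled entry could then feed the commented-out flow above: json.loads the accumulated arguments, dispatch via available_functions, and append the tool results to messages for a follow-up completion.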