mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
check function call + streaming format
This commit is contained in:
parent
61874f77ab
commit
15bc5f2bdc
2 changed files with 243 additions and 241 deletions
|
@ -384,7 +384,6 @@ def test_completion_openai_with_functions():
|
||||||
print(chunk["choices"][0]["delta"]["content"])
|
print(chunk["choices"][0]["delta"]["content"])
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
pytest.fail(f"Error occurred: {e}")
|
pytest.fail(f"Error occurred: {e}")
|
||||||
test_completion_openai_with_functions()
|
|
||||||
|
|
||||||
#### Test Async streaming ####
|
#### Test Async streaming ####
|
||||||
|
|
||||||
|
@ -441,259 +440,262 @@ async def completion_call():
|
||||||
|
|
||||||
#### Test Function Calling + Streaming ####
|
#### Test Function Calling + Streaming ####
|
||||||
|
|
||||||
# final_openai_function_call_example = {
|
# Reference example of a complete (non-streaming) OpenAI function-call
# response, used to describe the expected output format.
final_openai_function_call_example = {
    "id": "chatcmpl-7zVNA4sXUftpIg6W8WlntCyeBj2JY",
    "object": "chat.completion",
    "created": 1694892960,
    "model": "gpt-3.5-turbo-0613",
    "choices": [
        {
            "index": 0,
            "message": {
                "role": "assistant",
                "content": None,
                "function_call": {
                    "name": "get_current_weather",
                    "arguments": "{\n \"location\": \"Boston, MA\"\n}",
                },
            },
            "finish_reason": "function_call",
        }
    ],
    "usage": {"prompt_tokens": 82, "completion_tokens": 18, "total_tokens": 100},
}
|
||||||
|
|
||||||
# function_calling_output_structure = {
|
# Expected type structure of a complete function-call response. Leaf values
# are types (or tuples of types) checked with isinstance(); a list describes
# the structure every element of the corresponding list must match.
function_calling_output_structure = {
    "id": str,
    "object": str,
    "created": int,
    "model": str,
    "choices": [
        {
            "index": int,
            "message": {
                "role": str,
                "content": (type(None), str),
                "function_call": {
                    "name": str,
                    "arguments": str,
                },
            },
            "finish_reason": str,
        }
    ],
    "usage": {
        "prompt_tokens": int,
        "completion_tokens": int,
        "total_tokens": int,
    },
}


def validate_final_structure(item, structure=function_calling_output_structure):
    """Recursively validate that `item` matches the type template `structure`.

    Returns True when the structure matches; raises Exception on any
    mismatch. (The previous version *returned* the Exception instance
    instead of raising it — a truthy value — so validation could never
    actually signal failure to a truthiness-checking caller.)
    """
    if isinstance(item, list):
        # Every list element must match the first (and only) template element.
        if not all(validate_final_structure(i, structure[0]) for i in item):
            raise Exception("Function calling final output doesn't match expected output format")
    elif isinstance(item, dict):
        # Every expected key must be present and its value must validate.
        if not all(k in item and validate_final_structure(item[k], v) for k, v in structure.items()):
            raise Exception("Function calling final output doesn't match expected output format")
    else:
        # Leaf: structure is a type or a tuple of acceptable types.
        if not isinstance(item, structure):
            raise Exception("Function calling final output doesn't match expected output format")
    return True
|
||||||
|
|
||||||
|
|
||||||
# first_openai_function_call_example = {
|
# First streamed chunk of an OpenAI function-call response: the delta carries
# the assistant role and the function name with (still) empty arguments.
first_openai_function_call_example = {
    "id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
    "object": "chat.completion.chunk",
    "created": 1694893248,
    "model": "gpt-3.5-turbo-0613",
    "choices": [
        {
            "index": 0,
            "delta": {
                "role": "assistant",
                "content": None,
                "function_call": {"name": "get_current_weather", "arguments": ""},
            },
            "finish_reason": None,
        }
    ],
}
|
||||||
|
|
||||||
|
def validate_first_function_call_chunk_structure(item):
    """Validate the shape of the first streamed function-call chunk.

    Requires the top-level response keys, a non-empty `choices` list, and
    for each choice a delta dict carrying role/content/function_call, the
    function_call itself holding name/arguments. Returns True on success;
    raises Exception("Incorrect format") on any structural problem.
    """
    def _require_dict_with_keys(obj, keys):
        # Shared guard: obj must be a dict containing every key in `keys`.
        if not isinstance(obj, dict) or any(k not in obj for k in keys):
            raise Exception("Incorrect format")

    _require_dict_with_keys(item, {"id", "object", "created", "model", "choices"})
    choices = item["choices"]
    if not isinstance(choices, list) or not choices:
        raise Exception("Incorrect format")
    for choice in choices:
        _require_dict_with_keys(choice, {"index", "delta", "finish_reason"})
        _require_dict_with_keys(choice["delta"], {"role", "content", "function_call"})
        _require_dict_with_keys(choice["delta"]["function_call"], {"name", "arguments"})
    return True
|
||||||
|
|
||||||
|
# Example of an intermediate streamed chunk: the delta carries only the next
# slice of the function-call arguments string.
second_function_call_chunk_format = {
    "id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
    "object": "chat.completion.chunk",
    "created": 1694893248,
    "model": "gpt-3.5-turbo-0613",
    "choices": [
        {
            "index": 0,
            "delta": {"function_call": {"arguments": "{\n"}},
            "finish_reason": None,
        }
    ],
}
|
||||||
|
|
||||||
|
|
||||||
# first_function_calling_chunk_structure = {
|
def validate_second_function_call_chunk_structure(data):
    """Validate the shape of an intermediate streamed function-call chunk.

    Besides the top-level envelope, each choice's delta must contain a
    function_call entry that carries an "arguments" key (the next slice of
    the streamed arguments). Returns True on success; raises
    Exception("Incorrect format") otherwise.
    """
    if not isinstance(data, dict):
        raise Exception("Incorrect format")
    for key in ("id", "object", "created", "model", "choices"):
        if key not in data:
            raise Exception("Incorrect format")
    choices = data["choices"]
    if not isinstance(choices, list) or not choices:
        raise Exception("Incorrect format")
    for choice in choices:
        if not isinstance(choice, dict):
            raise Exception("Incorrect format")
        if any(k not in choice for k in ("index", "delta", "finish_reason")):
            raise Exception("Incorrect format")
        delta = choice["delta"]
        # Intermediate chunks must stream the next piece of the arguments.
        if "function_call" not in delta or "arguments" not in delta["function_call"]:
            raise Exception("Incorrect format")
    return True
||||||
# ]
|
|
||||||
# }
|
|
||||||
|
|
||||||
|
|
||||||
# second_function_calling_chunk_structure = {
|
# Example of the terminating streamed chunk: an empty delta plus
# finish_reason set to "function_call".
final_function_call_chunk_example = {
    "id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
    "object": "chat.completion.chunk",
    "created": 1694893248,
    "model": "gpt-3.5-turbo-0613",
    "choices": [{"index": 0, "delta": {}, "finish_reason": "function_call"}],
}
||||||
# "finish_reason": [type(None), str]
|
|
||||||
# }
|
|
||||||
# ]
|
|
||||||
# }
|
|
||||||
|
|
||||||
# def validate_second_function_call_chunk_structure(item, structure = second_function_calling_chunk_structure):
|
|
||||||
# if isinstance(item, list):
|
|
||||||
# if not all(validate_second_function_call_chunk_structure(i, structure[0]) for i in item):
|
|
||||||
# return Exception("Function calling second output doesn't match expected output format")
|
|
||||||
# elif isinstance(item, dict):
|
|
||||||
# if not all(k in item and validate_second_function_call_chunk_structure(item[k], v) for k, v in structure.items()):
|
|
||||||
# return Exception("Function calling second output doesn't match expected output format")
|
|
||||||
# else:
|
|
||||||
# if not isinstance(item, structure):
|
|
||||||
# return Exception("Function calling second output doesn't match expected output format")
|
|
||||||
# return True
|
|
||||||
|
|
||||||
|
|
||||||
# final_function_call_chunk_example = {
|
def validate_final_function_call_chunk_structure(data):
    """Validate the shape of the final streamed function-call chunk.

    Only the envelope is checked (top-level keys plus per-choice keys); the
    final chunk's delta is expected to be empty, so its contents are not
    inspected. Returns True on success; raises Exception("Incorrect format")
    otherwise.
    """
    if not isinstance(data, dict):
        raise Exception("Incorrect format")
    if any(key not in data for key in ("id", "object", "created", "model", "choices")):
        raise Exception("Incorrect format")
    choices = data["choices"]
    if not isinstance(choices, list) or not choices:
        raise Exception("Incorrect format")
    for choice in choices:
        if not isinstance(choice, dict):
            raise Exception("Incorrect format")
        for key in ("index", "delta", "finish_reason"):
            if key not in choice:
                raise Exception("Incorrect format")
    return True
||||||
# extracted_chunk = ""
|
|
||||||
# finished = False
|
|
||||||
# print(f"chunk: {chunk}")
|
|
||||||
# if idx == 0: # ensure role assistant is set
|
|
||||||
# validate_first_function_call_chunk_structure(item=chunk, structure=first_function_calling_chunk_structure)
|
|
||||||
# role = chunk["choices"][0]["delta"]["role"]
|
|
||||||
# assert role == "assistant"
|
|
||||||
# elif idx != 1: # second chunk
|
|
||||||
# validate_second_function_call_chunk_structure(item=chunk, structure=second_function_calling_chunk_structure)
|
|
||||||
# if chunk["choices"][0]["finish_reason"]:
|
|
||||||
# validate_final_function_call_chunk_structure(item=chunk, structure=final_function_calling_chunk_structure)
|
|
||||||
# finished = True
|
|
||||||
# if "content" in chunk["choices"][0]["delta"]:
|
|
||||||
# extracted_chunk = chunk["choices"][0]["delta"]["content"]
|
|
||||||
# return extracted_chunk, finished
|
|
||||||
|
|
||||||
# def test_openai_streaming_and_function_calling():
|
def streaming_and_function_calling_format_tests(idx, chunk):
    """Validate one streamed chunk from a function-calling completion.

    Chunk 0 must carry the assistant role and function name; every later
    chunk is either an arguments chunk or the final (empty-delta) chunk.
    Returns (extracted content string, whether the stream finished) and
    raises on any format violation.
    """
    extracted_chunk = ""
    finished = False
    print(f"idx: {idx}")
    print(f"chunk: {chunk}")
    decision = False
    if idx == 0:  # ensure role assistant is set
        decision = validate_first_function_call_chunk_structure(chunk)
        role = chunk["choices"][0]["delta"]["role"]
        assert role == "assistant"
    elif idx != 0:  # every subsequent chunk
        try:
            decision = validate_second_function_call_chunk_structure(data=chunk)
        except Exception:  # was bare `except:` — also swallowed KeyboardInterrupt/SystemExit
            # Not an arguments chunk: check if it's the last chunk
            # (returns an empty delta {}).
            decision = validate_final_function_call_chunk_structure(data=chunk)
            finished = True
    if "content" in chunk["choices"][0]["delta"]:
        extracted_chunk = chunk["choices"][0]["delta"]["content"]
    if decision == False:
        raise Exception("incorrect format")
    return extracted_chunk, finished
||||||
# )
|
|
||||||
# # Add any assertions here to check the response
|
def test_openai_streaming_and_function_calling():
    """Stream a gpt-3.5-turbo function-calling completion and validate the
    format of every streamed chunk.

    Fails the test via pytest.fail on any API error or format mismatch.
    """
    function1 = [
        {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        }
    ]
    messages = [{"role": "user", "content": "What is the weather like in Boston?"}]
    try:
        response = completion(
            model="gpt-3.5-turbo", functions=function1, messages=messages, stream=True
        )
        # Add any assertions here to check the response
        for idx, chunk in enumerate(response):
            streaming_and_function_calling_format_tests(idx=idx, chunk=chunk)
    except Exception as e:
        # pytest.fail() raises _pytest.outcomes.Failed, so the trailing
        # `raise e` that used to follow it was unreachable dead code.
        pytest.fail(f"Error occurred: {e}")

test_openai_streaming_and_function_calling()
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[tool.poetry]
|
[tool.poetry]
|
||||||
name = "litellm"
|
name = "litellm"
|
||||||
version = "0.1.680"
|
version = "0.1.681"
|
||||||
description = "Library to easily interface with LLM API providers"
|
description = "Library to easily interface with LLM API providers"
|
||||||
authors = ["BerriAI"]
|
authors = ["BerriAI"]
|
||||||
license = "MIT License"
|
license = "MIT License"
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue