fix(stream_chunk_builder): adding support for tool calling in completion counting
commit 4cdd930fa2 (parent 40d9e8ab23)
3 changed files with 109 additions and 10 deletions
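
Background, not part of the diff: when a model streams a tool call, the function name and the JSON arguments arrive as fragments spread across many chunks, so a chunk builder must concatenate those deltas into one complete tool call before completion tokens can be counted for it. Below is a minimal sketch of that reassembly step, assuming OpenAI-style chunk dicts; the helper name is hypothetical and does not reflect litellm's actual internals.

def combine_tool_call_deltas(chunks):
    # Hypothetical helper, for illustration only: merges streamed
    # tool-call deltas into complete {name, arguments} records.
    calls = {}  # tool-call index -> accumulated fields
    for chunk in chunks:
        delta = chunk["choices"][0]["delta"]
        for tc in delta.get("tool_calls") or []:
            slot = calls.setdefault(tc["index"], {"name": "", "arguments": ""})
            fn = tc.get("function", {})
            slot["name"] += fn.get("name") or ""
            slot["arguments"] += fn.get("arguments") or ""
    # e.g. {0: {"name": "get_current_weather", "arguments": '{"location": "Boston, MA"}'}}
    return calls

Once the arguments string is whole, token counting can run over the assembled call instead of over empty text content, which is what the tests in the diff below exercise.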
@@ -7,6 +7,7 @@ sys.path.insert(
 from litellm import completion, stream_chunk_builder
+import litellm
 import os, dotenv
 from openai import OpenAI
 import pytest
 dotenv.load_dotenv()
 
@@ -30,20 +31,81 @@ function_schema = {
     },
 }
 
-def test_stream_chunk_builder():
+
+tools_schema = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_current_weather",
+            "description": "Get the current weather in a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The city and state, e.g. San Francisco, CA"
+                    },
+                    "unit": {
+                        "type": "string",
+                        "enum": ["celsius", "fahrenheit"]
+                    }
+                },
+                "required": ["location"]
+            }
+        }
+    }
+]
+
+# def test_stream_chunk_builder_tools():
+#     try:
+#         litellm.set_verbose = False
+#         response = client.chat.completions.create(
+#             model="gpt-3.5-turbo",
+#             messages=messages,
+#             tools=tools_schema,
+#             # stream=True,
+#             # complete_response=True # runs stream_chunk_builder under-the-hood
+#         )
+
+#         print(f"response: {response}")
+#         print(f"response usage: {response.usage}")
+#     except Exception as e:
+#         pytest.fail(f"An exception occurred - {str(e)}")
+
+# test_stream_chunk_builder_tools()
+
+def test_stream_chunk_builder_litellm_function_call():
     try:
         litellm.set_verbose = False
-        response = completion(
+        response = litellm.completion(
             model="gpt-3.5-turbo",
             messages=messages,
             functions=[function_schema],
-            stream=True,
-            complete_response=True # runs stream_chunk_builder under-the-hood
+            # stream=True,
+            # complete_response=True # runs stream_chunk_builder under-the-hood
         )
 
         print(f"response: {response}")
         print(f"response usage: {response['usage']}")
     except Exception as e:
         pytest.fail(f"An exception occurred - {str(e)}")
 
-test_stream_chunk_builder()
+# test_stream_chunk_builder_litellm_function_call()
+
+def test_stream_chunk_builder_litellm_tool_call():
+    try:
+        litellm.set_verbose = False
+        response = litellm.completion(
+            model="gpt-3.5-turbo",
+            messages=messages,
+            tools=tools_schema,
+            stream=True,
+            complete_response = True
+        )
+
+        print(f"complete response: {response}")
+        print(f"complete response usage: {response.usage}")
+
+    except Exception as e:
+        pytest.fail(f"An exception occurred - {str(e)}")
+
+test_stream_chunk_builder_litellm_tool_call()
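
A rough usage sketch of what the new tool-call test exercises, for driving stream_chunk_builder by hand instead of via complete_response=True. It reuses the messages and tools_schema from the diff above and assumes an OPENAI_API_KEY in the environment; the messages= argument to stream_chunk_builder (used for prompt-token counting) is an assumption about the current signature.

import litellm
from litellm import stream_chunk_builder

messages = [{"role": "user", "content": "What is the current weather in Boston?"}]

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=messages,
    tools=tools_schema,  # the schema added in this commit's test
    stream=True,
)

chunks = [chunk for chunk in response]  # drain the stream into a list
rebuilt = stream_chunk_builder(chunks, messages=messages)

# With this fix, usage should be populated even when the model answers
# with a tool call rather than plain text content.
print(rebuilt.usage)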