diff --git a/dist/litellm-0.1.687-py3-none-any.whl b/dist/litellm-0.1.687-py3-none-any.whl
deleted file mode 100644
index 706de4e3d..000000000
Binary files a/dist/litellm-0.1.687-py3-none-any.whl and /dev/null differ
diff --git a/dist/litellm-0.1.687.tar.gz b/dist/litellm-0.1.687.tar.gz
deleted file mode 100644
index 18975833d..000000000
Binary files a/dist/litellm-0.1.687.tar.gz and /dev/null differ
diff --git a/litellm/main.py b/litellm/main.py
index e8dfc7b19..c77f56d8f 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -1158,6 +1158,10 @@ def stream_chunk_builder(chunks: list):
 
     # Extract the "content" strings from the nested dictionaries within "choices"
     content_list = []
+    if "function_call" in chunks[0]["choices"][0]["delta"]:
+        function_call_name = chunks[0]["choices"][0]["delta"]["function_call"]["name"]
+        print(function_call_name)
+
     for chunk in chunks:
         choices = chunk["choices"]
         for choice in choices:
diff --git a/litellm/tests/test_response.json b/litellm/tests/test_response.json
new file mode 100644
index 000000000..6e3294f39
--- /dev/null
+++ b/litellm/tests/test_response.json
@@ -0,0 +1,249 @@
+
+OpenAI non stream response:
+{
+  "id": "chatcmpl-7zoXylnQH7IVUQXpbE5AHZWxQhjCW",
+  "object": "chat.completion",
+  "created": 1694966666,
+  "model": "gpt-3.5-turbo-0613",
+  "choices": [
+    {
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": null,
+        "function_call": {
+          "name": "get_current_weather",
+          "arguments": "{\n\"location\": \"Boston\"\n}"
+        }
+      },
+      "finish_reason": "function_call"
+    }
+  ],
+  "usage": {
+    "prompt_tokens": 82,
+    "completion_tokens": 15,
+    "total_tokens": 97
+  }
+}
+
+OpenAI stream response:
+{
+  "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7",
+  "object": "chat.completion.chunk",
+  "created": 1694966819,
+  "model": "gpt-3.5-turbo-0613",
+  "choices": [
+    {
+      "index": 0,
+      "delta": {
+        "role": "assistant",
+        "content": null,
+        "function_call": {
+          "name": "get_current_weather",
+          "arguments": ""
+        }
+      },
+      "finish_reason": null
+    }
+  ]
+}
+{
+  "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7",
+  "object": "chat.completion.chunk",
+  "created": 1694966819,
+  "model": "gpt-3.5-turbo-0613",
+  "choices": [
+    {
+      "index": 0,
+      "delta": {
+        "function_call": {
+          "arguments": "{\n"
+        }
+      },
+      "finish_reason": null
+    }
+  ]
+}
+{
+  "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7",
+  "object": "chat.completion.chunk",
+  "created": 1694966819,
+  "model": "gpt-3.5-turbo-0613",
+  "choices": [
+    {
+      "index": 0,
+      "delta": {
+        "function_call": {
+          "arguments": " "
+        }
+      },
+      "finish_reason": null
+    }
+  ]
+}
+{
+  "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7",
+  "object": "chat.completion.chunk",
+  "created": 1694966819,
+  "model": "gpt-3.5-turbo-0613",
+  "choices": [
+    {
+      "index": 0,
+      "delta": {
+        "function_call": {
+          "arguments": " \""
+        }
+      },
+      "finish_reason": null
+    }
+  ]
+}
+{
+  "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7",
+  "object": "chat.completion.chunk",
+  "created": 1694966819,
+  "model": "gpt-3.5-turbo-0613",
+  "choices": [
+    {
+      "index": 0,
+      "delta": {
+        "function_call": {
+          "arguments": "location"
+        }
+      },
+      "finish_reason": null
+    }
+  ]
+}
+{
+  "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7",
+  "object": "chat.completion.chunk",
+  "created": 1694966819,
+  "model": "gpt-3.5-turbo-0613",
+  "choices": [
+    {
+      "index": 0,
+      "delta": {
+        "function_call": {
+          "arguments": "\":"
+        }
+      },
+      "finish_reason": null
+    }
+  ]
+}
+{
+  "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7",
+  "object": "chat.completion.chunk",
+  "created": 1694966819,
+  "model": "gpt-3.5-turbo-0613",
"gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "delta": { + "function_call": { + "arguments": " \"" + } + }, + "finish_reason": null + } + ] +} +{ + "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7", + "object": "chat.completion.chunk", + "created": 1694966819, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "delta": { + "function_call": { + "arguments": "Boston" + } + }, + "finish_reason": null + } + ] +} +{ + "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7", + "object": "chat.completion.chunk", + "created": 1694966819, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "delta": { + "function_call": { + "arguments": "," + } + }, + "finish_reason": null + } + ] +} +{ + "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7", + "object": "chat.completion.chunk", + "created": 1694966819, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "delta": { + "function_call": { + "arguments": " MA" + } + }, + "finish_reason": null + } + ] +} +{ + "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7", + "object": "chat.completion.chunk", + "created": 1694966819, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "delta": { + "function_call": { + "arguments": "\"\n" + } + }, + "finish_reason": null + } + ] +} +{ + "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7", + "object": "chat.completion.chunk", + "created": 1694966819, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "delta": { + "function_call": { + "arguments": "}" + } + }, + "finish_reason": null + } + ] +} +{ + "id": "chatcmpl-7zoaRRfxJErRzGNgfrqXrTHRexvG7", + "object": "chat.completion.chunk", + "created": 1694966819, + "model": "gpt-3.5-turbo-0613", + "choices": [ + { + "index": 0, + "delta": {}, + "finish_reason": "function_call" + } + ] +} \ No newline at end of file diff --git a/litellm/tests/test_stream_chunk_builder.py b/litellm/tests/test_stream_chunk_builder.py index e363f648a..23dcf57a8 100644 --- a/litellm/tests/test_stream_chunk_builder.py +++ b/litellm/tests/test_stream_chunk_builder.py @@ -2,16 +2,33 @@ from litellm import completion, stream_chunk_builder import litellm import os -user_message = "Write a short poem about the sky" +user_message = "What is the current weather in Boston?" messages = [{"content": user_message, "role": "user"}] +function_schema = { + "name": "get_weather", + "description": + "gets the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": + "The city and state, e.g. San Francisco, CA" + }, + }, + "required": ["location"] + }, +} + def test_stream_chunk_builder(): litellm.api_key = os.environ["OPENAI_API_KEY"] response = completion( model="gpt-3.5-turbo", messages=messages, + functions=[function_schema], stream=True, - max_tokens=10, ) chunks = []