Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)

Commit bca71019ad: Merge branch 'main' into litellm_anthropic_api_streaming
19 changed files with 1195 additions and 40 deletions
@@ -5101,23 +5101,27 @@ def stream_chunk_builder(
     combined_content = ""
     combined_arguments = ""
 
-    if (
-        "tool_calls" in chunks[0]["choices"][0]["delta"]
-        and chunks[0]["choices"][0]["delta"]["tool_calls"] is not None
-    ):
+    tool_call_chunks = [
+        chunk
+        for chunk in chunks
+        if "tool_calls" in chunk["choices"][0]["delta"]
+        and chunk["choices"][0]["delta"]["tool_calls"] is not None
+    ]
+
+    if len(tool_call_chunks) > 0:
         argument_list = []
-        delta = chunks[0]["choices"][0]["delta"]
+        delta = tool_call_chunks[0]["choices"][0]["delta"]
         message = response["choices"][0]["message"]
         message["tool_calls"] = []
         id = None
         name = None
         type = None
         tool_calls_list = []
-        prev_index = 0
+        prev_index = None
         prev_id = None
        curr_id = None
         curr_index = 0
-        for chunk in chunks:
+        for chunk in tool_call_chunks:
             choices = chunk["choices"]
             for choice in choices:
                 delta = choice.get("delta", {})
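Note: the hunk above stops assuming that chunks[0] carries the tool-call delta. It first narrows the stream down to tool_call_chunks, seeds delta and the aggregation loop from that filtered list, and initializes prev_index to None instead of 0. A minimal standalone sketch of the same filtering pattern, using simplified plain-dict chunks (hypothetical data, not litellm's actual streaming objects):

    # Hypothetical chunk dicts shaped like the ones the diff indexes into.
    chunks = [
        {"choices": [{"delta": {"content": "Hello"}}]},
        {"choices": [{"delta": {"tool_calls": [{"index": 0, "id": "call_1"}]}}]},
        {"choices": [{"delta": {"tool_calls": None}}]},
    ]

    # Keep only chunks whose first choice carries a non-None tool_calls delta,
    # so element [0] of the filtered list is guaranteed to be a tool-call chunk.
    tool_call_chunks = [
        chunk
        for chunk in chunks
        if "tool_calls" in chunk["choices"][0]["delta"]
        and chunk["choices"][0]["delta"]["tool_calls"] is not None
    ]

    if len(tool_call_chunks) > 0:
        first_delta = tool_call_chunks[0]["choices"][0]["delta"]
        print(first_delta["tool_calls"][0]["id"])  # -> call_1

The later hunks apply the same pattern to function_call and content chunks.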
@@ -5139,6 +5143,8 @@ def stream_chunk_builder(
                         name = tool_calls[0].function.name
                     if tool_calls[0].type:
                         type = tool_calls[0].type
+                if prev_index is None:
+                    prev_index = curr_index
                 if curr_index != prev_index:  # new tool call
                     combined_arguments = "".join(argument_list)
                     tool_calls_list.append(
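Note: the two added lines seed prev_index from the first index that is actually seen, so the "new tool call" check compares against that first index instead of a hard-coded 0. A rough sketch of that index-transition grouping, with hypothetical (index, fragment) pairs standing in for streamed tool-call argument deltas:

    # Hypothetical streamed fragments: (tool_call_index, argument_fragment).
    fragments = [(0, '{"city": '), (0, '"Paris"}'), (1, '{"unit": '), (1, '"celsius"}')]

    tool_calls_list = []
    argument_list = []
    prev_index = None

    for curr_index, fragment in fragments:
        if prev_index is None:
            prev_index = curr_index      # seed from the first chunk, not from 0
        if curr_index != prev_index:     # new tool call: flush what was gathered
            tool_calls_list.append(
                {"index": prev_index, "arguments": "".join(argument_list)}
            )
            argument_list = []
            prev_index = curr_index
        argument_list.append(fragment)

    # Flush the final tool call after the loop.
    tool_calls_list.append({"index": prev_index, "arguments": "".join(argument_list)})
    print(tool_calls_list)
    # [{'index': 0, 'arguments': '{"city": "Paris"}'},
    #  {'index': 1, 'arguments': '{"unit": "celsius"}'}]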
@@ -5157,18 +5163,24 @@ def stream_chunk_builder(
         tool_calls_list.append(
             {
                 "id": id,
                 "index": curr_index,
                 "function": {"arguments": combined_arguments, "name": name},
                 "type": type,
             }
         )
         response["choices"][0]["message"]["content"] = None
         response["choices"][0]["message"]["tool_calls"] = tool_calls_list
-    elif (
-        "function_call" in chunks[0]["choices"][0]["delta"]
-        and chunks[0]["choices"][0]["delta"]["function_call"] is not None
-    ):
+
+    function_call_chunks = [
+        chunk
+        for chunk in chunks
+        if "function_call" in chunk["choices"][0]["delta"]
+        and chunk["choices"][0]["delta"]["function_call"] is not None
+    ]
+
+    if len(function_call_chunks) > 0:
         argument_list = []
-        delta = chunks[0]["choices"][0]["delta"]
+        delta = function_call_chunks[0]["choices"][0]["delta"]
         function_call = delta.get("function_call", "")
         function_call_name = function_call.name
@@ -5176,7 +5188,7 @@ def stream_chunk_builder(
         message["function_call"] = {}
         message["function_call"]["name"] = function_call_name
 
-        for chunk in chunks:
+        for chunk in function_call_chunks:
             choices = chunk["choices"]
             for choice in choices:
                 delta = choice.get("delta", {})
@@ -5193,7 +5205,15 @@ def stream_chunk_builder(
         response["choices"][0]["message"]["function_call"][
             "arguments"
         ] = combined_arguments
     else:
+        content_chunks = [
+            chunk
+            for chunk in chunks
+            if "content" in chunk["choices"][0]["delta"]
+            and chunk["choices"][0]["delta"]["content"] is not None
+        ]
+
+        if len(content_chunks) > 0:
             for chunk in chunks:
                 choices = chunk["choices"]
                 for choice in choices:
@@ -5209,12 +5229,12 @@ def stream_chunk_builder(
             # Update the "content" field within the response dictionary
             response["choices"][0]["message"]["content"] = combined_content
 
+    completion_output = ""
     if len(combined_content) > 0:
-        completion_output = combined_content
-    elif len(combined_arguments) > 0:
-        completion_output = combined_arguments
-    else:
-        completion_output = ""
+        completion_output += combined_content
+    if len(combined_arguments) > 0:
+        completion_output += combined_arguments
+
     # # Update usage information if needed
     prompt_tokens = 0
     completion_tokens = 0
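Note: the final hunk replaces the either/or assignment with concatenation, so a stream that produced both text content and tool/function-call arguments contributes both to completion_output, presumably feeding the usage accounting suggested by the prompt_tokens / completion_tokens lines that follow. A small sketch of the new behaviour, assuming plain pre-aggregated strings:

    # Assume these were aggregated from the stream; either may be empty.
    combined_content = "Here is the forecast."
    combined_arguments = '{"city": "Paris"}'

    completion_output = ""
    if len(combined_content) > 0:
        completion_output += combined_content
    if len(combined_arguments) > 0:
        completion_output += combined_arguments

    # Both the text and the tool arguments now end up in completion_output.
    print(completion_output)  # -> Here is the forecast.{"city": "Paris"}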