Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00
* fix #9783: Retain schema field ordering for google gemini and vertex (#9828)
* test: update test
* refactor(groq.py): initial commit migrating groq to base_llm_http_handler
* fix(streaming_chunk_builder_utils.py): fix how tool content is combined
  Fixes https://github.com/BerriAI/litellm/issues/10034
* fix(vertex_ai/common_utils.py): prevent infinite loop in helper function
* fix(groq/chat/transformation.py): handle groq streaming errors correctly
* fix(groq/chat/transformation.py): handle max_retries

---------

Co-authored-by: Adrian Lyjak <adrian@chatmeter.com>
This commit is contained in:
parent 1b9b745cae
commit fdfa1108a6

12 changed files with 493 additions and 201 deletions
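Background on the first bullet, fix #9783: the schema-ordering problem concerns JSON schemas passed to Gemini and Vertex AI models through litellm's OpenAI-style response_format parameter, where the declared property order should be retained in the request sent to Google. The snippet below is a minimal sketch of that kind of call, not of the fix itself; the model alias, property names, and schema are illustrative assumptions.

    import litellm

    # Hypothetical schema: the declared property order ("name", then "price",
    # then "in_stock") is what fix #9783 aims to preserve when the schema is
    # translated for Gemini / Vertex AI.
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "price": {"type": "number"},
            "in_stock": {"type": "boolean"},
        },
        "required": ["name", "price", "in_stock"],
    }

    response = litellm.completion(
        model="gemini/gemini-1.5-pro",  # assumed alias; any Gemini/Vertex model applies
        messages=[{"role": "user", "content": "Describe one product as JSON."}],
        response_format={
            "type": "json_schema",
            "json_schema": {"name": "product", "schema": schema},
        },
    )
    print(response.choices[0].message.content)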
@@ -926,12 +926,17 @@ def execute_completion(opts: dict):
     response_gen = litellm.completion(**opts)
     for i, part in enumerate(response_gen):
         partial_streaming_chunks.append(part)
     print("\n\n")
     print(f"partial_streaming_chunks: {partial_streaming_chunks}")
     print("\n\n")
     assembly = litellm.stream_chunk_builder(partial_streaming_chunks)
-    print(assembly.choices[0].message.tool_calls)
+    print(f"assembly.choices[0].message.tool_calls: {assembly.choices[0].message.tool_calls}")
+    assert len(assembly.choices[0].message.tool_calls) == 3, (
+        assembly.choices[0].message.tool_calls[0].function.arguments[0]
+    )
     print(assembly.choices[0].message.tool_calls)
+    for tool_call in assembly.choices[0].message.tool_calls:
+        json.loads(tool_call.function.arguments)  # assert valid json - https://github.com/BerriAI/litellm/issues/10034


 def test_grok_bug(load_env):
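The hunk above tightens the test around the tool-content fix tracked in https://github.com/BerriAI/litellm/issues/10034: when a tool-calling completion is streamed, the argument fragments spread across chunks must be reassembled by litellm.stream_chunk_builder into arguments that parse as valid JSON. Below is a minimal end-to-end sketch of that pattern, assuming a hypothetical get_weather tool and a Groq model alias (both placeholders); it mirrors what the test does rather than the internals of the fix.

    import json
    import litellm

    # Hypothetical tool definition; any function tool with JSON-object arguments works.
    tools = [{
        "type": "function",
        "function": {
            "name": "get_weather",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }]

    chunks = []
    for chunk in litellm.completion(
        model="groq/llama-3.3-70b-versatile",  # assumed model; the issue concerns streamed tool calls
        messages=[{"role": "user", "content": "What's the weather in Paris and in Rome?"}],
        tools=tools,
        stream=True,
    ):
        chunks.append(chunk)

    # Reassemble the streamed deltas into a single ModelResponse.
    assembly = litellm.stream_chunk_builder(chunks)

    # After the fix, each tool call's arguments should be a complete JSON document.
    for tool_call in assembly.choices[0].message.tool_calls or []:
        print(tool_call.function.name, json.loads(tool_call.function.arguments))

The json.loads call here is the same validity check the updated test performs on every assembled tool call.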