LiteLLM Minor Fixes & Improvements (01/08/2025) - p2 (#7643)

* fix(streaming_chunk_builder_utils.py): add test for groq tool calling + streaming + combine chunks

Addresses https://github.com/BerriAI/litellm/issues/7621

* fix(streaming_utils.py): fix modelresponseiterator for openai like chunk parser

ensures chunk parser uses the correct tool call id when translating the chunk

Fixes https://github.com/BerriAI/litellm/issues/7621

* build(model_hub.tsx): display cost pricing on model hub

* build(model_hub.tsx): show cost per token pricing + complete model information

* fix(types/utils.py): fix usage object handling
This commit is contained in:
Krish Dholakia 2025-01-08 19:45:19 -08:00 committed by GitHub
parent 39ee4c6bb4
commit 1e3370f3cb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 206 additions and 21 deletions

View file

@@ -103,7 +103,8 @@ class ChunkProcessor:
def get_combined_tool_content(
self, tool_call_chunks: List[Dict[str, Any]]
) -> List[ChatCompletionMessageToolCall]:
argument_list: List = []
argument_list: List[str] = []
delta = tool_call_chunks[0]["choices"][0]["delta"]
id = None
name = None
@@ -171,6 +172,7 @@ class ChunkProcessor:
),
)
)
return tool_calls_list
def get_combined_function_call_content(