Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 03:34:10 +00:00)
Litellm dev 04 05 2025 p2 (#9774)
* test: move test to just checking async
* fix(transformation.py): handle function call with no schema
* fix(utils.py): handle pydantic base model in message tool calls
  Fix https://github.com/BerriAI/litellm/issues/9321
* fix(vertex_and_google_ai_studio.py): handle tools=[]
  Fixes https://github.com/BerriAI/litellm/issues/9080
* test: remove max token restriction
* test: fix basic test
* fix(get_supported_openai_params.py): fix check
* fix(converse_transformation.py): support fake streaming for meta.llama3-3-70b-instruct-v1:0
* fix: fix test
* fix: parse out empty dictionary on dbrx streaming + tool calls
* fix(handle-'strict'-param-when-calling-fireworks-ai): fireworks ai does not support 'strict' param
* fix: fix ruff check '
* fix: handle no strict in function
* fix: revert bedrock change - handle in separate PR
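As context for the tools=[] item above, a minimal usage sketch of the call shape that fix targets (https://github.com/BerriAI/litellm/issues/9080). This is not code from the commit: the model string, prompt, and the assumption of a configured Google AI Studio API key are illustrative placeholders.

# Hedged sketch, not from the commit: calling litellm with an empty tools list,
# the case the vertex_and_google_ai_studio.py fix is described as handling.
from litellm import completion

response = completion(
    model="gemini/gemini-1.5-flash",  # illustrative model string; any supported model works
    messages=[{"role": "user", "content": "Hello"}],
    tools=[],  # empty tool list, previously error-prone per issue #9080
)
print(response.choices[0].message.content)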
This commit is contained in: parent d8f47fc9e5 · commit fcf17d114f
10 changed files with 214 additions and 11 deletions
@@ -695,6 +695,7 @@ class ChatCompletionToolParamFunctionChunk(TypedDict, total=False):
     name: Required[str]
     description: str
     parameters: dict
+    strict: bool


 class OpenAIChatCompletionToolParam(TypedDict):
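For reference, a minimal, self-contained sketch of a tool definition matching the TypedDict shape shown in the hunk above. The function name and JSON schema are illustrative and not taken from the repository; the note about Fireworks AI comes from the commit message.

# Hedged sketch: a tool definition shaped like the TypedDict above. The
# "strict" key is the field this hunk adds; per the commit message, litellm
# drops it for providers such as Fireworks AI that do not accept it.
from typing import Required, TypedDict  # Python 3.11+; use typing_extensions on older versions


class ChatCompletionToolParamFunctionChunk(TypedDict, total=False):
    # Re-declared here only to keep the example self-contained.
    name: Required[str]
    description: str
    parameters: dict
    strict: bool


# Hypothetical function schema for illustration; not from the commit.
get_weather: ChatCompletionToolParamFunctionChunk = {
    "name": "get_current_weather",
    "description": "Return the current weather for a city",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
    "strict": True,
}

# OpenAI-style wrapper, mirroring OpenAIChatCompletionToolParam in the diff.
tools = [{"type": "function", "function": get_weather}]
print(tools)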