mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00
build: merge squashed commit
Squashed commit of the following: commit 6678e15381
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com> Date: Wed Feb 26 09:29:15 2025 -0800 test_prompt_caching commit bd86e0ac47
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com> Date: Wed Feb 26 08:57:16 2025 -0800 test_prompt_caching commit 2fc21ad51e
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com> Date: Wed Feb 26 08:13:45 2025 -0800 test_aprompt_caching commit d94cff55ff
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com> Date: Wed Feb 26 08:13:12 2025 -0800 test_prompt_caching commit 49c5e7811e
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com> Date: Wed Feb 26 07:43:53 2025 -0800 ui new build commit cb8d5e5917
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com> Date: Wed Feb 26 07:38:56 2025 -0800 (UI) - Create Key flow for existing users (#8844) * working create user button * working create user for a key flow * allow searching users * working create user + key * use clear sections on create key * better search for users * fix create key * ui fix create key button - make it neater / cleaner * ui fix all keys table commit 335ba30467
Author: Krrish Dholakia <krrishdholakia@gmail.com> Date: Wed Feb 26 08:53:17 2025 -0800 fix: fix file name commit b8c5b31a4e
Author: Krrish Dholakia <krrishdholakia@gmail.com> Date: Tue Feb 25 22:54:46 2025 -0800 fix: fix utils commit ac6e503461
Author: Krrish Dholakia <krrishdholakia@gmail.com> Date: Mon Feb 24 10:43:31 2025 -0800 fix(main.py): fix openai message for assistant msg if role is missing - openai allows this Fixes https://github.com/BerriAI/litellm/issues/8661 commit de3989dbc5
Author: Krrish Dholakia <krrishdholakia@gmail.com> Date: Mon Feb 24 21:19:25 2025 -0800 fix(get_litellm_params.py): handle no-log being passed in via kwargs Fixes https://github.com/BerriAI/litellm/issues/8380
This commit is contained in:
parent
da1fd9b25f
commit
fcf4ea3608
8 changed files with 172 additions and 12 deletions
|
@ -5932,6 +5932,18 @@ def convert_to_dict(message: Union[BaseModel, dict]) -> dict:
|
|||
)
|
||||
|
||||
|
||||
def validate_and_fix_openai_messages(messages: List):
    """
    Normalize a list of chat messages into valid OpenAI chat completion
    messages, then run full validation on them.

    Any message whose "role" key is absent or falsy is patched *in place*
    to carry role="assistant" — OpenAI permits an assistant message with
    the role omitted, so this repairs such inputs before validation.
    Returns the result of validate_chat_completion_messages.
    """
    for msg in messages:
        # Mutates the caller's dicts deliberately: downstream code sees
        # the repaired role as well.
        if not msg.get("role"):
            msg["role"] = "assistant"
    return validate_chat_completion_messages(messages=messages)
|
||||
|
||||
|
||||
def validate_chat_completion_messages(messages: List[AllMessageValues]):
|
||||
"""
|
||||
Ensures all messages are valid OpenAI chat completion messages.
|
||||
|
@ -6282,11 +6294,18 @@ def get_end_user_id_for_cost_tracking(
|
|||
return None
|
||||
return end_user_id
|
||||
|
||||
def should_use_cohere_v1_client(
    api_base: Optional[str], present_version_params: List[str]
):
    """
    Decide whether the Cohere v1 rerank client should be used instead of v2.

    Selection rules, in order:
      * no/empty ``api_base``      -> False (default to v2)
      * api_base ends "/v1/rerank" -> True (explicit v1 endpoint)
      * otherwise True only when a v1-only parameter is present
        ("max_chunks_per_doc" given without "max_tokens_per_doc")
        and the api_base does not explicitly end in "/v2/rerank".

    :param api_base: configured rerank endpoint URL, possibly None
    :param present_version_params: names of version-specific params the
        caller supplied
    :return: True if the v1 client should handle the request
    """
    if not api_base:
        return False
    if api_base.endswith("/v1/rerank"):
        return True
    has_v1_only_param = (
        "max_chunks_per_doc" in present_version_params
        and "max_tokens_per_doc" not in present_version_params
    )
    return has_v1_only_param and not api_base.endswith("/v2/rerank")
|
||||
|
||||
|
||||
def is_prompt_caching_valid_prompt(
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue