Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
build: merge squashed commit

Squashed commit of the following:

commit 6678e15381
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Date: Wed Feb 26 09:29:15 2025 -0800
    test_prompt_caching

commit bd86e0ac47
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Date: Wed Feb 26 08:57:16 2025 -0800
    test_prompt_caching

commit 2fc21ad51e
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Date: Wed Feb 26 08:13:45 2025 -0800
    test_aprompt_caching

commit d94cff55ff
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Date: Wed Feb 26 08:13:12 2025 -0800
    test_prompt_caching

commit 49c5e7811e
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Date: Wed Feb 26 07:43:53 2025 -0800
    ui new build

commit cb8d5e5917
Author: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Date: Wed Feb 26 07:38:56 2025 -0800
    (UI) - Create Key flow for existing users (#8844)
    * working create user button
    * working create user for a key flow
    * allow searching users
    * working create user + key
    * use clear sections on create key
    * better search for users
    * fix create key
    * ui fix create key button - make it neater / cleaner
    * ui fix all keys table

commit 335ba30467
Author: Krrish Dholakia <krrishdholakia@gmail.com>
Date: Wed Feb 26 08:53:17 2025 -0800
    fix: fix file name

commit b8c5b31a4e
Author: Krrish Dholakia <krrishdholakia@gmail.com>
Date: Tue Feb 25 22:54:46 2025 -0800
    fix: fix utils

commit ac6e503461
Author: Krrish Dholakia <krrishdholakia@gmail.com>
Date: Mon Feb 24 10:43:31 2025 -0800
    fix(main.py): fix openai message for assistant msg if role is missing - openai allows this
    Fixes https://github.com/BerriAI/litellm/issues/8661

commit de3989dbc5
Author: Krrish Dholakia <krrishdholakia@gmail.com>
Date: Mon Feb 24 21:19:25 2025 -0800
    fix(get_litellm_params.py): handle no-log being passed in via kwargs
    Fixes https://github.com/BerriAI/litellm/issues/8380
parent da1fd9b25f
commit fcf4ea3608

8 changed files with 172 additions and 12 deletions
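Commit ac6e503461 above concerns assistant messages that arrive without a "role" key, which the OpenAI API tolerates. As a rough illustration of the idea only (the actual change in litellm/main.py is not part of this diff), a hypothetical normalize_messages helper could default the missing role before the request goes out:

# Hypothetical sketch; the helper name and placement are assumptions, not litellm's code.
from typing import Any, Dict, List


def normalize_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Default a missing "role" to "assistant" before calling the OpenAI API."""
    normalized = []
    for message in messages:
        message = dict(message)  # copy so the caller's message dicts are untouched
        if "role" not in message:
            message["role"] = "assistant"
        normalized.append(message)
    return normalized

The new test file below exercises exactly this case: a message carrying only content and tool_calls, with no role.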
tests/litellm/test_main.py (new file, +119)

@@ -0,0 +1,119 @@
import json
import os
import sys

import pytest
from fastapi.testclient import TestClient

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

from unittest.mock import MagicMock, patch

import litellm


@pytest.fixture
def openai_api_response():
    """Static OpenAI chat.completions response payload used to mock the API."""
    mock_response_data = {
        "id": "chatcmpl-B0W3vmiM78Xkgx7kI7dr7PC949DMS",
        "choices": [
            {
                "finish_reason": "stop",
                "index": 0,
                "logprobs": None,
                "message": {
                    "content": "",
                    "refusal": None,
                    "role": "assistant",
                    "audio": None,
                    "function_call": None,
                    "tool_calls": None,
                },
            }
        ],
        "created": 1739462947,
        "model": "gpt-4o-mini-2024-07-18",
        "object": "chat.completion",
        "service_tier": "default",
        "system_fingerprint": "fp_bd83329f63",
        "usage": {
            "completion_tokens": 1,
            "prompt_tokens": 121,
            "total_tokens": 122,
            "completion_tokens_details": {
                "accepted_prediction_tokens": 0,
                "audio_tokens": 0,
                "reasoning_tokens": 0,
                "rejected_prediction_tokens": 0,
            },
            "prompt_tokens_details": {"audio_tokens": 0, "cached_tokens": 0},
        },
    }

    return mock_response_data


def test_completion_missing_role(openai_api_response):
    """Assistant messages may omit "role" (OpenAI allows this); litellm.completion
    should still accept and forward them. Covers the fix for
    https://github.com/BerriAI/litellm/issues/8661."""
    from openai import OpenAI

    from litellm.types.utils import ModelResponse

    client = OpenAI(api_key="test_api_key")

    # Mock the raw OpenAI response so no real network call is made.
    mock_raw_response = MagicMock()
    mock_raw_response.headers = {
        "x-request-id": "123",
        "openai-organization": "org-123",
        "x-ratelimit-limit-requests": "100",
        "x-ratelimit-remaining-requests": "99",
    }
    mock_raw_response.parse.return_value = ModelResponse(**openai_api_response)

    print(f"openai_api_response: {openai_api_response}")

    with patch.object(
        client.chat.completions.with_raw_response, "create", mock_raw_response
    ) as mock_create:
        litellm.completion(
            model="gpt-4o-mini",
            messages=[
                {"role": "user", "content": "Hey"},
                {
                    # Assistant-style message with tool_calls but no "role" key.
                    "content": "",
                    "tool_calls": [
                        {
                            "id": "call_m0vFJjQmTH1McvaHBPR2YFwY",
                            "function": {
                                "arguments": '{"input": "dksjsdkjdhskdjshdskhjkhlk"}',
                                "name": "tool_name",
                            },
                            "type": "function",
                            "index": 0,
                        },
                        {
                            "id": "call_Vw6RaqV2n5aaANXEdp5pYxo2",
                            "function": {
                                "arguments": '{"input": "jkljlkjlkjlkjlk"}',
                                "name": "tool_name",
                            },
                            "type": "function",
                            "index": 1,
                        },
                        {
                            "id": "call_hBIKwldUEGlNh6NlSXil62K4",
                            "function": {
                                "arguments": '{"input": "jkjlkjlkjlkj;lj"}',
                                "name": "tool_name",
                            },
                            "type": "function",
                            "index": 2,
                        },
                    ],
                },
            ],
            client=client,
        )

        mock_create.assert_called_once()
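The test pins down the behavior fixed in commit ac6e503461: a message that carries tool_calls but no "role" key is accepted by litellm.completion and forwarded to the (mocked) OpenAI client, which mock_create.assert_called_once() verifies. It can be run on its own with pytest tests/litellm/test_main.py::test_completion_missing_role.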