Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
Litellm add router to base llm testing (#7202)
* code qa add litellm router to base llm testing
* test_image_url
* fix img url
* fix add router to base llm class
* fix base llm testing
* add test scenario
* fix test_json_response_format
* fixes base llm testing
* fix base llm testing
* fix test image url
This commit is contained in:
parent 1b6b47b16d
commit 925d33aa9d

2 changed files with 89 additions and 14 deletions
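The diff below gives BaseLLMChatTest two overridable entry points, completion_function and async_completion_function, and routes every test through them instead of calling litellm.completion directly, so a Router-backed subclass can reuse the whole suite. As a rough illustration only (the subclass name, model alias, and call args here are hypothetical, not part of this commit), such a subclass might look like:

    from litellm import Router

    # Hypothetical subclass, not part of this commit: it inherits every test
    # in BaseLLMChatTest but sends traffic through a litellm Router.
    class TestRouterChat(BaseLLMChatTest):
        router = Router(
            model_list=[
                {
                    "model_name": "my-gpt-4o",  # hypothetical deployment alias
                    "litellm_params": {"model": "openai/gpt-4o"},
                }
            ]
        )

        @property
        def completion_function(self):
            # Tests call self.completion_function(...), so they now hit the
            # router instead of litellm.completion.
            return self.router.completion

        @property
        def async_completion_function(self):
            return self.router.acompletion

        def get_base_completion_call_args(self) -> dict:
            return {"model": "my-gpt-4o"}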
@@ -1,4 +1,3 @@
-import asyncio
 import httpx
 import json
 import pytest
@@ -56,6 +55,14 @@ class BaseLLMChatTest(ABC):
     Abstract base test class that enforces a common test across all test classes.
     """
 
+    @property
+    def completion_function(self):
+        return litellm.completion
+
+    @property
+    def async_completion_function(self):
+        return litellm.acompletion
+
     @abstractmethod
     def get_base_completion_call_args(self) -> dict:
         """Must return the base completion call args"""
@@ -71,7 +78,7 @@ class BaseLLMChatTest(ABC):
             }
         ]
         try:
-            response = litellm.completion(
+            response = self.completion_function(
                 **base_completion_call_args,
                 messages=messages,
             )
@@ -90,7 +97,7 @@ class BaseLLMChatTest(ABC):
         base_completion_call_args = self.get_base_completion_call_args()
         messages = [Message(content="Hello, how are you?", role="user")]
 
-        completion(**base_completion_call_args, messages=messages)
+        self.completion_function(**base_completion_call_args, messages=messages)
 
     @pytest.mark.parametrize("image_url", ["str", "dict"])
     def test_pdf_handling(self, pdf_messages, image_url):
@@ -116,7 +123,7 @@ class BaseLLMChatTest(ABC):
         if not supports_pdf_input(base_completion_call_args["model"], None):
             pytest.skip("Model does not support image input")
 
-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=image_messages,
         )
@@ -128,7 +135,9 @@ class BaseLLMChatTest(ABC):
         messages = [
             {"role": "user", "content": "Hello", "name": "test_name"},
         ]
-        response = litellm.completion(**base_completion_call_args, messages=messages)
+        response = self.completion_function(
+            **base_completion_call_args, messages=messages
+        )
         assert response is not None
 
     def test_multilingual_requests(self):
@@ -138,7 +147,7 @@ class BaseLLMChatTest(ABC):
         Context: https://github.com/openai/openai-python/issues/1921
         """
         base_completion_call_args = self.get_base_completion_call_args()
-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=[{"role": "user", "content": "你好世界！\ud83e, ö"}],
         )
@@ -171,7 +180,7 @@ class BaseLLMChatTest(ABC):
             },
         ]
 
-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=messages,
             response_format=response_format,
@@ -200,7 +209,7 @@ class BaseLLMChatTest(ABC):
             pytest.skip("Model does not support response schema")
 
         try:
-            res = litellm.completion(
+            res = self.completion_function(
                 **base_completion_call_args,
                 messages=[
                     {"role": "system", "content": "You are a helpful assistant."},
@@ -240,7 +249,7 @@ class BaseLLMChatTest(ABC):
         ]
 
         try:
-            response = litellm.completion(
+            response = self.completion_function(
                 **base_completion_call_args,
                 messages=messages,
                 response_format={"type": "json_object"},
@@ -282,7 +291,9 @@ class BaseLLMChatTest(ABC):
         """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
         pass
 
-    def test_image_url(self):
+    @pytest.mark.parametrize("detail", [None, "low", "high"])
+    @pytest.mark.flaky(retries=4, delay=1)
+    def test_image_url(self, detail):
         litellm.set_verbose = True
         from litellm.utils import supports_vision
 
@@ -301,14 +312,32 @@ class BaseLLMChatTest(ABC):
                     {
                         "type": "image_url",
                         "image_url": {
-                            "url": "https://i.pinimg.com/736x/b4/b1/be/b4b1becad04d03a9071db2817fc9fe77.jpg"
+                            "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
                         },
                     },
                 ],
             }
         ]
 
-        response = litellm.completion(**base_completion_call_args, messages=messages)
+        if detail is not None:
+            messages = [
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": "What's in this image?"},
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": "https://www.gstatic.com/webp/gallery/1.webp",
+                                "detail": detail,
+                            },
+                        },
+                    ],
+                }
+            ]
+        response = self.completion_function(
+            **base_completion_call_args, messages=messages
+        )
         assert response is not None
 
     def test_prompt_caching(self):
@@ -325,7 +354,7 @@ class BaseLLMChatTest(ABC):
 
         try:
             for _ in range(2):
-                response = litellm.completion(
+                response = self.completion_function(
                     **base_completion_call_args,
                     messages=[
                         # System Message
@@ -407,7 +436,7 @@ class BaseLLMChatTest(ABC):
         litellm.model_cost = litellm.get_model_cost_map(url="")
 
         litellm.set_verbose = True
-        response = litellm.completion(
+        response = self.completion_function(
             **self.get_base_completion_call_args(),
             messages=[{"role": "user", "content": "Hello, how are you?"}],
        )
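Because every call now goes through the property indirection, async coverage follows the same pattern via async_completion_function. A minimal sketch of a test a subclass of BaseLLMChatTest could add (the test name and message are illustrative, not from this diff; assumes pytest-asyncio is installed, as in the litellm test suite):

    @pytest.mark.asyncio
    async def test_async_basic_completion(self):
        # Hypothetical test, not in this commit: self.async_completion_function
        # resolves to litellm.acompletion by default, or to Router.acompletion
        # in a router-backed subclass.
        base_completion_call_args = self.get_base_completion_call_args()
        response = await self.async_completion_function(
            **base_completion_call_args,
            messages=[{"role": "user", "content": "Hello, how are you?"}],
        )
        assert response is not None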