Litellm add router to base llm testing (#7202)

* code qa add litellm router to base llm testing

* test_image_url

* fix img url

* fix add router to base llm class

* fix base llm testing

* add test scenario

* fix test_json_response_format

* fixes base llm testing

* fix base llm testing

* fix test image url
Ishaan Jaff 2024-12-13 19:16:28 -08:00 committed by GitHub
parent 1b6b47b16d
commit 925d33aa9d
2 changed files with 89 additions and 14 deletions
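
The change hinges on a single indirection: `BaseLLMChatTest` now resolves its completion callable through a `completion_function` property instead of calling `litellm.completion` directly, so a subclass can point every shared test at a `litellm.Router` instance without touching the test bodies. A minimal sketch of the pattern, assuming `OPENAI_API_KEY` is set (`DemoRouterTests` is illustrative, not part of the commit):

import os

import litellm
from litellm.router import Router


class BaseLLMChatTest:
    # Default path: the module-level litellm.completion
    @property
    def completion_function(self):
        return litellm.completion


class DemoRouterTests(BaseLLMChatTest):
    # Hypothetical subclass: the same test bodies now go through a Router
    litellm_router = Router(
        model_list=[
            {
                "model_name": "gpt-4o-mini",
                "litellm_params": {
                    "model": "gpt-4o-mini",
                    "api_key": os.getenv("OPENAI_API_KEY"),
                },
            }
        ]
    )

    @property
    def completion_function(self):
        return self.litellm_router.completion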

base_llm_unit_tests.py

@@ -1,4 +1,3 @@
-import asyncio
 import httpx
 import json
 import pytest
@@ -56,6 +55,14 @@ class BaseLLMChatTest(ABC):
     Abstract base test class that enforces a common test across all test classes.
     """

+    @property
+    def completion_function(self):
+        return litellm.completion
+
+    @property
+    def async_completion_function(self):
+        return litellm.acompletion
+
     @abstractmethod
     def get_base_completion_call_args(self) -> dict:
         """Must return the base completion call args"""
@@ -71,7 +78,7 @@ class BaseLLMChatTest(ABC):
             }
         ]
         try:
-            response = litellm.completion(
+            response = self.completion_function(
                 **base_completion_call_args,
                 messages=messages,
             )
@@ -90,7 +97,7 @@ class BaseLLMChatTest(ABC):
         base_completion_call_args = self.get_base_completion_call_args()
         messages = [Message(content="Hello, how are you?", role="user")]

-        completion(**base_completion_call_args, messages=messages)
+        self.completion_function(**base_completion_call_args, messages=messages)

     @pytest.mark.parametrize("image_url", ["str", "dict"])
     def test_pdf_handling(self, pdf_messages, image_url):
@@ -116,7 +123,7 @@ class BaseLLMChatTest(ABC):
         if not supports_pdf_input(base_completion_call_args["model"], None):
             pytest.skip("Model does not support image input")

-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=image_messages,
         )
@@ -128,7 +135,9 @@ class BaseLLMChatTest(ABC):
         messages = [
             {"role": "user", "content": "Hello", "name": "test_name"},
         ]
-        response = litellm.completion(**base_completion_call_args, messages=messages)
+        response = self.completion_function(
+            **base_completion_call_args, messages=messages
+        )
         assert response is not None

     def test_multilingual_requests(self):
@@ -138,7 +147,7 @@ class BaseLLMChatTest(ABC):
         Context: https://github.com/openai/openai-python/issues/1921
         """
         base_completion_call_args = self.get_base_completion_call_args()
-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=[{"role": "user", "content": "你好世界!\ud83e, ö"}],
         )
@@ -171,7 +180,7 @@ class BaseLLMChatTest(ABC):
             },
         ]

-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=messages,
             response_format=response_format,
@@ -200,7 +209,7 @@ class BaseLLMChatTest(ABC):
             pytest.skip("Model does not support response schema")

         try:
-            res = litellm.completion(
+            res = self.completion_function(
                 **base_completion_call_args,
                 messages=[
                     {"role": "system", "content": "You are a helpful assistant."},
@@ -240,7 +249,7 @@ class BaseLLMChatTest(ABC):
         ]

         try:
-            response = litellm.completion(
+            response = self.completion_function(
                 **base_completion_call_args,
                 messages=messages,
                 response_format={"type": "json_object"},
@@ -282,7 +291,9 @@ class BaseLLMChatTest(ABC):
         """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
         pass

-    def test_image_url(self):
+    @pytest.mark.parametrize("detail", [None, "low", "high"])
+    @pytest.mark.flaky(retries=4, delay=1)
+    def test_image_url(self, detail):
         litellm.set_verbose = True

         from litellm.utils import supports_vision
@@ -301,14 +312,32 @@ class BaseLLMChatTest(ABC):
                     {
                         "type": "image_url",
                         "image_url": {
-                            "url": "https://i.pinimg.com/736x/b4/b1/be/b4b1becad04d03a9071db2817fc9fe77.jpg"
+                            "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
                         },
                     },
                 ],
             }
         ]

-        response = litellm.completion(**base_completion_call_args, messages=messages)
+        if detail is not None:
+            messages = [
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": "What's in this image?"},
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": "https://www.gstatic.com/webp/gallery/1.webp",
+                                "detail": detail,
+                            },
+                        },
+                    ],
+                }
+            ]
+        response = self.completion_function(
+            **base_completion_call_args, messages=messages
+        )
         assert response is not None

     def test_prompt_caching(self):
@@ -325,7 +354,7 @@ class BaseLLMChatTest(ABC):

         try:
             for _ in range(2):
-                response = litellm.completion(
+                response = self.completion_function(
                     **base_completion_call_args,
                     messages=[
                         # System Message
@@ -407,7 +436,7 @@ class BaseLLMChatTest(ABC):
         litellm.model_cost = litellm.get_model_cost_map(url="")
         litellm.set_verbose = True

-        response = litellm.completion(
+        response = self.completion_function(
             **self.get_base_completion_call_args(),
             messages=[{"role": "user", "content": "Hello, how are you?"}],
         )
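
With this file done, every former direct `litellm.completion` call in the base class resolves through `self.completion_function`, and the new `async_completion_function` property gives async tests the same hook. The hunks above only exercise the sync path; a hypothetical consumer of the async property could look like this (not part of the commit; assumes `OPENAI_API_KEY` is set):

import asyncio

import litellm


class AsyncDemo:
    # Mirrors the property added above; a Router-backed subclass
    # would return router.acompletion here instead.
    @property
    def async_completion_function(self):
        return litellm.acompletion

    async def ask(self, prompt: str):
        # Resolves the callable at call time, exactly like the base tests
        return await self.async_completion_function(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": prompt}],
        )


if __name__ == "__main__":
    response = asyncio.run(AsyncDemo().ask("Hello, how are you?"))
    print(response.choices[0].message.content)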

New test file (Router-backed subclass of BaseLLMChatTest)

@@ -0,0 +1,46 @@
+"""
+Uses litellm.Router, ensures router.completion and router.acompletion pass BaseLLMChatTest
+"""
+
+import os
+import sys
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+
+from base_llm_unit_tests import BaseLLMChatTest
+from litellm.router import Router
+from litellm._logging import verbose_logger, verbose_router_logger
+import logging
+
+
+class TestRouterLLMTranslation(BaseLLMChatTest):
+    verbose_router_logger.setLevel(logging.DEBUG)
+
+    litellm_router = Router(
+        model_list=[
+            {
+                "model_name": "gpt-4o-mini",
+                "litellm_params": {
+                    "model": "gpt-4o-mini",
+                    "api_key": os.getenv("OPENAI_API_KEY"),
+                },
+            },
+        ]
+    )
+
+    @property
+    def completion_function(self):
+        return self.litellm_router.completion
+
+    @property
+    def async_completion_function(self):
+        return self.litellm_router.acompletion
+
+    def get_base_completion_call_args(self) -> dict:
+        return {"model": "gpt-4o-mini"}
+
+    def test_tool_call_no_arguments(self, tool_call_no_arguments):
+        """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
+        pass