diff --git a/tests/llm_translation/base_llm_unit_tests.py b/tests/llm_translation/base_llm_unit_tests.py
index 19b382347d..ab42bc5fe9 100644
--- a/tests/llm_translation/base_llm_unit_tests.py
+++ b/tests/llm_translation/base_llm_unit_tests.py
@@ -1,4 +1,3 @@
-import asyncio
 import httpx
 import json
 import pytest
@@ -56,6 +55,14 @@ class BaseLLMChatTest(ABC):
     Abstract base test class that enforces a common test across all test classes.
     """
 
+    @property
+    def completion_function(self):
+        return litellm.completion
+
+    @property
+    def async_completion_function(self):
+        return litellm.acompletion
+
     @abstractmethod
     def get_base_completion_call_args(self) -> dict:
         """Must return the base completion call args"""
@@ -71,7 +78,7 @@ class BaseLLMChatTest(ABC):
             }
         ]
         try:
-            response = litellm.completion(
+            response = self.completion_function(
                 **base_completion_call_args,
                 messages=messages,
             )
@@ -90,7 +97,7 @@ class BaseLLMChatTest(ABC):
         base_completion_call_args = self.get_base_completion_call_args()
         messages = [Message(content="Hello, how are you?", role="user")]
 
-        completion(**base_completion_call_args, messages=messages)
+        self.completion_function(**base_completion_call_args, messages=messages)
 
     @pytest.mark.parametrize("image_url", ["str", "dict"])
     def test_pdf_handling(self, pdf_messages, image_url):
@@ -116,7 +123,7 @@ class BaseLLMChatTest(ABC):
         if not supports_pdf_input(base_completion_call_args["model"], None):
             pytest.skip("Model does not support image input")
 
-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=image_messages,
         )
@@ -128,7 +135,9 @@ class BaseLLMChatTest(ABC):
         messages = [
             {"role": "user", "content": "Hello", "name": "test_name"},
         ]
-        response = litellm.completion(**base_completion_call_args, messages=messages)
+        response = self.completion_function(
+            **base_completion_call_args, messages=messages
+        )
         assert response is not None
 
     def test_multilingual_requests(self):
@@ -138,7 +147,7 @@ class BaseLLMChatTest(ABC):
         """
         Tests that the provider can handle multilingual requests and invalid utf in messages
 
         Context: https://github.com/openai/openai-python/issues/1921
         """
         base_completion_call_args = self.get_base_completion_call_args()
-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=[{"role": "user", "content": "你好世界!\ud83e, ö"}],
         )
@@ -171,7 +180,7 @@ class BaseLLMChatTest(ABC):
             },
         ]
 
-        response = litellm.completion(
+        response = self.completion_function(
             **base_completion_call_args,
             messages=messages,
             response_format=response_format,
         )
@@ -200,7 +209,7 @@ class BaseLLMChatTest(ABC):
             pytest.skip("Model does not support response schema")
 
         try:
-            res = litellm.completion(
+            res = self.completion_function(
                 **base_completion_call_args,
                 messages=[
                     {"role": "system", "content": "You are a helpful assistant."},
@@ -240,7 +249,7 @@ class BaseLLMChatTest(ABC):
         ]
 
         try:
-            response = litellm.completion(
+            response = self.completion_function(
                 **base_completion_call_args,
                 messages=messages,
                 response_format={"type": "json_object"},
             )
@@ -282,7 +291,9 @@ class BaseLLMChatTest(ABC):
         """Test that tool calls with no arguments is translated correctly.
         Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
         pass
 
-    def test_image_url(self):
+    @pytest.mark.parametrize("detail", [None, "low", "high"])
+    @pytest.mark.flaky(retries=4, delay=1)
+    def test_image_url(self, detail):
         litellm.set_verbose = True
         from litellm.utils import supports_vision
@@ -301,14 +312,32 @@ class BaseLLMChatTest(ABC):
                     {
                         "type": "image_url",
                         "image_url": {
-                            "url": "https://i.pinimg.com/736x/b4/b1/be/b4b1becad04d03a9071db2817fc9fe77.jpg"
+                            "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
                         },
                     },
                 ],
             }
         ]
 
-        response = litellm.completion(**base_completion_call_args, messages=messages)
+        if detail is not None:
+            messages = [
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": "What's in this image?"},
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": "https://www.gstatic.com/webp/gallery/1.webp",
+                                "detail": detail,
+                            },
+                        },
+                    ],
+                }
+            ]
+        response = self.completion_function(
+            **base_completion_call_args, messages=messages
+        )
         assert response is not None
 
     def test_prompt_caching(self):
@@ -325,7 +354,7 @@ class BaseLLMChatTest(ABC):
 
         try:
             for _ in range(2):
-                response = litellm.completion(
+                response = self.completion_function(
                     **base_completion_call_args,
                     messages=[
                         # System Message
@@ -407,7 +436,7 @@ class BaseLLMChatTest(ABC):
         litellm.model_cost = litellm.get_model_cost_map(url="")
 
         litellm.set_verbose = True
-        response = litellm.completion(
+        response = self.completion_function(
             **self.get_base_completion_call_args(),
             messages=[{"role": "user", "content": "Hello, how are you?"}],
         )
diff --git a/tests/llm_translation/test_router_llm_translation_tests.py b/tests/llm_translation/test_router_llm_translation_tests.py
new file mode 100644
index 0000000000..f54e891516
--- /dev/null
+++ b/tests/llm_translation/test_router_llm_translation_tests.py
@@ -0,0 +1,46 @@
+"""
+Uses litellm.Router; ensures router.completion and router.acompletion pass BaseLLMChatTest.
+"""
+
+import os
+import sys
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+
+from base_llm_unit_tests import BaseLLMChatTest
+from litellm.router import Router
+from litellm._logging import verbose_logger, verbose_router_logger
+import logging
+
+
+class TestRouterLLMTranslation(BaseLLMChatTest):
+    verbose_router_logger.setLevel(logging.DEBUG)
+
+    litellm_router = Router(
+        model_list=[
+            {
+                "model_name": "gpt-4o-mini",
+                "litellm_params": {
+                    "model": "gpt-4o-mini",
+                    "api_key": os.getenv("OPENAI_API_KEY"),
+                },
+            },
+        ]
+    )
+
+    @property
+    def completion_function(self):
+        return self.litellm_router.completion
+
+    @property
+    def async_completion_function(self):
+        return self.litellm_router.acompletion
+
+    def get_base_completion_call_args(self) -> dict:
+        return {"model": "gpt-4o-mini"}
+
+    def test_tool_call_no_arguments(self, tool_call_no_arguments):
+        """Test that tool calls with no arguments are translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
+        pass
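The new async_completion_function property is defined (and overridden by the router test class to return Router.acompletion), but no hunk in this patch calls the async variant yet. A minimal sketch of how a future BaseLLMChatTest method could exercise the same seam, assuming pytest-asyncio is available to the suite; the test name and marker usage are illustrative, not code from this patch:

    import pytest

    @pytest.mark.asyncio
    async def test_basic_async_completion(self):
        # Resolve the callable through the property, so litellm.acompletion
        # and Router.acompletion are exercised by one shared code path.
        base_completion_call_args = self.get_base_completion_call_args()
        response = await self.async_completion_function(
            **base_completion_call_args,
            messages=[{"role": "user", "content": "Hello, how are you?"}],
        )
        assert response is not None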