diff --git a/tests/local_testing/test_promptlayer_integration.py b/tests/local_testing/test_promptlayer_integration.py
index d2e2268e61..8e32894731 100644
--- a/tests/local_testing/test_promptlayer_integration.py
+++ b/tests/local_testing/test_promptlayer_integration.py
@@ -1,15 +1,23 @@
-import sys
-import os
 import io
+import os
+import sys
 
 sys.path.insert(0, os.path.abspath("../.."))
 
-from litellm import completion
-import litellm
-
-import pytest
-
+import json
 import time
+from datetime import datetime
+from unittest.mock import AsyncMock
+
+import httpx
+import pytest
+from openai.types.chat import ChatCompletionMessage
+from openai.types.chat.chat_completion import ChatCompletion, Choice
+from respx import MockRouter
+
+import litellm
+from litellm import completion
+from litellm.integrations.prompt_layer import PromptLayerLogger
 
 # def test_promptlayer_logging():
 #     try:
@@ -114,3 +122,76 @@ def test_promptlayer_logging_with_metadata_tags():
 #         print(e)
 
 # test_chat_openai()
+
+
+@pytest.mark.asyncio
+@pytest.mark.respx
+async def test_promptlayer_logging_with_mocked_request(respx_mock: MockRouter):
+    promptlayer_logger = PromptLayerLogger()
+
+    mock_response = AsyncMock()
+    obj = ChatCompletion(
+        id="foo",
+        model="gpt-4",
+        object="chat.completion",
+        choices=[
+            Choice(
+                finish_reason="stop",
+                index=0,
+                message=ChatCompletionMessage(
+                    content="Hello world!",
+                    role="assistant",
+                ),
+            )
+        ],
+        created=int(datetime.now().timestamp()),
+    )
+    litellm.set_verbose = True
+
+    mock_request = respx_mock.post(url__regex=r".*/chat/completions.*").mock(
+        return_value=httpx.Response(200, json=obj.model_dump(mode="json"))
+    )
+
+    mock_promptlayer_response = respx_mock.post(
+        "https://api.promptlayer.com/log-request"
+    ).mock(return_value=httpx.Response(200, json={"id": "mock_promptlayer_id"}))
+
+    response = completion(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "Hello, can you provide a response?"}],
+        temperature=0.2,
+        max_tokens=20,
+        metadata={"model": "ai21", "pl_tags": ["env:dev"]},
+    )
+
+    status_code = promptlayer_logger.log_event(
+        kwargs={
+            "model": "gpt-4",
+            "messages": [
+                {"role": "user", "content": "Hello, can you provide a response?"}
+            ],
+        },
+        response_obj=response,
+        start_time=datetime.now(),
+        end_time=datetime.now(),
+        print_verbose=print,
+    )
+
+    respx_mock.assert_all_called()
+
+    for call in mock_request.calls:
+        print(call)
+        print(call.request.content)
+
+        json_body = json.loads(call.request.content)
+
+        print(json_body)
+
+    for call in mock_promptlayer_response.calls:
+        print(call)
+        print(call.request.content)
+
+        json_body = json.loads(call.request.content)
+        print(json_body)
+
+    assert status_code == {"id": "mock_promptlayer_id"}