mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 18:24:20 +00:00)
Update promptlayer integration test based on review feedback
parent ec26cec8f6 · commit 30f44afe0b
1 changed file with 88 additions and 7 deletions
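For orientation before the diff: the new test mocks two endpoints, the OpenAI chat-completions API and PromptLayer's log-request API. Its closing assertion only makes sense if PromptLayerLogger.log_event returns the parsed JSON body of the PromptLayer response. The sketch below captures that assumed behavior; it is a minimal illustration, not litellm's actual implementation, and every payload field name in it is hypothetical.

    # Hypothetical sketch of a PromptLayer-style logger; NOT litellm's
    # actual PromptLayerLogger. Only the URL comes from the test below.
    import httpx


    def log_event_sketch(kwargs, response_obj, start_time, end_time):
        # One POST per completion; the test mocks exactly this URL.
        resp = httpx.post(
            "https://api.promptlayer.com/log-request",
            json={
                "kwargs": kwargs,  # assumed field name
                "request_response": response_obj,  # assumed field name
                "request_start_time": start_time.timestamp(),  # assumed field name
                "request_end_time": end_time.timestamp(),  # assumed field name
            },
        )
        # Returning the parsed body is what would make the test's closing
        # assertion (status_code == {"id": "mock_promptlayer_id"}) hold.
        return resp.json()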
@@ -1,15 +1,23 @@
-import sys
-import os
 import io
+import os
+import sys

 sys.path.insert(0, os.path.abspath("../.."))

-from litellm import completion
-import litellm
-
-import pytest
-
+import json
+import time
+from datetime import datetime
+from unittest.mock import AsyncMock
+
+import httpx
+import pytest
+from openai.types.chat import ChatCompletionMessage
+from openai.types.chat.chat_completion import ChatCompletion, Choice
+from respx import MockRouter
+
+import litellm
+from litellm import completion
+from litellm.integrations.prompt_layer import PromptLayerLogger

 # def test_promptlayer_logging():
 #     try:
@@ -114,3 +122,76 @@ def test_promptlayer_logging_with_metadata_tags():
 #         print(e)

 # test_chat_openai()
+
+
+@pytest.mark.asyncio
+@pytest.mark.respx
+async def test_promptlayer_logging_with_mocked_request(respx_mock: MockRouter):
+    promptlayer_logger = PromptLayerLogger()
+
+    mock_response = AsyncMock()
+    obj = ChatCompletion(
+        id="foo",
+        model="gpt-4",
+        object="chat.completion",
+        choices=[
+            Choice(
+                finish_reason="stop",
+                index=0,
+                message=ChatCompletionMessage(
+                    content="Hello world!",
+                    role="assistant",
+                ),
+            )
+        ],
+        created=int(datetime.now().timestamp()),
+    )
+    litellm.set_verbose = True
+
+    mock_request = respx_mock.post(url__regex=r".*/chat/completions.*").mock(
+        return_value=httpx.Response(200, json=obj.model_dump(mode="json"))
+    )
+
+    mock_promptlayer_response = respx_mock.post(
+        "https://api.promptlayer.com/log-request"
+    ).mock(return_value=httpx.Response(200, json={"id": "mock_promptlayer_id"}))
+
+    response = completion(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "Hello, can you provide a response?"}],
+        temperature=0.2,
+        max_tokens=20,
+        metadata={"model": "ai21", "pl_tags": ["env:dev"]},
+    )
+
+    status_code = promptlayer_logger.log_event(
+        kwargs={
+            "model": "gpt-4",
+            "messages": [
+                {"role": "user", "content": "Hello, can you provide a response?"}
+            ],
+        },
+        response_obj=response,
+        start_time=datetime.now(),
+        end_time=datetime.now(),
+        print_verbose=print,
+    )
+
+    respx_mock.assert_all_called()
+
+    for call in mock_request.calls:
+        print(call)
+        print(call.request.content)
+
+        json_body = json.loads(call.request.content)
+
+        print(json_body)
+
+    for call in mock_promptlayer_response.calls:
+        print(call)
+        print(call.request.content)
+
+        json_body = json.loads(call.request.content)
+        print(json_body)
+
+    assert status_code == {"id": "mock_promptlayer_id"}
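The respx pattern used above generalizes beyond this test. A minimal, self-contained sketch of the same technique, with an illustrative URL and payload that are not from the commit:

    import httpx
    import respx


    @respx.mock
    def test_respx_sketch():
        # Any POST whose URL matches the regex gets the canned response.
        route = respx.post(url__regex=r".*/chat/completions.*").mock(
            return_value=httpx.Response(200, json={"id": "stub"})
        )
        resp = httpx.post("https://example.test/v1/chat/completions", json={})
        assert route.called
        assert resp.json() == {"id": "stub"}

The committed test instead receives the respx_mock fixture (typed as MockRouter) from respx's pytest plugin via @pytest.mark.respx, which scopes the router to the test and provides respx_mock.assert_all_called() to verify that both mocked endpoints were actually hit.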