#### What this tests ####
# This tests if logging to the litedebugger integration actually works
# pytest mistakes intentional bad calls for failed tests -> [TODO] fix this
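
# A minimal sketch of one way to handle the TODO above, assuming the intent is
# that an intentionally bad call should count as expected behaviour rather than
# a test failure: run it inside pytest.raises. The model name below is a
# hypothetical invalid value used only for illustration.
def test_intentional_bad_call_raises():
    with pytest.raises(Exception):
        completion(model="a-model-that-does-not-exist", messages=[{"role": "user", "content": "hi"}])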
import sys
import os
import traceback

import pytest

sys.path.insert(0, os.path.abspath('../..'))  # Adds the parent directory to the system path

import litellm
from litellm import embedding, completion

litellm.set_verbose = True  # print litellm's debug output so the logging flow is visible

# Test 1: On completion call - without setting client to true -> ensure no logs are created
response = completion(model="claude-instant-1", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
# print(f"response: {response}")

# Setting use_client to true enables the client integration, so the calls below should be logged to litedebugger
litellm.use_client = True

user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]

# Test 2: On completion call
response = completion(model="claude-instant-1", messages=messages)
# print(f"response: {response}")

# Test 3: On embedding call
response = embedding(model="text-embedding-ada-002", input=["sample text"])
# print(f"response: {response}")

# Test 4: On streaming completion call
response = completion(
    model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
    stream=True,
)
print(f"response: {response}")
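
# With stream=True the call above returns an iterator of chunks, so the print
# above only shows the wrapper object. A minimal sketch consuming the stream,
# assuming OpenAI-style chunks where the incremental text lives in
# choices[0]["delta"]:
for chunk in response:
    print(f"chunk: {chunk['choices'][0]['delta']}")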