Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
fix promptlayer logging
parent 508a2c1daf
commit 1913d36e05
3 changed files with 40 additions and 30 deletions
@@ -1,37 +1,48 @@
#### What this tests ####
# This tests if logging to the promptlayer integration actually works

# Adds the parent directory to the system path
# import sys
# import os
import sys
import os
import io

# sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../..'))

# from litellm import completion, embedding
# import litellm
from litellm import completion
import litellm

# litellm.success_callback = ["promptlayer"]
litellm.success_callback = ["promptlayer"]
litellm.set_verbose = True
import time


# litellm.set_verbose = True


def test_promptlayer_logging():
    try:
        # Redirect stdout so the integration's log lines can be inspected
        old_stdout = sys.stdout
        sys.stdout = new_stdout = io.StringIO()

        # def test_chat_openai():
        #     try:
        #         response = completion(model="gpt-3.5-turbo",
        #                               messages=[{
        #                                   "role": "user",
        #                                   "content": "Hi 👋 - i'm openai"
        #                               }])
        response = completion(model="claude-instant-1.2",
                              messages=[{
                                  "role": "user",
                                  "content": "Hi 👋 - i'm openai"
                              }])

        # print(response)
        # Give the success callback a moment to run, then restore stdout
        time.sleep(1)
        sys.stdout = old_stdout

        output = new_stdout.getvalue().strip()
        print(output)
        if "LiteLLM: Prompt Layer Logging: success" not in output:
            raise Exception("Required log message not found!")

    except Exception as e:
        sys.stdout = old_stdout  # restore stdout so the error is actually visible
        print(e)


test_promptlayer_logging()

# except Exception as e:
#     print(e)



# def test_chat_openai():
#     litellm.success_callback = ["langfuse"]
#     try:
#         response = completion(model="replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
#                               messages=[{
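For reference, the callback wiring this test exercises can be reproduced outside the test harness. The sketch below is illustrative, not part of the commit: it assumes a PromptLayer API key exposed as the PROMPTLAYER_API_KEY environment variable plus an Anthropic key for claude-instant-1.2, while litellm.success_callback, litellm.set_verbose, and the "promptlayer" callback name come directly from the test above.

# Minimal sketch: enable PromptLayer logging for one completion call.
# Assumes PROMPTLAYER_API_KEY and ANTHROPIC_API_KEY are set in the environment.
import litellm
from litellm import completion

litellm.success_callback = ["promptlayer"]  # log each successful call to PromptLayer
litellm.set_verbose = True                  # print integration logs to stdout

response = completion(
    model="claude-instant-1.2",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
)
print(response)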
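A side note on the capture pattern the test uses: swapping sys.stdout by hand and restoring it later can also be written with contextlib.redirect_stdout, which restores the stream unconditionally. This is a generic sketch of that alternative, not code from the repository:

# Generic sketch: capture stdout with a context manager so it is always
# restored, even if the code under test raises.
import io
from contextlib import redirect_stdout

buffer = io.StringIO()
with redirect_stdout(buffer):
    print("LiteLLM: Prompt Layer Logging: success")  # stand-in for the real log line

output = buffer.getvalue().strip()
assert "LiteLLM: Prompt Layer Logging: success" in output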