forked from phoenix/litellm-mirror
fix promptlayer logging
This commit is contained in:
parent 508a2c1daf
commit 1913d36e05
3 changed files with 40 additions and 30 deletions
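For context (not part of the commit itself): the change drops the non-serializable litellm_logging_obj from kwargs before posting to PromptLayer and renames the success log line to "Prompt Layer Logging: success - ...". Below is a minimal sketch of how the fixed callback path gets exercised, mirroring the test updated in this commit; it assumes litellm is installed and that PROMPTLAYER_API_KEY and ANTHROPIC_API_KEY (names shown as assumptions) are exported in the environment.

# Sketch only -- not part of the diff below; mirrors the updated test.
# Assumes PROMPTLAYER_API_KEY and ANTHROPIC_API_KEY are set in the environment.
import io
import sys
import time

import litellm
from litellm import completion

litellm.success_callback = ["promptlayer"]  # enable the PromptLayer success callback
litellm.set_verbose = True                  # log lines are emitted via print_verbose

old_stdout = sys.stdout
sys.stdout = captured = io.StringIO()       # capture the verbose log output

response = completion(model="claude-instant-1.2",
                      messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])

time.sleep(1)                               # give the success callback a moment to fire
sys.stdout = old_stdout

# After this commit the success path logs "Prompt Layer Logging: success - ...".
assert "Prompt Layer Logging: success" in captured.getvalue()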
@@ -17,8 +17,11 @@ class PromptLayerLogger:
     def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
         # Method definition
         try:
+            if 'litellm_logging_obj' in kwargs:
+                kwargs.pop('litellm_logging_obj')
+
             print_verbose(
-                f"Prompt Layer Logging - Enters logging function for model {kwargs}"
+                f"Prompt Layer Logging - Enters logging function for model kwargs: {kwargs}\n, response: {response_obj}"
             )
 
             request_response = requests.post(
@@ -27,9 +30,7 @@ class PromptLayerLogger:
                     "function_name": "openai.ChatCompletion.create",
                     "kwargs": kwargs,
                     "tags": ["hello", "world"],
-                    "request_response": dict(
-                        response_obj
-                    ), # TODO: Check if we need a dict
+                    "request_response": dict(response_obj),
                     "request_start_time": int(start_time.timestamp()),
                     "request_end_time": int(end_time.timestamp()),
                     "api_key": self.key,
@@ -39,11 +40,9 @@ class PromptLayerLogger:
                     # "prompt_version":1,
                 },
             )
-
             print_verbose(
-                f"Prompt Layer Logging - final response object: {request_response}"
+                f"Prompt Layer Logging: success - final response object: {request_response}"
             )
         except:
-            # traceback.print_exc()
-            print_verbose(f"Prompt Layer Error - {traceback.format_exc()}")
+            print_verbose(f"error: Prompt Layer Error - {traceback.format_exc()}")
             pass
@@ -1,37 +1,48 @@
 #### What this tests ####
 # This tests if logging to the llmonitor integration actually works
 # Adds the parent directory to the system path
-# import sys
-# import os
+import sys
+import os
+import io
 
-# sys.path.insert(0, os.path.abspath('../..'))
+sys.path.insert(0, os.path.abspath('../..'))
 
-# from litellm import completion, embedding
-# import litellm
+from litellm import completion
+import litellm
 
-# litellm.success_callback = ["promptlayer"]
+litellm.success_callback = ["promptlayer"]
+litellm.set_verbose = True
+import time
 
 
-# litellm.set_verbose = True
 
+def test_promptlayer_logging():
+    try:
+        # Redirect stdout
+        old_stdout = sys.stdout
+        sys.stdout = new_stdout = io.StringIO()
 
 
-# def test_chat_openai():
-#     try:
-#         response = completion(model="gpt-3.5-turbo",
-#                               messages=[{
-#                                   "role": "user",
-#                                   "content": "Hi 👋 - i'm openai"
-#                               }])
+        response = completion(model="claude-instant-1.2",
+                              messages=[{
+                                  "role": "user",
+                                  "content": "Hi 👋 - i'm openai"
+                              }])
 
+        # print(response)
+        # Restore stdout
+        time.sleep(1)
+        sys.stdout = old_stdout
+        output = new_stdout.getvalue().strip()
+        print(output)
+        if "LiteLLM: Prompt Layer Logging: success" not in output:
+            raise Exception("Required log message not found!")
+
+    except Exception as e:
+        print(e)
+
+test_promptlayer_logging()
 
 # except Exception as e:
 #     print(e)
 
 
 
 # def test_chat_openai():
 #     litellm.success_callback = ["langfuse"]
 #     try:
 #         response = completion(model="replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
 #             messages=[{
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.615"
+version = "0.1.618"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"