forked from phoenix/litellm-mirror
clean format
parent da4bdfab95
commit 9a43b23876
3 changed files with 66 additions and 9 deletions
@@ -9,6 +9,37 @@ import requests
 
 dotenv.load_dotenv()  # Loading env variables using dotenv
 
+# convert to {completion: xx, tokens: xx}
+def parse_usage(usage):
+    return {
+        "completion": usage["completion_tokens"],
+        "prompt": usage["prompt_tokens"],
+    }
+
+
+def parse_messages(input):
+
+    if input is None:
+        return None
+
+    def clean_message(message):
+        if "message" in message:
+            return clean_message(message["message"])
+
+        return {
+            "role": message["role"],
+            "text": message["content"],
+        }
+
+    if isinstance(input, list):
+        if len(input) == 1:
+            return clean_message(input[0])
+        else:
+            return [clean_message(msg) for msg in input]
+    else:
+        return clean_message(input)
+
+
 class LLMonitorLogger:
     # Class variables or attributes
     def __init__(self):
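For reference, a quick sketch of what the two new helpers return; the token counts and messages below are illustrative examples, not values from this commit:

parse_usage({"completion_tokens": 9, "prompt_tokens": 5, "total_tokens": 14})
# -> {"completion": 9, "prompt": 5}

# a single-element message list is unwrapped to one object, and "content" becomes "text"
parse_messages([{"role": "user", "content": "Hi"}])
# -> {"role": "user", "text": "Hi"}

# completion choices work too: clean_message recurses into the nested "message" field
parse_messages([{"message": {"role": "assistant", "content": "Hello!"}}])
# -> {"role": "assistant", "text": "Hello!"}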
@@ -17,15 +48,33 @@ class LLMonitorLogger:
             "LLMONITOR_API_URL") or "https://app.llmonitor.com"
         self.app_id = os.getenv("LLMONITOR_APP_ID")
 
-    def log_event(self, type, run_id, error, usage, model, messages,
-                  response_obj, user_id, time, print_verbose):
+    def log_event(
+        self,
+        type,
+        run_id,
+        model,
+        print_verbose,
+        messages=None,
+        user_id=None,
+        response_obj=None,
+        time=datetime.datetime.now(),
+        error=None,
+    ):
         # Method definition
         try:
             print_verbose(
                 f"LLMonitor Logging - Enters logging function for model {model}"
             )
 
-            print(type, model, messages, response_obj, time, end_user)
+            if response_obj:
+                usage = parse_usage(response_obj['usage'])
+                output = response_obj['choices']
+            else:
+                usage = None
+                output = None
+
+            print(type, run_id, model, messages, usage, output, time, user_id,
+                  error)
 
             headers = {'Content-Type': 'application/json'}
 
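The refactored signature keeps type, run_id, model, and print_verbose required and makes the rest optional. A hedged sketch of a direct call follows; it assumes LLMonitorLogger and the helpers above are importable, and every value is a placeholder (in this commit the real caller is handle_success, shown further down). Note that the time=datetime.datetime.now() default is evaluated once when the class body runs, so callers should pass the actual event time explicitly, as handle_success does with end_time.

import datetime

# placeholder response shaped like an OpenAI chat completion
response = {
    "usage": {"completion_tokens": 9, "prompt_tokens": 5},
    "choices": [{"message": {"role": "assistant", "content": "Hello!"}}],
}

logger = LLMonitorLogger()
logger.log_event(
    type="end",                # event type; handle_success passes "end"
    run_id="1234",             # litellm_call_id in practice
    model="gpt-3.5-turbo",
    print_verbose=print,
    messages=[{"role": "user", "content": "Hi"}],
    user_id="user-42",
    response_obj=response,
    time=datetime.datetime.now(),
)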
@@ -38,8 +87,9 @@ class LLMonitorLogger:
                 "event": type,
                 "timestamp": time.isoformat(),
                 "userId": user_id,
-                "input": messages,
-                "output": response_obj['choices'][0]['message']['content'],
+                "input": parse_messages(messages),
+                "usage": usage,
+                "output": parse_messages(output),
             }
 
             print_verbose(f"LLMonitor Logging - final data object: {data}")
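Putting the hunks together, for a single-message chat completion the data object logged above would look roughly like the following; the values are illustrative, and only the keys visible in this hunk are listed (the request may carry additional fields, such as the app id, that are outside this diff):

data = {
    "event": "end",
    "timestamp": "2023-08-21T12:34:56.789012",
    "userId": "user-42",
    "input": {"role": "user", "text": "Hi"},
    "usage": {"completion": 9, "prompt": 5},
    "output": {"role": "assistant", "text": "Hello!"},
}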
@@ -15,12 +15,21 @@ litellm.error_callback = ["llmonitor"]
 
 litellm.set_verbose = True
 
 # openai call
 os.environ[
     "OPENAI_API_KEY"] = "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"
 
 print(os.environ["OPENAI_API_KEY"])
 
 # def my_custom_logging_fn(model_call_dict):
 #     print(f"model call details: {model_call_dict}")
 
 # # openai call
 # response = completion(model="gpt-3.5-turbo",
 #                       messages=[{
 #                           "role": "user",
 #                           "content": "Hi 👋 - i'm openai"
-#                       }])
+#                       }],
+#                       logger_fn=my_custom_logging_fn)
 
 # print(response)
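Condensed, the test script amounts to the sketch below. The success callback registration is an assumption (only error_callback is visible in the hunk header above), and the API key is expected to come from the environment rather than being hard-coded:

import litellm
from litellm import completion

litellm.success_callback = ["llmonitor"]   # assumed; mirrors the error callback
litellm.error_callback = ["llmonitor"]
litellm.set_verbose = True

# requires OPENAI_API_KEY and LLMONITOR_APP_ID in the environment
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
)
print(response)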
@@ -986,7 +986,6 @@ def handle_success(args, kwargs, result, start_time, end_time):
             print_verbose("reaches llmonitor for logging!")
             model = args[0] if len(args) > 0 else kwargs["model"]
             messages = args[1] if len(args) > 1 else kwargs["messages"]
-            usage = kwargs["usage"]
             llmonitorLogger.log_event(
                 type="end",
                 model=model,
@@ -994,7 +993,6 @@ def handle_success(args, kwargs, result, start_time, end_time):
                 user_id=litellm._thread_context.user,
                 response_obj=result,
                 time=end_time,
-                usage=usage,
                 run_id=kwargs["litellm_call_id"],
                 print_verbose=print_verbose,
             )