forked from phoenix/litellm-mirror

commit fd9c9227b6 (parent 0cb4816ab2)

    clean

2 changed files with 13 additions and 11 deletions
@@ -74,7 +74,6 @@ class LLMonitorLogger:
                 f"LLMonitor Logging - Logging request for model {model}")

             if response_obj:
-                print(response_obj)
                 usage = parse_usage(
                     response_obj['usage']) if 'usage' in response_obj else None
                 output = response_obj[
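Note: parse_usage itself isn't shown in this diff. A minimal sketch of what such a helper could look like, assuming OpenAI-style usage keys on the input and a prompt/completion shape on the output (both assumptions, not confirmed by this commit):

# Hypothetical parse_usage helper, not the actual litellm implementation.
# Maps OpenAI-style token counts onto the shape that the "tokensUsage"
# field in the next hunk appears to expect.
def parse_usage(usage: dict) -> dict:
    return {
        "prompt": usage.get("prompt_tokens"),
        "completion": usage.get("completion_tokens"),
    }

print(parse_usage({"prompt_tokens": 12, "completion_tokens": 34}))
# {'prompt': 12, 'completion': 34}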
@@ -110,8 +109,6 @@ class LLMonitorLogger:
                 "tokensUsage": usage,
             }]

-            print(data)
-
             # print_verbose(f"LLMonitor Logging - final data object: {data}")

             response = requests.post(
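For context on the requests.post call this hunk ends with: the payload built above is a list containing a single event dict. A hedged sketch of that pattern, where the endpoint URL, the extra fields, and the timeout are illustrative assumptions rather than values taken from this diff:

import requests

# Assumed endpoint and event fields, for illustration only.
API_URL = "https://app.llmonitor.com/api/report"

data = [{
    "type": "llm",
    "name": "gpt-3.5-turbo",
    "tokensUsage": {"prompt": 12, "completion": 34},
}]

response = requests.post(API_URL, json=data, timeout=10)
print(response.status_code)

With the bare print(data) removed, the commented print_verbose line is what remains for re-enabling payload logging while debugging.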
@@ -14,14 +14,19 @@ litellm.failure_callback = ["llmonitor"]
 litellm.set_verbose = True

 # openai call
 # first_success_test = completion(model="gpt-3.5-turbo",
 #                                 messages=[{
 #                                     "role": "user",
 #                                     "content": "Hi 👋 - i'm openai"
 #                                 }])

 # print(first_success_test)
 def test_chat_openai():
     try:
         response = completion(model="gpt-3.5-turbo",
                               messages=[{
                                   "role": "user",
                                   "content": "Hi 👋 - i'm openai"
                               }])

         print(response)

     except Exception as e:
         print(e)


 def test_embedding_openai():
@@ -31,7 +36,7 @@ def test_embedding_openai():
         print(f"response: {str(response)[:50]}")
     except Exception as e:
         print(e)
         # pytest.fail(f"Error occurred: {e}")


 test_chat_openai()
 test_embedding_openai()
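To run these tests standalone, the setup implied by the hunk header (litellm.failure_callback = ["llmonitor"]) looks roughly like the following; the success_callback line and both environment-variable names are assumptions, not shown in this diff:

import os
import litellm
from litellm import completion

os.environ.setdefault("OPENAI_API_KEY", "sk-...")     # placeholder key
os.environ.setdefault("LLMONITOR_APP_ID", "app-...")  # assumed variable name

litellm.success_callback = ["llmonitor"]  # assumed to mirror failure_callback
litellm.failure_callback = ["llmonitor"]
litellm.set_verbose = True

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
)
print(response)

Because both tests swallow exceptions with print(e) and the pytest.fail call is commented out, a failing API call prints the error instead of failing the suite when run under pytest.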