mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
bumping version
This commit is contained in:
parent
2492ed23d1
commit
f149945d91
2 changed files with 20 additions and 20 deletions
|
@ -1,27 +1,27 @@
|
|||
#### What this tests ####
|
||||
# This tests if logging to the helicone integration actually works
|
||||
# pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
|
||||
import sys, os
|
||||
import traceback
|
||||
import pytest
|
||||
# #### What this tests ####
|
||||
# # This tests if logging to the helicone integration actually works
|
||||
# # pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
|
||||
# import sys, os
|
||||
# import traceback
|
||||
# import pytest
|
||||
|
||||
sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
|
||||
import litellm
|
||||
from litellm import embedding, completion
|
||||
# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
|
||||
# import litellm
|
||||
# from litellm import embedding, completion
|
||||
|
||||
litellm.success_callback = ["supabase"]
|
||||
litellm.failure_callback = ["supabase"]
|
||||
# litellm.success_callback = ["supabase"]
|
||||
# litellm.failure_callback = ["supabase"]
|
||||
|
||||
litellm.modify_integration("supabase",{"table_name": "litellm_logs"})
|
||||
# litellm.modify_integration("supabase",{"table_name": "litellm_logs"})
|
||||
|
||||
litellm.set_verbose = True
|
||||
# litellm.set_verbose = True
|
||||
|
||||
user_message = "Hello, how are you?"
|
||||
messages = [{ "content": user_message,"role": "user"}]
|
||||
# user_message = "Hello, how are you?"
|
||||
# messages = [{ "content": user_message,"role": "user"}]
|
||||
|
||||
|
||||
#openai call
|
||||
response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
|
||||
# #openai call
|
||||
# response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
|
||||
|
||||
#bad request call
|
||||
response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
|
||||
# #bad request call
|
||||
# response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
|
Loading…
Add table
Add a link
Reference in a new issue