forked from phoenix/litellm-mirror

Commit: simplifying calling litellm debugger

parent 7da8dfc820 · commit 3286a708fe

7 changed files with 12 additions and 17 deletions
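The gist of the change, as a minimal sketch (assuming a provider key such as OPENAI_API_KEY is already exported; the model name is illustrative):

```python
import os
import litellm
from litellm import completion

os.environ["LITELLM_EMAIL"] = "your_user_email"  # identifies your dashboard, per the docs below

# Before this commit, the debugger had to be registered on all three events:
# litellm.input_callback = ["lite_debugger"]
# litellm.success_callback = ["lite_debugger"]
# litellm.failure_callback = ["lite_debugger"]

# After this commit, a single flag does the same thing:
litellm.debugger = True

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)
```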
@@ -16,14 +16,13 @@ See our live dashboard 👉 [admin.litellm.ai](https://admin.litellm.ai/)
 By default, your dashboard is viewable at `admin.litellm.ai/<your_email>`.
 
 ```
 import litellm, os
 
 ## Set your email
 os.environ["LITELLM_EMAIL"] = "your_user_email"
 
-## LOG ON ALL 3 EVENTS
-litellm.input_callback = ["lite_debugger"]
-litellm.success_callback = ["lite_debugger"]
-litellm.failure_callback = ["lite_debugger"]
+## Set debugger to true
+litellm.debugger = True
 
 ```
 
 ## Example Usage
@@ -36,12 +35,8 @@ By default, your dashboard is viewable at `admin.litellm.ai/<your_email>`.
 ## Set ENV variable
 os.environ["LITELLM_EMAIL"] = "your_email"
 
-## LOG ON ALL 3 EVENTS
-litellm.input_callback = ["lite_debugger"]
-litellm.success_callback = ["lite_debugger"]
-litellm.failure_callback = ["lite_debugger"]
+## Set debugger to true
+litellm.debugger = True
 
-litellm.set_verbose = True
-
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
@@ -52,6 +47,5 @@ By default, your dashboard is viewable at `admin.litellm.ai/<your_email>`.
 
 # bad request call
 response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
 
 ```
-
@@ -21,6 +21,7 @@ hugging_api_token: Optional[str] = None
 togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
+debugger = False
 model_cost = {
     "gpt-3.5-turbo": {
         "max_tokens": 4000,
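Because the flag ships as module state defaulting to `False`, existing code is unaffected until a caller opts in. A quick hedged check (assuming a standard `pip install litellm` import path):

```python
import litellm

# New module-level flag; defaults to False, so behavior is unchanged by default
assert litellm.debugger is False

# Opt in before the first completion() call; utils.client() reads the flag
# lazily during setup (see the utils.py hunk further down)
litellm.debugger = True
```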
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -9,11 +9,7 @@
 # import litellm
 # from litellm import embedding, completion
 
-# litellm.input_callback = ["lite_debugger"]
-# litellm.success_callback = ["lite_debugger"]
-# litellm.failure_callback = ["lite_debugger"]
+# litellm.debugger = True
 
-# litellm.set_verbose = True
-
 # user_message = "Hello, how are you?"
 # messages = [{ "content": user_message,"role": "user"}]
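The test file stays fully commented out in this commit; for reference, an uncommented sketch of the same flow (assumes OPENAI_API_KEY is exported and the lite_debugger endpoint is reachable; the model name is illustrative):

```python
import litellm
from litellm import completion

litellm.debugger = True  # logs the request on input, success, and failure

user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]

response = completion(model="gpt-3.5-turbo", messages=messages)
print(response)
```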
@@ -286,6 +286,10 @@ def client(original_function):
         ):  # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
             try:
                 global callback_list, add_breadcrumb, user_logger_fn
+                if litellm.debugger: # add to input, success and failure callbacks if user sets debugging to true
+                    litellm.input_callback.append("lite_debugger")
+                    litellm.success_callback.append("lite_debugger")
+                    litellm.failure_callback.append("lite_debugger")
                 if (
                     len(litellm.input_callback) > 0 or len(litellm.success_callback) > 0 or len(litellm.failure_callback) > 0
                 ) and len(callback_list) == 0:
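Note that `client()` appends `"lite_debugger"` unconditionally whenever the flag is set, so if this setup path ever ran more than once the callback could be registered twice. An illustrative, idempotent variant (not the committed code) would guard the append:

```python
import litellm

def register_lite_debugger() -> None:
    # Illustrative variant, not the committed code: append "lite_debugger"
    # only when it is absent, so repeated setup passes stay idempotent.
    for callbacks in (
        litellm.input_callback,
        litellm.success_callback,
        litellm.failure_callback,
    ):
        if "lite_debugger" not in callbacks:
            callbacks.append("lite_debugger")

if litellm.debugger:
    register_lite_debugger()
```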