forked from phoenix/litellm-mirror
feat(utils.py): json logs for raw request sent by litellm
Make it easier to view verbose logs in Datadog
This commit is contained in:
parent
f0e48cdd53
commit
b46db8b891
5 changed files with 18 additions and 7 deletions
|
@ -23,6 +23,14 @@ response = completion(model="gpt-3.5-turbo", messages=messages)
|
||||||
response = completion("command-nightly", messages)
|
response = completion("command-nightly", messages)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## JSON Logs
|
||||||
|
|
||||||
|
If you need to store the logs as JSON, just set `litellm.json_logs = True`.
|
||||||
|
|
||||||
|
We currently just log the raw POST request from litellm as JSON - [**See Code**].
|
||||||
|
|
||||||
|
[Share feedback here](https://github.com/BerriAI/litellm/issues)
|
||||||
|
|
||||||
## Logger Function
|
## Logger Function
|
||||||
But sometimes all you care about is seeing exactly what's getting sent to your api call and what's being returned - e.g. if the api call is failing, why is that happening? what are the exact params being set?
|
But sometimes all you care about is seeing exactly what's getting sent to your api call and what's being returned - e.g. if the api call is failing, why is that happening? what are the exact params being set?
|
||||||
|
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
import threading, requests, os
|
import threading, requests, os
|
||||||
from typing import Callable, List, Optional, Dict, Union, Any, Literal
|
from typing import Callable, List, Optional, Dict, Union, Any, Literal
|
||||||
from litellm.caching import Cache
|
from litellm.caching import Cache
|
||||||
from litellm._logging import set_verbose, _turn_on_debug, verbose_logger
|
from litellm._logging import set_verbose, _turn_on_debug, verbose_logger, json_logs
|
||||||
from litellm.proxy._types import (
|
from litellm.proxy._types import (
|
||||||
KeyManagementSystem,
|
KeyManagementSystem,
|
||||||
KeyManagementSettings,
|
KeyManagementSettings,
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
set_verbose = False
|
set_verbose = False
|
||||||
|
json_logs = False
|
||||||
# Create a handler for the logger (you may need to adapt this based on your needs)
|
# Create a handler for the logger (you may need to adapt this based on your needs)
|
||||||
handler = logging.StreamHandler()
|
handler = logging.StreamHandler()
|
||||||
handler.setLevel(logging.DEBUG)
|
handler.setLevel(logging.DEBUG)
|
||||||
|
|
|
@ -73,10 +73,6 @@ class LangsmithLogger:
|
||||||
elif type(value) != dict and is_serializable(value=value):
|
elif type(value) != dict and is_serializable(value=value):
|
||||||
new_kwargs[key] = value
|
new_kwargs[key] = value
|
||||||
|
|
||||||
print(f"type of response: {type(response_obj)}")
|
|
||||||
for k, v in new_kwargs.items():
|
|
||||||
print(f"key={k}, type of arg: {type(v)}, value={v}")
|
|
||||||
|
|
||||||
if isinstance(response_obj, BaseModel):
|
if isinstance(response_obj, BaseModel):
|
||||||
try:
|
try:
|
||||||
response_obj = response_obj.model_dump()
|
response_obj = response_obj.model_dump()
|
||||||
|
|
|
@ -1202,6 +1202,13 @@ class Logging:
|
||||||
if verbose_logger.level == 0:
|
if verbose_logger.level == 0:
|
||||||
# this means verbose logger was not switched on - user is in litellm.set_verbose=True
|
# this means verbose logger was not switched on - user is in litellm.set_verbose=True
|
||||||
print_verbose(f"\033[92m{curl_command}\033[0m\n")
|
print_verbose(f"\033[92m{curl_command}\033[0m\n")
|
||||||
|
|
||||||
|
if litellm.json_logs:
|
||||||
|
verbose_logger.info(
|
||||||
|
"POST Request Sent from LiteLLM",
|
||||||
|
extra={"api_base": {api_base}, **masked_headers},
|
||||||
|
)
|
||||||
|
else:
|
||||||
verbose_logger.info(f"\033[92m{curl_command}\033[0m\n")
|
verbose_logger.info(f"\033[92m{curl_command}\033[0m\n")
|
||||||
if self.logger_fn and callable(self.logger_fn):
|
if self.logger_fn and callable(self.logger_fn):
|
||||||
try:
|
try:
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue