diff --git a/docs/my-website/docs/debugging/local_debugging.md b/docs/my-website/docs/debugging/local_debugging.md
index 87faef73e..a9409bfab 100644
--- a/docs/my-website/docs/debugging/local_debugging.md
+++ b/docs/my-website/docs/debugging/local_debugging.md
@@ -23,6 +23,14 @@ response = completion(model="gpt-3.5-turbo", messages=messages)
 response = completion("command-nightly", messages)
 ```
 
+## JSON Logs
+
+If you need to store the logs as JSON, just set `litellm.json_logs = True`.
+
+We currently just log the raw POST request from litellm as JSON - [**See Code**].
+
+[Share feedback here](https://github.com/BerriAI/litellm/issues)
+
 ## Logger Function
 
 But sometimes all you care about is seeing exactly what's getting sent to your api call and what's being returned - e.g. if the api call is failing, why is that happening? what are the exact params being set?
diff --git a/litellm/__init__.py b/litellm/__init__.py
index 49287d12f..a3d61bce1 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -2,7 +2,7 @@ import threading, requests, os
 from typing import Callable, List, Optional, Dict, Union, Any, Literal
 from litellm.caching import Cache
-from litellm._logging import set_verbose, _turn_on_debug, verbose_logger
+from litellm._logging import set_verbose, _turn_on_debug, verbose_logger, json_logs
 from litellm.proxy._types import (
     KeyManagementSystem,
     KeyManagementSettings,
diff --git a/litellm/_logging.py b/litellm/_logging.py
index 4f7e46446..f31ee41f8 100644
--- a/litellm/_logging.py
+++ b/litellm/_logging.py
@@ -1,7 +1,7 @@
 import logging
 
 set_verbose = False
-
+json_logs = False
 # Create a handler for the logger (you may need to adapt this based on your needs)
 handler = logging.StreamHandler()
 handler.setLevel(logging.DEBUG)
diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py
index 8e37a1ec1..415f3d2d2 100644
--- a/litellm/integrations/langsmith.py
+++ b/litellm/integrations/langsmith.py
@@ -73,10 +73,6 @@ class LangsmithLogger:
             elif type(value) != dict and is_serializable(value=value):
                 new_kwargs[key] = value
 
-        print(f"type of response: {type(response_obj)}")
-        for k, v in new_kwargs.items():
-            print(f"key={k}, type of arg: {type(v)}, value={v}")
-
         if isinstance(response_obj, BaseModel):
             try:
                 response_obj = response_obj.model_dump()
diff --git a/litellm/utils.py b/litellm/utils.py
index 302d18e99..e5f7f9d11 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1202,7 +1202,14 @@ class Logging:
                 if verbose_logger.level == 0:
                     # this means verbose logger was not switched on - user is in litellm.set_verbose=True
                     print_verbose(f"\033[92m{curl_command}\033[0m\n")
-                verbose_logger.info(f"\033[92m{curl_command}\033[0m\n")
+
+                if litellm.json_logs:
+                    # NOTE(review): original patch had {"api_base": {api_base}} — a set
+                    # literal, which structured/JSON log formatters cannot serialize.
+                    # Pass the value directly so `extra` holds plain, serializable fields.
+                    verbose_logger.info(
+                        "POST Request Sent from LiteLLM",
+                        extra={"api_base": api_base, **masked_headers},
+                    )
+                else:
+                    verbose_logger.info(f"\033[92m{curl_command}\033[0m\n")
                 if self.logger_fn and callable(self.logger_fn):
                     try:
                         self.logger_fn(