fix(utils.py): allow user to opt in to raw request logging to langfuse
commit 7eae0ff7e3 (parent a8ea7c6d31)
5 changed files with 62 additions and 21 deletions
docs/my-website/docs/observability/raw_request_response.md (new file, +38 lines)

@@ -0,0 +1,38 @@
import Image from '@theme/IdealImage';

# Raw Request/Response Logging

See the raw request/response sent by LiteLLM in your logging provider (OTEL/Langfuse/etc.).

```python
# pip install langfuse
import litellm
import os

# log raw request/response
litellm.log_raw_request_response = True

# from https://cloud.langfuse.com/
os.environ["LANGFUSE_PUBLIC_KEY"] = ""
os.environ["LANGFUSE_SECRET_KEY"] = ""
# Optional, defaults to https://cloud.langfuse.com
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"

# LLM API keys
os.environ["OPENAI_API_KEY"] = ""

# set langfuse as a callback; litellm will send the data to langfuse
litellm.success_callback = ["langfuse"]

# openai call
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "user", "content": "Hi 👋 - i'm openai"}
    ],
)
```

**Expected Log**

<Image img={require('../../img/raw_request_log.png')}/>
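For intuition about what "raw request" means here: the logged curl command corresponds to the underlying HTTP call LiteLLM makes to the provider. Below is a minimal sketch of the equivalent request for the example above, written with `requests` and assuming the standard OpenAI chat-completions endpoint; it is illustrative only, not LiteLLM's internal code.

```python
# Illustrative sketch of the provider call that raw request logging
# surfaces. Assumes the standard OpenAI chat-completions endpoint;
# NOT LiteLLM's internal implementation, just the request's shape.
import os

import requests

raw_request = {
    "url": "https://api.openai.com/v1/chat/completions",
    "headers": {
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
        "Content-Type": "application/json",
    },
    "json": {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hi 👋 - i'm openai"}],
    },
}

response = requests.post(
    raw_request["url"],
    headers=raw_request["headers"],
    json=raw_request["json"],
)
print(response.json())
```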
docs/my-website/img/raw_request_log.png (new binary file, 168 KiB; not shown)
@@ -182,6 +182,7 @@ const sidebars = {
         label: "Logging & Observability",
         items: [
           "debugging/local_debugging",
+          "observability/raw_request_response",
           "observability/callbacks",
           "observability/custom_callback",
           "observability/langfuse_integration",
@@ -60,6 +60,7 @@ _async_failure_callback: List[Callable] = (
 pre_call_rules: List[Callable] = []
 post_call_rules: List[Callable] = []
 turn_off_message_logging: Optional[bool] = False
+log_raw_request_response: bool = False
 redact_messages_in_exceptions: Optional[bool] = False
 store_audit_logs = False  # Enterprise feature, allow users to see audit logs
 ## end of callbacks #############
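`log_raw_request_response` follows the same pattern as the neighbouring flags: a module-level boolean that users flip before making requests and that library code re-reads at call time. A minimal single-file sketch of that pattern (hypothetical names, not LiteLLM's internals):

```python
# Minimal sketch of the module-level opt-in flag pattern added above
# (hypothetical names, not LiteLLM internals): the library exposes a
# boolean default, and call sites re-read it on every request, so
# flipping it at runtime takes effect immediately.

log_raw_request_response: bool = False  # library default: opted out


def send_request(payload: dict) -> dict:
    """Stand-in for a provider call that honours the opt-in flag."""
    if log_raw_request_response is True:
        # stand-in for shipping the raw request to Langfuse/OTEL
        print(f"raw request: {payload}")
    return {"ok": True}


# user code: opt in once, globally (cf. litellm.log_raw_request_response = True)
log_raw_request_response = True
send_request({"model": "gpt-3.5-turbo", "messages": []})
```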
@@ -1347,7 +1347,8 @@ class Logging:
             )
         else:
             verbose_logger.debug(f"\033[92m{curl_command}\033[0m\n")
-        # log raw request to provider (like LangFuse)
+        # log raw request to provider (like LangFuse) -- if opted in.
+        if litellm.log_raw_request_response is True:
             try:
                 # [Non-blocking Extra Debug Information in metadata]
                 _litellm_params = self.model_call_details.get("litellm_params", {})
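The key change in this hunk: the raw-request metadata block is now gated behind the opt-in flag, and it stays inside a try/except so a logging failure can never break the actual LLM call. A rough sketch of that control flow (simplified; the metadata field names below are stand-ins, not the real LiteLLM fields):

```python
# Rough sketch of the opt-in, non-blocking logging flow in the hunk
# above (simplified; "raw_request" and helper names are stand-ins).
import litellm


def log_pre_call(model_call_details: dict, curl_command: str) -> None:
    # local debug output, always available
    print(f"\033[92m{curl_command}\033[0m\n")

    # log raw request to provider (like LangFuse) -- only if opted in
    if litellm.log_raw_request_response is True:
        try:
            # non-blocking: attach extra debug info to callback metadata
            litellm_params = model_call_details.get("litellm_params", {})
            metadata = litellm_params.get("metadata") or {}
            metadata["raw_request"] = curl_command
            litellm_params["metadata"] = metadata
        except Exception as e:
            # never let debug logging break the actual LLM call
            print(f"raw request logging failed (non-blocking): {e}")
```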
@@ -2735,7 +2736,7 @@ class Logging:
         only redacts when litellm.turn_off_message_logging == True
         """
         # check if user opted out of logging message/response to callbacks
-        if litellm.turn_off_message_logging == True:
+        if litellm.turn_off_message_logging is True:
             # remove messages, prompts, input, response from logging
             self.model_call_details["messages"] = [
                 {"role": "user", "content": "redacted-by-litellm"}
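This second hunk is a lint-level cleanup: for the values `turn_off_message_logging` (typed `Optional[bool]`) can actually hold (`True`, `False`, `None`), both spellings behave identically, but the identity check `is True` additionally rejects merely truthy values such as `1`, making the opt-out strictly explicit. A quick demonstration:

```python
# Why `is True` is stricter than `== True` for an Optional[bool] flag:
for value in (True, False, None, 1):
    print(value, value == True, value is True)
# True  True  True
# False False False
# None  False False
# 1     True  False   <- `== True` passes, `is True` does not
```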