commit 44f71aa321
parent 1da6026622

    logging replicate response logs

3 changed files with 6 additions and 3 deletions
Binary file not shown.
@@ -50,6 +50,7 @@ def handle_prediction_response(prediction_url, api_token, print_verbose):
     }
 
     status = ""
+    logs = ""
     while True and (status not in ["succeeded", "failed", "canceled"]):
         print_verbose("making request")
         time.sleep(0.0001)
@@ -60,9 +61,10 @@ def handle_prediction_response(prediction_url, api_token, print_verbose):
                 output_string = "".join(response_data['output'])
                 print_verbose(f"Non-streamed output:{output_string}")
             status = response_data['status']
+            logs = response_data.get("logs", "")
         else:
             print_verbose("Failed to fetch prediction status and output.")
-    return output_string
+    return output_string, logs
 
 # Function to handle prediction response (streaming)
 def handle_prediction_response_streaming(prediction_url, api_token, print_verbose):
@@ -131,14 +133,14 @@ def completion(
         print_verbose("streaming request")
         return handle_prediction_response_streaming(prediction_url, api_key, print_verbose)
     else:
-        result = handle_prediction_response(prediction_url, api_key, print_verbose)
+        result, logs = handle_prediction_response(prediction_url, api_key, print_verbose)
 
         ## LOGGING
         logging_obj.post_call(
             input=prompt,
             api_key="",
             original_response=result,
-            additional_args={"complete_input_dict": input_data},
+            additional_args={"complete_input_dict": input_data, "logs": logs},
         )
 
         print_verbose(f"raw model_response: {result}")
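Taken together, the three hunks above make the non-streaming Replicate path collect the prediction's "logs" field while polling and hand it back to the caller, which then attaches it to the post-call log entry. Below is a minimal, self-contained sketch of that flow; it assumes a Replicate-style JSON prediction endpoint, and the early break on a failed request is an assumption added here so the sketch cannot poll forever, not something this commit introduces.

import time
import requests

def handle_prediction_response(prediction_url, api_token, print_verbose):
    # Poll the prediction URL until it reaches a terminal state,
    # accumulating both the generated output and Replicate's run logs.
    output_string = ""
    logs = ""  # new in this commit: carried alongside the output
    status = ""
    headers = {"Authorization": f"Token {api_token}"}
    while status not in ["succeeded", "failed", "canceled"]:
        print_verbose("making request")
        time.sleep(0.0001)
        response = requests.get(prediction_url, headers=headers)
        if response.status_code == 200:
            response_data = response.json()
            if "output" in response_data:
                output_string = "".join(response_data["output"])
            status = response_data["status"]
            logs = response_data.get("logs", "")  # refresh logs on every poll
        else:
            print_verbose("Failed to fetch prediction status and output.")
            break  # assumption: stop polling on a failed request
    return output_string, logs  # callers now unpack (output, logs)

A caller in the non-streaming branch unpacks the tuple, e.g. result, logs = handle_prediction_response(prediction_url, api_key, print_verbose), exactly as the completion() hunk does.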
@@ -241,6 +241,7 @@ class Logging:
             self.model_call_details["additional_args"] = additional_args
 
             # User Logging -> if you pass in a custom logging function
+            print_verbose(f"model call details: {self.model_call_details}")
             print_verbose(
                 f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
             )
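The final hunk adds a verbose dump of the accumulated call details in the Logging class, which is how the Replicate logs threaded in via additional_args become visible after the call. A heavily trimmed sketch of that interaction, assuming a post_call shape matching the keyword arguments used in the completion() hunk (the real Logging class carries many more fields and callbacks):

class Logging:
    # Trimmed stand-in for litellm's Logging; only the fields touched here are kept.
    def __init__(self, logger_fn=None):
        self.logger_fn = logger_fn
        self.model_call_details = {}

    def post_call(self, input, api_key, original_response, additional_args=None):
        additional_args = additional_args or {}
        self.model_call_details["input"] = input
        self.model_call_details["api_key"] = api_key
        self.model_call_details["original_response"] = original_response
        self.model_call_details["additional_args"] = additional_args
        # new in this commit: surface everything, including additional_args["logs"]
        print(f"model call details: {self.model_call_details}")

logging_obj = Logging()
logging_obj.post_call(
    input="what is the weather in SF?",
    api_key="",
    original_response="It is sunny.",
    additional_args={"complete_input_dict": {}, "logs": "example replicate run logs"},
)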