Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 19:54:13 +00:00
Litellm dev 12 24 2024 p2 (#7400)
* fix(utils.py): default custom_llm_provider=None for supports_response_schema; closes https://github.com/BerriAI/litellm/issues/7397 (see the usage sketch after this list)
* refactor(langfuse/): call the Langfuse logger inside a CustomLogger-compatible Langfuse class; refactor the Langfuse logger to use verbose_logger.debug instead of print_verbose
* refactor(litellm_pre_call_utils.py): move config-based team callbacks inside the dynamic team-callback logic; enables simpler unit testing of config-based team callbacks
* fix(proxy/_types.py): handle TeamCallbackMetadata None values: drop None values if present; if all values are None, fall back to the default dict to avoid downstream errors (sketched below the file stats)
* test(test_proxy_utils.py): add a unit test preventing future regressions; asserts that team_id in config state is not popped off across calls; fixes https://github.com/BerriAI/litellm/issues/6787
* fix(langfuse_prompt_management.py): add success and failure logging event support
* fix: fix linting error
* test: fix test
* test: fix test
* test: override o1 prompt caching; OpenAI currently not working
* test: fix test
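The headline fix is that supports_response_schema no longer requires callers to resolve the provider themselves. A minimal usage sketch, assuming litellm exposes supports_response_schema at the package level as its docs describe; the model names are illustrative, not taken from the commit:

import litellm

# With custom_llm_provider defaulting to None, the provider is inferred
# from the model string (e.g. the "gemini/" prefix).
print(litellm.supports_response_schema(model="gemini/gemini-1.5-pro"))

# Passing the provider explicitly still works as before.
print(
    litellm.supports_response_schema(
        model="gemini-1.5-pro", custom_llm_provider="vertex_ai"
    )
)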
This commit is contained in:
parent d790ba0897
commit c95351e70f
12 changed files with 227 additions and 62 deletions
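The TeamCallbackMetadata change called out above is self-contained enough to sketch. This is a hedged illustration, not the real model: the field names and the from_raw helper are assumptions, and the actual class lives in litellm/proxy/_types.py:

from typing import Dict, List, Optional
from pydantic import BaseModel

class TeamCallbackMetadata(BaseModel):
    # Illustrative fields; the real model in proxy/_types.py may differ.
    success_callback: Optional[List[str]] = []
    failure_callback: Optional[List[str]] = []
    callback_vars: Optional[Dict[str, str]] = {}

    @classmethod
    def from_raw(cls, raw: dict) -> "TeamCallbackMetadata":
        # Drop explicit None values so they cannot clobber the defaults;
        # if every value is None, this yields the default empty containers,
        # avoiding downstream errors on None.
        return cls(**{k: v for k, v in raw.items() if v is not None})

# All-None input degrades to the defaults instead of None-typed fields.
meta = TeamCallbackMetadata.from_raw(
    {"success_callback": None, "failure_callback": None, "callback_vars": None}
)
assert meta.success_callback == [] and meta.callback_vars == {}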
(Excerpt of the diff: the LangFuseLogger hunks; the other changed files are not shown on this page.)

@@ -148,12 +148,7 @@ class LangFuseLogger:
         return metadata

-    # def log_error(kwargs, response_obj, start_time, end_time):
-    #     generation = trace.generation(
-    #         level ="ERROR" # can be any of DEBUG, DEFAULT, WARNING or ERROR
-    #         status_message='error' # can be any string (e.g. stringified stack trace or error body)
-    #     )
-    def log_event( # noqa: PLR0915
+    def _old_log_event( # noqa: PLR0915
         self,
         kwargs,
         response_obj,
@@ -167,7 +162,7 @@ class LangFuseLogger:
         # Method definition

         try:
-            print_verbose(
+            verbose_logger.debug(
                 f"Langfuse Logging - Enters logging function for model {kwargs}"
             )

@@ -260,7 +255,9 @@ class LangFuseLogger:
         ):
             input = prompt
             output = response_obj.get("response", "")
-        print_verbose(f"OUTPUT IN LANGFUSE: {output}; original: {response_obj}")
+        verbose_logger.debug(
+            f"OUTPUT IN LANGFUSE: {output}; original: {response_obj}"
+        )
         trace_id = None
         generation_id = None
         if self._is_langfuse_v2():
@@ -291,7 +288,7 @@ class LangFuseLogger:
                 input,
                 response_obj,
             )
-            print_verbose(
+            verbose_logger.debug(
                 f"Langfuse Layer Logging - final response object: {response_obj}"
             )
             verbose_logger.info("Langfuse Layer Logging - logging success")
@@ -444,7 +441,7 @@ class LangFuseLogger:
     ) -> tuple:
         import langfuse

-        print_verbose("Langfuse Layer Logging - logging to langfuse v2")
+        verbose_logger.debug("Langfuse Layer Logging - logging to langfuse v2")

         try:
             metadata = self._prepare_metadata(metadata)
@@ -577,7 +574,7 @@ class LangFuseLogger:
             trace_params["metadata"] = {"metadata_passed_to_litellm": metadata}

         cost = kwargs.get("response_cost", None)
-        print_verbose(f"trace: {cost}")
+        verbose_logger.debug(f"trace: {cost}")

         clean_metadata["litellm_response_cost"] = cost
         if standard_logging_object is not None:
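Every hunk after the first applies the same mechanical substitution: print_verbose becomes verbose_logger.debug. A generic stdlib sketch of the pattern; the logger name and the helper functions below are illustrative, not LiteLLM's actual logging module:

import logging

# A shared module-level logger replaces the ad-hoc print helper, so
# verbosity is governed by logging levels and handlers instead of
# unconditional prints.
verbose_logger = logging.getLogger("litellm")

def log_cost_old(kwargs: dict) -> None:
    cost = kwargs.get("response_cost", None)
    print(f"trace: {cost}")  # old style: bypasses logging configuration

def log_cost_new(kwargs: dict) -> None:
    cost = kwargs.get("response_cost", None)
    verbose_logger.debug(f"trace: {cost}")  # new style: filterable, routable

Callers enable the output with standard configuration, e.g. logging.basicConfig(level=logging.DEBUG), rather than a global verbose-print flag.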
|