Litellm dev 12 24 2024 p2 (#7400)

* fix(utils.py): default custom_llm_provider=None for 'supports_response_schema'

Closes https://github.com/BerriAI/litellm/issues/7397

* refactor(langfuse/): call langfuse logger inside customlogger compatible langfuse class, refactor langfuse logger to use verbose_logger.debug instead of print_verbose

* refactor(litellm_pre_call_utils.py): move config based team callbacks inside dynamic team callback logic

enables simpler unit testing for config-based team callbacks

* fix(proxy/_types.py): handle teamcallbackmetadata - none values

drop none values if present. if all none, use default dict to avoid downstream errors

* test(test_proxy_utils.py): add unit test preventing future issues - asserts team_id in config state not popped off across calls

Fixes https://github.com/BerriAI/litellm/issues/6787

* fix(langfuse_prompt_management.py): add success + failure logging event support

* fix: fix linting error

* test: fix test

* test: fix test

* test: override o1 prompt caching - openai currently not working

* test: fix test
This commit is contained in:
Krish Dholakia 2024-12-24 20:33:41 -08:00 committed by GitHub
parent d790ba0897
commit c95351e70f
12 changed files with 227 additions and 62 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@@ -1011,13 +1011,29 @@ class TeamCallbackMetadata(LiteLLMPydanticObjectBase):
@model_validator(mode="before")
@classmethod
def validate_callback_vars(cls, values):
    """Normalize and validate team callback settings before model construction.

    - Drops any key whose value is None, so downstream code never sees
      explicit-None callbacks (see BerriAI/litellm#7397 context in commit msg).
    - If every provided value is None, returns a fully-defaulted dict
      (empty callbacks) to avoid downstream errors.
    - Rejects callback variables that are not declared fields of
      StandardCallbackDynamicParams.

    Raises:
        ValueError: if callback_vars contains an unknown key.
    """
    success_callback = values.get("success_callback", [])
    if success_callback is None:
        values.pop("success_callback", None)
    failure_callback = values.get("failure_callback", [])
    if failure_callback is None:
        values.pop("failure_callback", None)
    callback_vars = values.get("callback_vars", {})
    if callback_vars is None:
        values.pop("callback_vars", None)
    if all(val is None for val in values.values()):
        return {
            "success_callback": [],
            "failure_callback": [],
            "callback_vars": {},
        }
    valid_keys = set(StandardCallbackDynamicParams.__annotations__.keys())
    # BUGFIX(review): the rendered diff retained both the old, unguarded
    # validation loop and its None-guarded replacement. The unguarded loop
    # ran first and raised TypeError when callback_vars was None (the local
    # stays None even after the pop above). Keep only the guarded loop.
    if callback_vars is not None:
        for key in callback_vars:
            if key not in valid_keys:
                raise ValueError(
                    f"Invalid callback variable: {key}. Must be one of {valid_keys}"
                )
    return values

View file

@@ -120,7 +120,7 @@ def convert_key_logging_metadata_to_callback(
# Resolve dynamic (per-team) callback/logging settings for a request.
# NOTE(review): this span is a rendered diff — old and new lines are
# interleaved with the +/- markers stripped (e.g. the two signature lines
# below and the duplicated team_metadata branches), so it is not runnable
# as shown. Comments describe apparent intent only; verify against the
# real post-commit file.
def _get_dynamic_logging_metadata(
user_api_key_dict: UserAPIKeyAuth,
user_api_key_dict: UserAPIKeyAuth, proxy_config: ProxyConfig
) -> Optional[TeamCallbackMetadata]:
callback_settings_obj: Optional[TeamCallbackMetadata] = None
if (
@ -132,24 +132,31 @@ def _get_dynamic_logging_metadata(
data=AddTeamCallback(**item),
team_callback_settings_obj=callback_settings_obj,
)
# Branch: team metadata carries explicit "callback_settings" — build a
# TeamCallbackMetadata from it and log at debug level.
elif user_api_key_dict.team_metadata is not None:
elif (
user_api_key_dict.team_metadata is not None
and "callback_settings" in user_api_key_dict.team_metadata
):
"""
callback_settings = {
{
'callback_vars': {'langfuse_public_key': 'pk', 'langfuse_secret_key': 'sk_'},
'failure_callback': [],
'success_callback': ['langfuse', 'langfuse']
}
}
"""
team_metadata = user_api_key_dict.team_metadata
if "callback_settings" in team_metadata:
callback_settings = team_metadata.get("callback_settings", None) or {}
callback_settings_obj = TeamCallbackMetadata(**callback_settings)
verbose_proxy_logger.debug(
"Team callback settings activated: %s", callback_settings_obj
callback_settings = team_metadata.get("callback_settings", None) or {}
callback_settings_obj = TeamCallbackMetadata(**callback_settings)
verbose_proxy_logger.debug(
"Team callback settings activated: %s", callback_settings_obj
)
# Branch (added by this commit, per the commit message): fall back to
# config-based team callbacks keyed by team_id — presumably enables the
# simpler unit testing mentioned in the commit message; confirm against
# LiteLLMProxyRequestSetup.add_team_based_callbacks_from_config.
elif user_api_key_dict.team_id is not None:
callback_settings_obj = (
LiteLLMProxyRequestSetup.add_team_based_callbacks_from_config(
team_id=user_api_key_dict.team_id, proxy_config=proxy_config
)
"""
callback_settings = {
{
'callback_vars': {'langfuse_public_key': 'pk', 'langfuse_secret_key': 'sk_'},
'failure_callback': [],
'success_callback': ['langfuse', 'langfuse']
}
}
"""
)
return callback_settings_obj
@@ -343,6 +350,29 @@ class LiteLLMProxyRequestSetup:
return final_tags
@staticmethod
def add_team_based_callbacks_from_config(
    team_id: str,
    proxy_config: ProxyConfig,
) -> Optional[TeamCallbackMetadata]:
    """Build a team's callback settings from the proxy config.

    Returns None when the config declares nothing for this team. The
    callback variables come from the team's "callback_vars" mapping when
    present, otherwise from the team config itself, minus the reserved
    team_id / success_callback / failure_callback keys.
    """
    team_config = proxy_config.load_team_config(team_id=team_id)
    if not team_config:
        return None
    vars_source = team_config.get("callback_vars", team_config)
    # Copy while filtering out the reserved keys that are not callback vars.
    filtered_vars = {
        key: value
        for key, value in vars_source.items()
        if key not in ("team_id", "success_callback", "failure_callback")
    }
    return TeamCallbackMetadata(
        success_callback=team_config.get("success_callback", None),
        failure_callback=team_config.get("failure_callback", None),
        callback_vars=filtered_vars,
    )
async def add_litellm_data_to_request( # noqa: PLR0915
data: dict,
@ -551,24 +581,9 @@ async def add_litellm_data_to_request( # noqa: PLR0915
if "tags" in data:
data[_metadata_variable_name]["tags"] = data["tags"]
### TEAM-SPECIFIC PARAMS ###
if user_api_key_dict.team_id is not None:
team_config = await proxy_config.load_team_config(
team_id=user_api_key_dict.team_id
)
if len(team_config) == 0:
pass
else:
team_id = team_config.pop("team_id", None)
data[_metadata_variable_name]["team_id"] = team_id
data = {
**team_config,
**data,
} # add the team-specific configs to the completion call
# Team Callbacks controls
callback_settings_obj = _get_dynamic_logging_metadata(
user_api_key_dict=user_api_key_dict
user_api_key_dict=user_api_key_dict, proxy_config=proxy_config
)
if callback_settings_obj is not None:
data["success_callback"] = callback_settings_obj.success_callback

View file

@ -1362,14 +1362,14 @@ class ProxyConfig:
team_config[k] = get_secret(v)
return team_config
async def load_team_config(self, team_id: str):
def load_team_config(self, team_id: str):
"""
- for a given team id
- return the relevant completion() call params
"""
# load existing config
config = self.config
config = self.get_config_state()
## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..)
litellm_settings = config.get("litellm_settings", {})
@ -1459,6 +1459,14 @@ class ProxyConfig:
# Overwrite the proxy's in-memory config state wholesale.
# NOTE(review): reads should go through get_config_state(), which returns a
# deep copy — reading self.config directly can leak mutable shared state.
def update_config_state(self, config: dict):
self.config = config
def get_config_state(self):
    """Hand back a deep copy of the current proxy config.

    Callers receive an isolated snapshot, so nothing outside the
    sanctioned update methods can mutate the shared config state.
    """
    snapshot = copy.deepcopy(self.config)
    return snapshot
async def load_config( # noqa: PLR0915
self, router: Optional[litellm.Router], config_file_path: str
):

View file

@ -53,6 +53,8 @@ async def langfuse_proxy_route(
[Docs](https://docs.litellm.ai/docs/pass_through/langfuse)
"""
from litellm.proxy.proxy_server import proxy_config
## CHECK FOR LITELLM API KEY IN THE QUERY PARAMS - ?..key=LITELLM_API_KEY
api_key = request.headers.get("Authorization") or ""
@ -68,7 +70,9 @@ async def langfuse_proxy_route(
)
callback_settings_obj: Optional[TeamCallbackMetadata] = (
_get_dynamic_logging_metadata(user_api_key_dict=user_api_key_dict)
_get_dynamic_logging_metadata(
user_api_key_dict=user_api_key_dict, proxy_config=proxy_config
)
)
dynamic_langfuse_public_key: Optional[str] = None