Mirror of https://github.com/BerriAI/litellm.git
Litellm dev 12 24 2024 p2 (#7400)
* fix(utils.py): default custom_llm_provider=None for 'supports_response_schema'. Closes https://github.com/BerriAI/litellm/issues/7397
* refactor(langfuse/): call the langfuse logger inside a CustomLogger-compatible langfuse class; refactor the langfuse logger to use verbose_logger.debug instead of print_verbose
* refactor(litellm_pre_call_utils.py): move config-based team callbacks inside the dynamic team callback logic; enables simpler unit testing for config-based team callbacks
* fix(proxy/_types.py): handle TeamCallbackMetadata None values: drop None values if present; if all are None, use a default dict to avoid downstream errors (see the sketch below)
* test(test_proxy_utils.py): add a unit test preventing future regressions; asserts team_id in the config state is not popped off across calls. Fixes https://github.com/BerriAI/litellm/issues/6787
* fix(langfuse_prompt_management.py): add success + failure logging event support
* fix: fix linting error
* test: fix test
* test: fix test
* test: override o1 prompt caching (openai currently not working)
* test: fix test
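For the proxy/_types.py bullet above, here is a minimal, hypothetical sketch of the "drop None values, otherwise fall back to defaults" pattern the commit describes. The drop_none_values helper, field names, and default values are illustrative assumptions, not litellm's actual TeamCallbackMetadata implementation.

# Hypothetical sketch of the "drop None values, else fall back to defaults"
# pattern described in the proxy/_types.py bullet above. Names and defaults
# are assumptions for illustration only.
from typing import Dict, List, Optional


def drop_none_values(values: Dict[str, Optional[List[str]]]) -> Dict[str, List[str]]:
    """Drop keys whose value is None; if everything is None, return safe defaults."""
    cleaned = {k: v for k, v in values.items() if v is not None}
    if not cleaned:
        # all values were None -> return an empty default dict
        # so downstream code never sees None callbacks
        return {"success_callback": [], "failure_callback": []}
    return cleaned


if __name__ == "__main__":
    print(drop_none_values({"success_callback": ["langfuse"], "failure_callback": None}))
    # -> {'success_callback': ['langfuse']}
    print(drop_none_values({"success_callback": None, "failure_callback": None}))
    # -> {'success_callback': [], 'failure_callback': []}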
This commit is contained in:
parent d790ba0897
commit c95351e70f
12 changed files with 227 additions and 62 deletions
@@ -1362,14 +1362,14 @@ class ProxyConfig:
             team_config[k] = get_secret(v)
         return team_config

-    async def load_team_config(self, team_id: str):
+    def load_team_config(self, team_id: str):
         """
         - for a given team id
         - return the relevant completion() call params
         """

         # load existing config
-        config = self.config
+        config = self.get_config_state()

         ## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..)
         litellm_settings = config.get("litellm_settings", {})
@@ -1459,6 +1459,14 @@ class ProxyConfig:
     def update_config_state(self, config: dict):
         self.config = config

+    def get_config_state(self):
+        """
+        Returns a deep copy of the config,
+
+        Do this, to avoid mutating the config state outside of allowed methods
+        """
+        return copy.deepcopy(self.config)
+
     async def load_config(  # noqa: PLR0915
         self, router: Optional[litellm.Router], config_file_path: str
     ):
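The get_config_state() addition above is what backs the commit's new unit test ("team_id in config state not popped off across calls"): load_team_config now reads a deep copy, so any key-popping it does cannot mutate the stored config. Below is a self-contained sketch of that behavior; MiniProxyConfig and the default_team_settings layout are stand-ins inferred from the hunks and commit message, not the real ProxyConfig class.

# Stand-in mirroring the update_config_state / get_config_state pattern from
# the diff above; NOT litellm's actual ProxyConfig.
import copy


class MiniProxyConfig:
    def __init__(self):
        self.config: dict = {}

    def update_config_state(self, config: dict):
        self.config = config

    def get_config_state(self):
        # deep copy: callers can mutate the result without touching self.config
        return copy.deepcopy(self.config)

    def load_team_config(self, team_id: str):
        config = self.get_config_state()  # was `config = self.config` before the fix
        teams = config.get("litellm_settings", {}).get("default_team_settings", [])
        for team in teams:
            # pop() only mutates the copy, not the stored config state
            if team.pop("team_id", None) == team_id:
                return team
        return {}


if __name__ == "__main__":
    pc = MiniProxyConfig()
    pc.update_config_state(
        {
            "litellm_settings": {
                "default_team_settings": [
                    {"team_id": "team-1", "success_callback": ["langfuse"]}
                ]
            }
        }
    )
    first = pc.load_team_config("team-1")
    second = pc.load_team_config("team-1")  # still resolvable: team_id survived the first call
    assert first == second == {"success_callback": ["langfuse"]}
    print("stored config still has team_id:", pc.get_config_state())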