mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-27 11:43:54 +00:00
Litellm dev 12 19 2024 p2 (#7315)
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 46s
* fix(proxy_server.py): only update k,v pair if v is not empty/null
  Fixes https://github.com/BerriAI/litellm/issues/6787
* test(test_router.py): cleanup duplicate calls
* test: add new test stream options drop params test
* test: update optional params / stream options test to test for vertex ai mistral route specifically
  Addresses https://github.com/BerriAI/litellm/issues/7309
* fix(proxy_server.py): fix linting errors
* fix: fix linting errors
This commit is contained in:
parent 35076212ef
commit 4c7a3931b7

5 changed files with 106 additions and 34 deletions
@@ -20,5 +20,9 @@ model_list:
       api_version: "2024-05-01-preview"
 
 litellm_settings:
-  success_callback: ["langsmith"]
-  num_retries: 0
+  default_team_settings:
+    - team_id: c91e32bb-0f2a-4aa1-86c4-307ca2e03ea3
+      success_callback: ["langfuse"]
+      failure_callback: ["langfuse"]
+      langfuse_public_key: my-fake-key
+      langfuse_secret: my-fake-secret
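A minimal sketch of how these per-team settings get consumed, via the _get_team_config helper added in the proxy_server.py hunk below (standalone and illustrative; PyYAML stands in here for the proxy's own config loading):

import yaml

from litellm.proxy.proxy_server import ProxyConfig

raw = """
litellm_settings:
  default_team_settings:
    - team_id: c91e32bb-0f2a-4aa1-86c4-307ca2e03ea3
      success_callback: ["langfuse"]
      failure_callback: ["langfuse"]
"""
teams = yaml.safe_load(raw)["litellm_settings"]["default_team_settings"]

# look up the settings block for one team id
team_config = ProxyConfig()._get_team_config(
    team_id="c91e32bb-0f2a-4aa1-86c4-307ca2e03ea3", all_teams_config=teams
)
assert team_config["success_callback"] == ["langfuse"]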
@@ -1354,6 +1354,19 @@ class ProxyConfig:
                 config[key] = get_secret(value)
         return config
 
+    def _get_team_config(self, team_id: str, all_teams_config: List[Dict]) -> Dict:
+        team_config: dict = {}
+        for team in all_teams_config:
+            if "team_id" not in team:
+                raise Exception(f"team_id missing from team: {team}")
+            if team_id == team["team_id"]:
+                team_config = team
+                break
+        for k, v in team_config.items():
+            if isinstance(v, str) and v.startswith("os.environ/"):
+                team_config[k] = get_secret(v)
+        return team_config
+
     async def load_team_config(self, team_id: str):
         """
         - for a given team id
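A behavior sketch for the os.environ/ convention handled in _get_team_config (env-var name and team values are illustrative; assumes get_secret resolves os.environ/-prefixed strings from the process environment, its default behavior):

import os

from litellm.proxy.proxy_server import ProxyConfig

os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-test"  # illustrative value

teams = [
    {"team_id": "team-a", "success_callback": ["langfuse"]},
    {"team_id": "team-b", "langfuse_public_key": "os.environ/LANGFUSE_PUBLIC_KEY"},
]

# plain values are returned as-is
team_a = ProxyConfig()._get_team_config(team_id="team-a", all_teams_config=teams)
assert team_a["success_callback"] == ["langfuse"]

# os.environ/-prefixed values are resolved through get_secret
team_b = ProxyConfig()._get_team_config(team_id="team-b", all_teams_config=teams)
assert team_b["langfuse_public_key"] == "pk-test"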
@@ -1366,18 +1379,11 @@
         ## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..)
         litellm_settings = config.get("litellm_settings", {})
         all_teams_config = litellm_settings.get("default_team_settings", None)
-        team_config: dict = {}
         if all_teams_config is None:
-            return team_config
-        for team in all_teams_config:
-            if "team_id" not in team:
-                raise Exception(f"team_id missing from team: {team}")
-            if team_id == team["team_id"]:
-                team_config = team
-                break
-        for k, v in team_config.items():
-            if isinstance(v, str) and v.startswith("os.environ/"):
-                team_config[k] = get_secret(v)
+            return {}
+        team_config = self._get_team_config(
+            team_id=team_id, all_teams_config=all_teams_config
+        )
         return team_config
 
     def _init_cache(
@@ -1452,9 +1458,12 @@
 
         config = self._check_for_os_environ_vars(config=config)
 
-        self.config = config
+        self.update_config_state(config=config)
         return config
 
+    def update_config_state(self, config: dict):
+        self.config = config
+
     async def load_config(  # noqa: PLR0915
         self, router: Optional[litellm.Router], config_file_path: str
     ):
@@ -2272,6 +2281,24 @@
                 pass_through_endpoints=general_settings["pass_through_endpoints"]
             )
 
+    def _update_config_fields(
+        self,
+        current_config: dict,
+        param_name: str,
+        db_param_value: Any,
+    ) -> dict:
+        if isinstance(current_config[param_name], dict):
+            # if dict exists (e.g. litellm_settings),
+            # go through each key and value,
+            # and update if new value is not None/empty dict
+            for key, value in db_param_value.items():
+                if value:
+                    current_config[param_name][key] = value
+        else:
+            current_config[param_name] = db_param_value
+
+        return current_config
+
     async def _update_config_from_db(
         self,
         prisma_client: PrismaClient,
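A minimal behavior sketch for the merge rule above, i.e. the fix for https://github.com/BerriAI/litellm/issues/6787 (config values are illustrative):

from litellm.proxy.proxy_server import ProxyConfig

proxy_config = ProxyConfig()
cfg = {"litellm_settings": {"success_callback": ["langsmith"], "request_timeout": 600}}

cfg = proxy_config._update_config_fields(
    current_config=cfg,
    param_name="litellm_settings",
    db_param_value={"num_retries": 5, "default_team_settings": []},
)

assert cfg["litellm_settings"]["num_retries"] == 5  # non-empty value applied
assert cfg["litellm_settings"]["success_callback"] == ["langsmith"]  # untouched
assert "default_team_settings" not in cfg["litellm_settings"]  # empty list skipped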
@@ -2311,10 +2338,11 @@
             if param_name is not None and param_value is not None:
                 # check if param_name is already in the config
                 if param_name in config:
-                    if isinstance(config[param_name], dict):
-                        config[param_name].update(param_value)
-                    else:
-                        config[param_name] = param_value
+                    config = self._update_config_fields(
+                        current_config=config,
+                        param_name=param_name,
+                        db_param_value=param_value,
+                    )
                 else:
                     # if it's not in the config - then add it
                     config[param_name] = param_value
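For contrast, a sketch of why the old call site lost settings: plain dict.update copies empty values verbatim (illustrative values):

cfg = {"litellm_settings": {"success_callback": ["langfuse"]}}
db_value = {"success_callback": [], "num_retries": 5}

cfg["litellm_settings"].update(db_value)  # old behavior
assert cfg["litellm_settings"]["success_callback"] == []  # callback wiped by empty value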
@@ -502,6 +502,20 @@ def test_dynamic_drop_additional_params(drop_params):
         pass
 
 
+def test_dynamic_drop_additional_params_stream_options():
+    """
+    Make a call to vertex ai, dropping 'stream_options' specifically
+    """
+    optional_params = litellm.utils.get_optional_params(
+        model="mistral-large-2411@001",
+        custom_llm_provider="vertex_ai",
+        stream_options={"include_usage": True},
+        additional_drop_params=["stream_options"],
+    )
+
+    assert "stream_options" not in optional_params
+
+
 def test_dynamic_drop_additional_params_e2e():
     with patch(
         "litellm.llms.custom_httpx.http_handler.HTTPHandler.post", new=MagicMock()
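The same knob works at the request layer. A hedged sketch of dropping stream_options on a live call (assumes Vertex AI credentials are configured; the model string mirrors the test above and this is not run in CI):

import litellm

# stream_options is stripped from the outbound request before it reaches the
# vertex_ai mistral route, so providers that reject it no longer error out
response = litellm.completion(
    model="vertex_ai/mistral-large-2411@001",
    messages=[{"role": "user", "content": "hello"}],
    stream_options={"include_usage": True},
    additional_drop_params=["stream_options"],
)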
@@ -2220,22 +2220,6 @@ def test_router_cooldown_api_connection_error():
     except litellm.APIConnectionError:
         pass
 
-    try:
-        router.completion(
-            model="gemini-1.5-pro",
-            messages=[{"role": "admin", "content": "Fail on this!"}],
-        )
-    except litellm.APIConnectionError:
-        pass
-
-    try:
-        router.completion(
-            model="gemini-1.5-pro",
-            messages=[{"role": "admin", "content": "Fail on this!"}],
-        )
-    except litellm.APIConnectionError:
-        pass
-
 
 def test_router_correctly_reraise_error():
     """
@@ -934,3 +934,45 @@ def test_get_team_models():
         model_access_groups=model_access_groups,
     )
     assert result == ["gpt-4o", "gpt-3.5-turbo", "gpt-4o-mini"]
+
+
+def test_update_config_fields():
+    from litellm.proxy.proxy_server import ProxyConfig
+
+    proxy_config = ProxyConfig()
+
+    args = {
+        "current_config": {
+            "litellm_settings": {
+                "default_team_settings": [
+                    {
+                        "team_id": "c91e32bb-0f2a-4aa1-86c4-307ca2e03ea3",
+                        "success_callback": ["langfuse"],
+                        "failure_callback": ["langfuse"],
+                        "langfuse_public_key": "my-fake-key",
+                        "langfuse_secret": "my-fake-secret",
+                    }
+                ]
+            },
+        },
+        "param_name": "litellm_settings",
+        "db_param_value": {
+            "telemetry": False,
+            "drop_params": True,
+            "num_retries": 5,
+            "request_timeout": 600,
+            "success_callback": ["langfuse"],
+            "default_team_settings": [],
+            "context_window_fallbacks": [{"gpt-3.5-turbo": ["gpt-3.5-turbo-large"]}],
+        },
+    }
+    updated_config = proxy_config._update_config_fields(**args)
+
+    all_team_config = updated_config["litellm_settings"]["default_team_settings"]
+
+    # check if team id config returned
+    team_config = proxy_config._get_team_config(
+        team_id="c91e32bb-0f2a-4aa1-86c4-307ca2e03ea3", all_teams_config=all_team_config
+    )
+    assert team_config["langfuse_public_key"] == "my-fake-key"
+    assert team_config["langfuse_secret"] == "my-fake-secret"