LiteLLM Minor Fixes & Improvements (09/20/2024) (#5807)

* fix(vertex_llm_base.py): Handle api_base = ""

Fixes https://github.com/BerriAI/litellm/issues/5798
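
The gist of the fix, as a minimal sketch (the helper name and default host below are illustrative, not litellm internals): an empty-string `api_base` should fall through to the default Vertex AI endpoint instead of producing a malformed URL.

```python
# Minimal sketch, assuming the fix amounts to treating api_base == "" the same
# as api_base is None. Names are illustrative, not litellm's actual code.
from typing import Optional


def resolve_vertex_api_base(api_base: Optional[str], vertex_location: str) -> str:
    if api_base:  # an empty string is falsy, so "" falls through to the default
        return api_base.rstrip("/")
    return f"https://{vertex_location}-aiplatform.googleapis.com"


print(resolve_vertex_api_base("", "us-central1"))
# https://us-central1-aiplatform.googleapis.com
```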

* fix(o1_transformation.py): handle stream_options not being supported

https://github.com/BerriAI/litellm/issues/5803
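
One plausible shape of the handling (a sketch, not necessarily the exact transformation litellm applies): strip `stream_options` from the outgoing params for o1 models, since the API rejects it.

```python
# Sketch: remove stream_options before sending a request to an o1 model.
# The helper name is made up for illustration.
def drop_unsupported_o1_params(optional_params: dict) -> dict:
    unsupported = {"stream_options"}
    return {k: v for k, v in optional_params.items() if k not in unsupported}


params = {"max_tokens": 256, "stream_options": {"include_usage": True}}
print(drop_unsupported_o1_params(params))  # {'max_tokens': 256}
```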

* docs(routing.md): fix docs

Closes https://github.com/BerriAI/litellm/issues/5808

* perf(internal_user_endpoints.py): reduce db calls for getting team_alias for a key

Reuse the team list already fetched in the `/user/info` endpoint.

Reduces the UI Keys tab load time to 800ms (previously 28s+).
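
Simplified, the pattern is to resolve each key's `team_alias` from the teams already loaded rather than issuing one DB query per key (the `Team` type below is a stand-in for `LiteLLM_TeamTable`; the real helper appears in the diff further down).

```python
# Simplified sketch of the in-memory lookup; Team stands in for LiteLLM_TeamTable.
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Team:
    team_id: str
    team_alias: Optional[str] = None


def team_alias_for_key(teams: List[Team], team_id: Optional[str]) -> Optional[str]:
    for team in teams:
        if team.team_id == team_id:
            return team.team_alias
    return None


teams = [Team("t-1", "prod"), Team("t-2", "dev")]
print(team_alias_for_key(teams, "t-2"))  # dev
```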

* feat(proxy_server.py): support CONFIG_FILE_PATH as env var

Closes https://github.com/BerriAI/litellm/issues/5744
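
Roughly, the proxy can now pick its config path up from the environment; the precedence between a CLI flag and the env var in this sketch is an assumption.

```python
# Sketch: resolve the proxy config path, letting CONFIG_FILE_PATH act as a
# fallback when no explicit --config argument is given (precedence assumed).
import os
from typing import Optional


def resolve_config_path(cli_config: Optional[str] = None) -> Optional[str]:
    return cli_config or os.getenv("CONFIG_FILE_PATH")
```

A deployment can then run something like `CONFIG_FILE_PATH=/path/to/config.yaml litellm` instead of passing `--config` on the command line.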

* feat(get_llm_provider_logic.py): add `litellm_proxy/` as a known openai-compatible route

Simplifies calling the LiteLLM proxy.

Reduces confusion when calling models on the LiteLLM proxy from the litellm SDK.
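
With `litellm_proxy/` recognized as an OpenAI-compatible provider prefix, SDK calls can point straight at a proxy deployment. The URL, key, and model name below are placeholders for your own setup.

```python
# Hedged example: call a model hosted on a LiteLLM proxy via the litellm SDK.
# api_base, api_key, and the model name are placeholders for your deployment.
import litellm

response = litellm.completion(
    model="litellm_proxy/gpt-4o",      # "<prefix>/<model name configured on the proxy>"
    api_base="http://localhost:4000",  # your proxy's base URL
    api_key="sk-1234",                 # a proxy virtual key
    messages=[{"role": "user", "content": "hello"}],
)
print(response.choices[0].message.content)
```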

* docs(litellm_proxy.md): cleanup docs

* fix(internal_user_endpoints.py): fix pydantic obj
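
Per the diff below, `/user/info` now returns a typed `UserInfoResponse` instead of a hand-built dict. A rough sketch of such a model follows; the real class is defined in litellm's proxy types and its field types may differ.

```python
# Rough sketch only; field types are assumptions, the real model lives in litellm.
from typing import Any, List, Optional

from pydantic import BaseModel, Field


class UserInfoResponseSketch(BaseModel):
    user_id: Optional[str] = None
    user_info: Optional[Any] = None
    keys: List[Any] = Field(default_factory=list)
    teams: List[Any] = Field(default_factory=list)
```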

* test(test_key_generate_prisma.py): fix test
Author: Krish Dholakia, 2024-09-20 20:21:32 -07:00 (committed by GitHub)
Parent: 0c488cf4ca
Commit: d6ca7fed18
14 changed files with 204 additions and 84 deletions

Changes to `internal_user_endpoints.py`:

```diff
@@ -15,6 +15,7 @@ import copy
 import json
 import re
 import secrets
+import time
 import traceback
 import uuid
 from datetime import datetime, timedelta, timezone
@@ -274,10 +275,23 @@ async def ui_get_available_role(
     return _data_to_return
 
 
+def get_team_from_list(
+    team_list: Optional[List[LiteLLM_TeamTable]], team_id: str
+) -> Optional[LiteLLM_TeamTable]:
+    if team_list is None:
+        return None
+
+    for team in team_list:
+        if team.team_id == team_id:
+            return team
+
+    return None
+
+
 @router.get(
     "/user/info",
     tags=["Internal User management"],
     dependencies=[Depends(user_api_key_auth)],
+    response_model=UserInfoResponse,
 )
 @management_endpoint_wrapper
 async def user_info(
@@ -337,7 +351,7 @@ async def user_info(
         ## GET ALL TEAMS ##
         team_list = []
         team_id_list = []
-        # _DEPRECATED_ check if user in 'member' field
+        # get all teams user belongs to
         teams_1 = await prisma_client.get_data(
             user_id=user_id, table_name="team", query_type="find_all"
         )
@@ -414,13 +428,13 @@ async def user_info(
             if (
                 key.token == litellm_master_key_hash
                 and general_settings.get("disable_master_key_return", False)
-                == True  ## [IMPORTANT] used by hosted proxy-ui to prevent sharing master key on ui
+                is True  ## [IMPORTANT] used by hosted proxy-ui to prevent sharing master key on ui
             ):
                 continue
 
             try:
                 key = key.model_dump()  # noqa
-            except:
+            except Exception:
                 # if using pydantic v1
                 key = key.dict()
             if (
@@ -428,29 +442,29 @@ async def user_info(
                 and key["team_id"] is not None
                 and key["team_id"] != "litellm-dashboard"
             ):
-                team_info = await prisma_client.get_data(
-                    team_id=key["team_id"], table_name="team"
+                team_info = get_team_from_list(
+                    team_list=teams_1, team_id=key["team_id"]
                 )
-                team_alias = getattr(team_info, "team_alias", None)
-                key["team_alias"] = team_alias
+                if team_info is not None:
+                    team_alias = getattr(team_info, "team_alias", None)
+                    key["team_alias"] = team_alias
+                else:
+                    key["team_alias"] = None
             else:
                 key["team_alias"] = "None"
             returned_keys.append(key)
 
-        response_data = {
-            "user_id": user_id,
-            "user_info": user_info,
-            "keys": returned_keys,
-            "teams": team_list,
-        }
+        response_data = UserInfoResponse(
+            user_id=user_id, user_info=user_info, keys=returned_keys, teams=team_list
+        )
         return response_data
     except Exception as e:
-        verbose_proxy_logger.error(
+        verbose_proxy_logger.exception(
            "litellm.proxy.proxy_server.user_info(): Exception occured - {}".format(
                str(e)
            )
        )
-        verbose_proxy_logger.debug(traceback.format_exc())
        if isinstance(e, HTTPException):
            raise ProxyException(
                message=getattr(e, "detail", f"Authentication Error({str(e)})"),
```