forked from phoenix/litellm-mirror
LiteLLM Minor Fixes and Improvements (08/06/2024) (#5567)
* fix(utils.py): return citations for perplexity streaming
  Fixes https://github.com/BerriAI/litellm/issues/5535

* fix(anthropic/chat.py): support fallbacks for anthropic streaming (#5542)
  * fix(anthropic/chat.py): support fallbacks for anthropic streaming
    Fixes https://github.com/BerriAI/litellm/issues/5512
  * fix(anthropic/chat.py): use module-level http client if none given (prevents early client closure)
  * fix: fix linting errors
  * fix(http_handler.py): fix raise_for_status error handling
  * test: retry flaky test
  * fix otel type
  * fix(bedrock/embed): fix error raising
  * test(test_openai_batches_and_files.py): skip azure batches test (for now) - quota exceeded
  * fix(test_router.py): skip azure batch route test (for now) - hit batch quota limits

  ---------

  Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>

* All `model_group_alias` should show up in `/models`, `/model/info`, `/model_group/info` (#5539)
  * fix(router.py): support returning model_alias model names in `/v1/models`
  * fix(proxy_server.py): support returning model aliases on `/model/info`
  * feat(router.py): support returning model group alias for `/model_group/info`
  * fix(proxy_server.py): fix linting errors
  * build(model_prices_and_context_window.json): add amazon titan text premier pricing information
    Closes https://github.com/BerriAI/litellm/issues/5560
  * feat(litellm_logging.py): log standard logging response object for pass-through endpoints; allows bedrock /invoke agent calls to be correctly logged to langfuse + s3
  * fix(success_handler.py): fix linting errors
  * fix(team_endpoints.py): allow admin to update team member budgets

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
parent e4dcd6f745
commit 72e961af3c

25 changed files with 509 additions and 99 deletions
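For context on the `model_group_alias` items above, the behavior can be exercised directly on the router before going through the proxy. A minimal sketch, assuming the `Router` constructor accepts `model_list` and `model_group_alias` as in the litellm docs; the model names and key are placeholders:

from litellm import Router

# Placeholder deployment plus one group alias; after this change the alias
# name is expected to be returned alongside the real model group name.
router = Router(
    model_list=[
        {
            "model_name": "gpt-4o",
            "litellm_params": {"model": "openai/gpt-4o", "api_key": "sk-..."},  # placeholder key
        }
    ],
    model_group_alias={"my-gpt-4o-alias": "gpt-4o"},  # alias -> real model group
)

print(router.get_model_names())  # expected to include both "gpt-4o" and "my-gpt-4o-alias"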
@@ -3005,13 +3005,13 @@ def model_list(
     This is just for compatibility with openai projects like aider.
     """
-    global llm_model_list, general_settings
+    global llm_model_list, general_settings, llm_router
     all_models = []
     ## CHECK IF MODEL RESTRICTIONS ARE SET AT KEY/TEAM LEVEL ##
-    if llm_model_list is None:
+    if llm_router is None:
         proxy_model_list = []
     else:
-        proxy_model_list = [m["model_name"] for m in llm_model_list]
+        proxy_model_list = llm_router.get_model_names()
     key_models = get_key_models(
         user_api_key_dict=user_api_key_dict, proxy_model_list=proxy_model_list
     )
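The switch above matters because `llm_model_list` only contains concrete deployments, while the router also knows about configured `model_group_alias` entries. A hypothetical simplification of the idea behind `get_model_names()` (not the actual router implementation):

from typing import Dict, List


def get_model_names(model_list: List[dict], model_group_alias: Dict[str, str]) -> List[str]:
    # Deployment names from the config, e.g. "gpt-4o"
    names = [m["model_name"] for m in model_list]
    # Append alias names so they also show up in /v1/models
    names.extend(alias for alias in model_group_alias if alias not in names)
    return names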
@@ -7503,10 +7503,11 @@ async def model_info_v1(
     all_models: List[dict] = []
     ## CHECK IF MODEL RESTRICTIONS ARE SET AT KEY/TEAM LEVEL ##
-    if llm_model_list is None:
+    if llm_router is None:
         proxy_model_list = []
     else:
-        proxy_model_list = [m["model_name"] for m in llm_model_list]
+        proxy_model_list = llm_router.get_model_names()

     key_models = get_key_models(
         user_api_key_dict=user_api_key_dict, proxy_model_list=proxy_model_list
     )
@@ -7523,8 +7524,14 @@ async def model_info_v1(
     if len(all_models_str) > 0:
         model_names = all_models_str
-        _relevant_models = [m for m in llm_model_list if m["model_name"] in model_names]
-        all_models = copy.deepcopy(_relevant_models)
+        llm_model_list = llm_router.get_model_list()
+        if llm_model_list is not None:
+            _relevant_models = [
+                m for m in llm_model_list if m["model_name"] in model_names
+            ]
+            all_models = copy.deepcopy(_relevant_models)  # type: ignore
+        else:
+            all_models = []

     for model in all_models:
         # provided model_info in config.yaml
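`model_info_v1` now also takes its model list from `llm_router.get_model_list()` instead of the `llm_model_list` global, and guards against a `None` return before filtering. A minimal standalone sketch of that guard pattern (the `Optional` return type is inferred from the added `None` check, not from the router's signature):

import copy
from typing import List, Optional


def filter_relevant_models(
    model_list: Optional[List[dict]], model_names: List[str]
) -> List[dict]:
    # If the router returned nothing, expose no models rather than failing.
    if model_list is None:
        return []
    relevant = [m for m in model_list if m["model_name"] in model_names]
    # Deep copy so later mutation of the response does not touch router state.
    return copy.deepcopy(relevant)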
@@ -7590,12 +7597,12 @@ async def model_group_info(
         raise HTTPException(
             status_code=500, detail={"error": "LLM Router is not loaded in"}
         )
     all_models: List[dict] = []
     ## CHECK IF MODEL RESTRICTIONS ARE SET AT KEY/TEAM LEVEL ##
-    if llm_model_list is None:
+    if llm_router is None:
         proxy_model_list = []
     else:
-        proxy_model_list = [m["model_name"] for m in llm_model_list]
+        proxy_model_list = llm_router.get_model_names()

     key_models = get_key_models(
         user_api_key_dict=user_api_key_dict, proxy_model_list=proxy_model_list
     )
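With `model_list`, `model_info_v1`, and `model_group_info` all reading names from the router, alias entries should now be visible from each listing endpoint. A minimal sketch for verifying this against a locally running proxy (the base URL and `sk-1234` key are placeholders):

import requests

BASE_URL = "http://localhost:4000"             # placeholder proxy address
HEADERS = {"Authorization": "Bearer sk-1234"}  # placeholder virtual key

# Each endpoint below is expected to include model_group_alias names
# after this change.
for path in ("/v1/models", "/model/info", "/model_group/info"):
    resp = requests.get(f"{BASE_URL}{path}", headers=HEADERS)
    resp.raise_for_status()
    print(path, resp.json())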