LiteLLM Minor Fixes & Improvements (10/16/2024) (#6265)

* fix(caching_handler.py): handle positional arguments in add cache logic

Fixes https://github.com/BerriAI/litellm/issues/6264
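
A minimal sketch of the idea behind this fix (hypothetical helper names, not the actual litellm code): normalize positional and keyword arguments into one canonical dict before building the cache key, so `completion("gpt-3.5-turbo", msgs)` and `completion(model="gpt-3.5-turbo", messages=msgs)` hit the same cache entry.

```python
import inspect
from typing import Any, Callable, Dict

def normalize_call_args(fn: Callable, args: tuple, kwargs: Dict[str, Any]) -> Dict[str, Any]:
    """Map positional args onto parameter names so cache-key logic sees one canonical dict.

    Hypothetical helper -- illustrates the fix, not litellm's actual implementation.
    """
    bound = inspect.signature(fn).bind(*args, **kwargs)
    bound.apply_defaults()
    return dict(bound.arguments)

def completion(model: str, messages: list, temperature: float = 1.0):
    ...

# Positional and keyword call styles now yield identical cache-key input:
print(normalize_call_args(completion, ("gpt-3.5-turbo", [{"role": "user", "content": "hi"}]), {}))
print(normalize_call_args(completion, (), {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]}))
```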

* feat(litellm_pre_call_utils.py): allow forwarding openai org id to backend client

https://github.com/BerriAI/litellm/issues/6237

* docs(configs.md): add 'forward_openai_org_id' to docs
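
A hedged sketch of the forwarding logic (a hypothetical simplification of `litellm_pre_call_utils.py`; the setting name `forward_openai_org_id` comes from this commit's docs change): when the flag is enabled in the proxy's `general_settings`, the incoming `OpenAI-Organization` header is copied onto the outbound backend call.

```python
from typing import Any, Dict, Mapping, Optional

def maybe_forward_org_id(
    request_headers: Mapping[str, str],
    call_kwargs: Dict[str, Any],
    forward_openai_org_id: bool = False,
) -> Dict[str, Any]:
    """Sketch: copy the client's OpenAI org header onto the backend call when enabled."""
    org_id: Optional[str] = request_headers.get("OpenAI-Organization") or request_headers.get(
        "openai-organization"
    )
    if forward_openai_org_id and org_id is not None:
        call_kwargs["organization"] = org_id
    return call_kwargs

# With forward_openai_org_id: true under general_settings in config.yaml, a request
# carrying "OpenAI-Organization: org-abc123" reaches the backend OpenAI client
# with organization="org-abc123".
print(maybe_forward_org_id({"OpenAI-Organization": "org-abc123"}, {}, forward_openai_org_id=True))
```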

* fix(proxy_server.py): return model info if user_model is set

Fixes https://github.com/BerriAI/litellm/issues/6233
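
With a proxy started via `litellm --model <model>` (which sets `user_model` without loading a router), `/model/info` now returns a wildcard deployment instead of a 500; see the diff later in this commit. A hedged sketch of querying it (base URL and key are placeholders):

```python
import requests

# Assumes a locally running LiteLLM proxy started with `litellm --model <model>`;
# the URL and API key below are placeholders.
resp = requests.get(
    "http://0.0.0.0:4000/model/info",
    headers={"Authorization": "Bearer sk-1234"},
    timeout=10,
)
resp.raise_for_status()
# Per the diff below: {"data": <deployment dict with model_name "*">}
print(resp.json())
```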

* fix(hosted_vllm/chat/transformation.py): don't set tools unless non-none
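
The pattern behind the hosted_vllm fix, sketched with hypothetical names: only attach optional request fields when they are actually set, so the backend never receives `tools: null`.

```python
from typing import Any, Dict, List, Optional

def map_openai_params(
    non_default_params: Dict[str, Any], optional_params: Dict[str, Any]
) -> Dict[str, Any]:
    """Sketch: pass `tools` through only when non-None (hypothetical simplification)."""
    tools: Optional[List[Dict[str, Any]]] = non_default_params.get("tools")
    if tools is not None:  # previously "tools" could be written even when None
        optional_params["tools"] = tools
    return optional_params

print(map_openai_params({"tools": None}, {}))  # {} -- no stray "tools": null in the payload
```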

* fix(openai.py): improve debug log for openai 'str' error

Addresses https://github.com/BerriAI/litellm/issues/6272
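
A sketch of the kind of logging improvement this refers to (hypothetical; the actual message lives in `openai.py`): surface the raw response text when re-raising, so a bare `'str'` error is traceable to what the server actually returned.

```python
def handle_openai_error(e: Exception, raw_response_text: str) -> None:
    """Hypothetical sketch: include the raw response alongside the original error."""
    raise ValueError(
        f"OpenAI call failed: {e}. Raw response received: {raw_response_text!r}"
    ) from e
```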

* fix(proxy_server.py): fix linting error

* fix(proxy_server.py): fix linting errors

* test: skip WIP test

* docs(openai.md): add docs on passing openai org id from client to openai
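
The client-side counterpart documented in openai.md, sketched with placeholder URL and keys: the OpenAI SDK sends the `organization` value as the `OpenAI-Organization` header, which the proxy forwards when `forward_openai_org_id` is enabled.

```python
from openai import OpenAI

# Placeholders: point at your running LiteLLM proxy with your proxy key.
client = OpenAI(
    base_url="http://0.0.0.0:4000",
    api_key="sk-1234",
    organization="org-abc123",  # sent as the OpenAI-Organization header
)

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```
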
Krish Dholakia · 2024-10-16 22:16:23 -07:00 · committed by GitHub
parent 43878bd2a0 · commit 38a9a106d2
14 changed files with 371 additions and 47 deletions

litellm/proxy/proxy_server.py

@@ -19,6 +19,7 @@ from typing import (
     List,
     Optional,
     Tuple,
+    cast,
     get_args,
     get_origin,
     get_type_hints,
@@ -7313,18 +7314,40 @@ async def model_info_v1(
     ```
     """
-    global llm_model_list, general_settings, user_config_file_path, proxy_config, llm_router
+    global llm_model_list, general_settings, user_config_file_path, proxy_config, llm_router, user_model
+    if user_model is not None:
+        # user is trying to get specific model from litellm router
+        try:
+            model_info: Dict = cast(Dict, litellm.get_model_info(model=user_model))
+        except Exception:
+            model_info = {}
+        _deployment_info = Deployment(
+            model_name="*",
+            litellm_params=LiteLLM_Params(
+                model=user_model,
+            ),
+            model_info=model_info,
+        )
+        _deployment_info_dict = _deployment_info.model_dump()
+        _deployment_info_dict = remove_sensitive_info_from_deployment(
+            deployment_dict=_deployment_info_dict
+        )
+        return {"data": _deployment_info_dict}
     if llm_model_list is None:
         raise HTTPException(
-            status_code=500, detail={"error": "LLM Model List not loaded in"}
+            status_code=500,
+            detail={
+                "error": "LLM Model List not loaded in. Make sure you passed models in your config.yaml or on the LiteLLM Admin UI. - https://docs.litellm.ai/docs/proxy/configs"
+            },
         )
     if llm_router is None:
         raise HTTPException(
             status_code=500,
             detail={
-                "error": "LLM Router is not loaded in. Make sure you passed models in your config.yaml or on the LiteLLM Admin UI."
+                "error": "LLM Router is not loaded in. Make sure you passed models in your config.yaml or on the LiteLLM Admin UI. - https://docs.litellm.ai/docs/proxy/configs"
             },
         )