forked from phoenix/litellm-mirror
LiteLLM Minor Fixes and Improvements (09/12/2024) (#5658)
* fix(factory.py): handle tool call content as list Fixes https://github.com/BerriAI/litellm/issues/5652 * fix(factory.py): enforce stronger typing * fix(router.py): return model alias in /v1/model/info and /v1/model_group/info * fix(user_api_key_auth.py): move noisy warning message to debug cleanup logs * fix(types.py): cleanup pydantic v2 deprecated param Fixes https://github.com/BerriAI/litellm/issues/5649 * docs(gemini.md): show how to pass inline data to gemini api Fixes https://github.com/BerriAI/litellm/issues/5674
This commit is contained in:
parent
795047c37f
commit
4657a40ef1
14 changed files with 324 additions and 41 deletions
|
@@ -1432,3 +1432,72 @@ async def test_gemini_pass_through_endpoint():
|
|||
)
|
||||
|
||||
print(resp.body)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_proxy_model_group_alias_checks(prisma_client):
    """
    Check if a model group alias is returned on:

    - `/v1/models`
    - `/v1/model/info`
    - `/v1/model_group/info`

    Registers a router with one real deployment ("gpt-3.5-turbo") and one
    alias ("gpt-4" -> "gpt-3.5-turbo"), then asserts the alias is surfaced
    by each of the three model-listing proxy endpoints.
    """
    from litellm.proxy.proxy_server import model_group_info, model_info_v1, model_list

    # Wire up the proxy's module-level state for this test.
    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
    await litellm.proxy.proxy_server.prisma_client.connect()

    # Sanity check: proxy_config must exist on the proxy server module
    # (getattr raises AttributeError if it is missing).
    proxy_config = getattr(litellm.proxy.proxy_server, "proxy_config")

    _model_list = [
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "gpt-3.5-turbo"},
        }
    ]
    model_alias = "gpt-4"
    router = litellm.Router(
        model_list=_model_list,
        model_group_alias={model_alias: "gpt-3.5-turbo"},
    )
    setattr(litellm.proxy.proxy_server, "llm_router", router)
    setattr(litellm.proxy.proxy_server, "llm_model_list", _model_list)

    # `/v1/models`: expect the real model plus the alias.
    resp = await model_list(
        user_api_key_dict=UserAPIKeyAuth(models=[]),
    )
    assert len(resp) == 2
    print(resp)

    # `/v1/model/info`: the alias must appear as a `model_name` entry.
    resp = await model_info_v1(
        user_api_key_dict=UserAPIKeyAuth(models=[]),
    )
    models = resp["data"]
    assert any(model_alias == item["model_name"] for item in models)

    # `/v1/model_group/info`: the alias must appear as a `model_group` entry.
    resp = await model_group_info(
        user_api_key_dict=UserAPIKeyAuth(models=[]),
    )
    models = resp["data"]
    assert any(model_alias == item.model_group for item in models)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue