feat(proxy_server.py): new 'supported_openai_params' endpoint

get supported openai params for a given model
Krrish Dholakia 2024-05-20 08:39:50 -07:00
parent 4f2ac5f270
commit 0016477d3b
2 changed files with 34 additions and 1 deletion
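The new endpoint is a thin wrapper around two litellm helpers that already appear in the diff below: `litellm.get_llm_provider` resolves a model name to its provider, and `litellm.get_supported_openai_params` returns the OpenAI-compatible parameters for that model/provider pair. A minimal sketch of the same lookup done directly in Python (model name and printed values are illustrative, not taken from this commit):

```
import litellm

# Resolve the provider for a model name, then ask litellm which
# OpenAI-style params that model supports (the same two calls the endpoint makes).
model, custom_llm_provider, _, _ = litellm.get_llm_provider(model="gpt-3.5-turbo-16k")
params = litellm.get_supported_openai_params(
    model=model, custom_llm_provider=custom_llm_provider
)
print(custom_llm_provider)  # e.g. "openai"
print(params)               # e.g. ["temperature", "max_tokens", ...] (illustrative)
```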


@@ -260,7 +260,7 @@ class OpenAIConfig:
         model_specific_params = []
         if (
-            model != "gpt-3.5-turbo-16k" and model != "gpt-4"
+            "gpt-3.5-turbo" in model or "gpt-4-turbo" in model or "gpt-4o" in model
         ):  # gpt-4 does not support 'response_format'
             model_specific_params.append("response_format")
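Assuming, as the one-line hunk suggests, that the exclusion-style check is the removed line, this change flips `response_format` support from "everything except two models" to a positive match on model families. A small standalone sketch of the two checks (the `supports_response_format_*` helpers are hypothetical names for illustration, not part of litellm):

```
def supports_response_format_old(model: str) -> bool:
    # previous check: every model except these two was assumed to support it
    return model != "gpt-3.5-turbo-16k" and model != "gpt-4"

def supports_response_format_new(model: str) -> bool:
    # new check: only families known to accept 'response_format'
    return "gpt-3.5-turbo" in model or "gpt-4-turbo" in model or "gpt-4o" in model

for m in ["gpt-4", "gpt-4-0613", "gpt-4o", "gpt-3.5-turbo-16k"]:
    print(m, supports_response_format_old(m), supports_response_format_new(m))
# note: "gpt-4-0613" passed the old check but fails the new, stricter one
```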


@@ -4889,6 +4889,9 @@ async def moderations(
)

#### DEV UTILS ####

@router.post(
    "/utils/token_counter",
    tags=["llm utils"],
@@ -4940,6 +4943,36 @@ async def token_counter(request: TokenCountRequest):
)


@router.get(
    "/utils/supported_openai_params",
    tags=["llm utils"],
    dependencies=[Depends(user_api_key_auth)],
)
async def supported_openai_params(model: str):
    """
    Returns supported openai params for a given litellm model name

    e.g. `gpt-4` vs `gpt-3.5-turbo`

    Example curl:
    ```
    curl -X GET --location 'http://localhost:4000/utils/supported_openai_params?model=gpt-3.5-turbo-16k' \
    --header 'Authorization: Bearer sk-1234'
    ```
    """
    try:
        model, custom_llm_provider, _, _ = litellm.get_llm_provider(model=model)
        return {
            "supported_openai_params": litellm.get_supported_openai_params(
                model=model, custom_llm_provider=custom_llm_provider
            )
        }
    except Exception as e:
        raise HTTPException(
            status_code=400, detail={"error": "Could not map model={}".format(model)}
        )
#### KEY MANAGEMENT ####
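As a usage sketch, here is one way a client might call the new route from Python; the URL and API key mirror the placeholder values in the docstring's curl example, and the response shape (`{"supported_openai_params": [...]}`) follows from the handler's return statement above:

```
import requests

# Ask a locally running proxy which OpenAI params a model supports.
resp = requests.get(
    "http://localhost:4000/utils/supported_openai_params",
    params={"model": "gpt-3.5-turbo-16k"},
    headers={"Authorization": "Bearer sk-1234"},
)
resp.raise_for_status()  # a 400 here means the model could not be mapped to a provider
print(resp.json()["supported_openai_params"])  # list of param names, e.g. ["temperature", ...]
```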