mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 11:43:54 +00:00
feat(proxy_server.py): new 'supported_openai_params' endpoint
get supported openai params for a given model
This commit is contained in:
parent
4f2ac5f270
commit
0016477d3b
2 changed files with 34 additions and 1 deletions
|
@ -260,7 +260,7 @@ class OpenAIConfig:
|
||||||
|
|
||||||
model_specific_params = []
|
model_specific_params = []
|
||||||
if (
|
if (
|
||||||
"gpt-3.5-turbo" in model or "gpt-4-turbo" in model or "gpt-4o" in model
|
model != "gpt-3.5-turbo-16k" and model != "gpt-4"
|
||||||
): # gpt-4 does not support 'response_format'
|
): # gpt-4 does not support 'response_format'
|
||||||
model_specific_params.append("response_format")
|
model_specific_params.append("response_format")
|
||||||
|
|
||||||
|
|
|
@ -4889,6 +4889,9 @@ async def moderations(
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
#### DEV UTILS ####
|
||||||
|
|
||||||
|
|
||||||
@router.post(
|
@router.post(
|
||||||
"/utils/token_counter",
|
"/utils/token_counter",
|
||||||
tags=["llm utils"],
|
tags=["llm utils"],
|
||||||
|
@ -4940,6 +4943,36 @@ async def token_counter(request: TokenCountRequest):
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get(
    "/utils/supported_openai_params",
    tags=["llm utils"],
    dependencies=[Depends(user_api_key_auth)],
)
async def supported_openai_params(model: str):
    """
    Returns supported openai params for a given litellm model name

    e.g. `gpt-4` vs `gpt-3.5-turbo`

    Example curl:
    ```
    curl -X GET --location 'http://localhost:4000/utils/supported_openai_params?model=gpt-3.5-turbo-16k' \
    --header 'Authorization: Bearer sk-1234'
    ```

    Raises:
        HTTPException: 400 if litellm cannot map `model` to a provider.
    """
    try:
        # Resolve the litellm model name to its provider; only the first two
        # return values are needed here.
        model, custom_llm_provider, _, _ = litellm.get_llm_provider(model=model)
        return {
            "supported_openai_params": litellm.get_supported_openai_params(
                model=model, custom_llm_provider=custom_llm_provider
            )
        }
    except Exception as e:
        # Chain the original exception so the real mapping failure is
        # preserved in tracebacks/logs instead of being silently discarded.
        raise HTTPException(
            status_code=400, detail={"error": "Could not map model={}".format(model)}
        ) from e
|
||||||
|
|
||||||
|
|
||||||
#### KEY MANAGEMENT ####
|
#### KEY MANAGEMENT ####
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue