forked from phoenix/litellm-mirror
(feat) proxy_server display model list when user does not specify model
This commit is contained in:
parent
e987d31028
commit
051b21b61f
1 changed file with 18 additions and 5 deletions
@@ -112,11 +112,18 @@ def data_generator(response):
 
 #### API ENDPOINTS ####
 @router.get("/models") # if project requires model list
 def model_list():
-    return dict(
-        data=[{"id": user_model, "object": "model", "created": 1677610602, "owned_by": "openai"}],
-        object="list",
-    )
-
+    if user_model != None:
+        return dict(
+            data=[{"id": user_model, "object": "model", "created": 1677610602, "owned_by": "openai"}],
+            object="list",
+        )
+    else:
+        all_models = litellm.model_list
+        return dict(
+            data = [{"id": model, "object": "model", "created": 1677610602, "owned_by": "openai"} for model in all_models],
+            object="list",
+        )
+
 @router.post("/completions")
 async def completion(request: Request):
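Note on the hunk above: GET /models now branches on whether a single model was pinned at startup (user_model); otherwise it serves everything in litellm.model_list, in the OpenAI list-models response shape. A minimal client-side sketch of what that looks like, assuming the proxy is reachable at http://localhost:8000 (host and port are illustrative, not part of this diff):

import requests

# Ask the proxy which models it will serve. The response mirrors
# OpenAI's /models format: {"object": "list", "data": [{"id": ...}, ...]}
resp = requests.get("http://localhost:8000/models")
resp.raise_for_status()
for model in resp.json()["data"]:
    print(model["id"])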
@@ -126,6 +133,12 @@ async def completion(request: Request):
         data["model"] = user_model
     if user_api_base:
         data["api_base"] = user_api_base
+    # override with user settings
+    if user_temperature:
+        data["temperature"] = user_temperature
+    if user_max_tokens:
+        data["max_tokens"] = user_max_tokens
+
     ## check for custom prompt template ##
     litellm.register_prompt_template(
         model=user_model,
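Note on the hunk above: the completion route now layers startup-time user settings over each request body, so values supplied when the proxy was launched win over per-request values. A condensed sketch of that override pattern, assuming user_temperature and user_max_tokens were populated at startup (the standalone function and sample values are illustrative, not from this diff):

# Module-level settings, normally filled in from CLI flags at startup.
user_temperature = 0.7   # illustrative value
user_max_tokens = 256    # illustrative value

def apply_user_overrides(data: dict) -> dict:
    # Only override when a setting was actually supplied; None/0 is
    # treated as "not set", matching the truthiness checks in the diff.
    if user_temperature:
        data["temperature"] = user_temperature
    if user_max_tokens:
        data["max_tokens"] = user_max_tokens
    return data

print(apply_user_overrides({"model": "gpt-3.5-turbo", "temperature": 0.2}))
# -> {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'max_tokens': 256}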