From 051b21b61f1286cd3c3a59761d150739f974446b Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Sat, 7 Oct 2023 17:19:02 -0700
Subject: [PATCH] (feat) proxy_server display model list when user does not
 specify model

---
 litellm/proxy/proxy_server.py | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 36b96c754..c2e7334af 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -112,11 +112,18 @@ def data_generator(response):
 
 #### API ENDPOINTS ####
 @router.get("/models") # if project requires model list
-def model_list():
-    return dict(
-        data=[{"id": user_model, "object": "model", "created": 1677610602, "owned_by": "openai"}],
-        object="list",
-    )
+def model_list():
+    if user_model != None:
+        return dict(
+            data=[{"id": user_model, "object": "model", "created": 1677610602, "owned_by": "openai"}],
+            object="list",
+        )
+    else:
+        all_models = litellm.model_list
+        return dict(
+            data = [{"id": model, "object": "model", "created": 1677610602, "owned_by": "openai"} for model in all_models],
+            object="list",
+        )
 
 @router.post("/completions")
 async def completion(request: Request):
@@ -126,6 +133,12 @@ async def completion(request: Request):
         data["model"] = user_model
     if user_api_base:
         data["api_base"] = user_api_base
+    # override with user settings
+    if user_temperature:
+        data["temperature"] = user_temperature
+    if user_max_tokens:
+        data["max_tokens"] = user_max_tokens
+
     ## check for custom prompt template ##
     litellm.register_prompt_template(
         model=user_model,
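
Below is a minimal sketch of exercising the patched /models endpoint, assuming
the proxy is running locally; the host and port are placeholders, not part of
the patch:

    import requests

    # When user_model is set on the proxy, /models returns that single model;
    # with this patch it otherwise falls back to every entry in
    # litellm.model_list instead of returning a single hard-coded item.
    resp = requests.get("http://localhost:8000/models")
    resp.raise_for_status()
    for model in resp.json()["data"]:
        print(model["id"])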
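A similar sketch for /completions shows the effect of the new override block:
values configured on the proxy (user_temperature, user_max_tokens) replace
whatever the client sends. Again, the URL, model name, and prompt below are
placeholder assumptions:

    import requests

    payload = {
        "model": "gpt-3.5-turbo",  # placeholder; replaced if user_model is set
        "prompt": "Say hello.",
        "temperature": 0.9,  # overwritten server-side when user_temperature is configured
    }
    resp = requests.post("http://localhost:8000/completions", json=payload)
    print(resp.json())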