(feat) proxy use default model in config

This commit is contained in:
ishaan-jaff 2023-11-09 13:08:23 -08:00
parent c4a948ff96
commit 4b4d9bf142

View file

@ -426,12 +426,20 @@ def litellm_completion(*args, **kwargs):
    if user_api_base:
        kwargs["api_base"] = user_api_base
    ## CHECK CONFIG ##
-   if llm_model_list and kwargs["model"] in [m["model_name"] for m in llm_model_list]:
+   if llm_model_list != None:
llm_models = [m["model_name"] for m in llm_model_list]
if kwargs["model"] in llm_models:
            for m in llm_model_list:
                if kwargs["model"] == m["model_name"]: # if user has specified a config, this will use the config
                    for key, value in m["litellm_params"].items():
                        kwargs[key] = value
                    break
else:
print_verbose("user sent model not in config, using default config model")
default_model = llm_model_list[0]
litellm_params = default_model.get('litellm_params', None)
for key, value in litellm_params.items():
kwargs[key] = value
    if call_type == "chat_completion":
        response = litellm.completion(*args, **kwargs)
    elif call_type == "text_completion":