adding hosted get model list

Krrish Dholakia 2023-08-22 07:11:49 -07:00
parent 7a22dc7f84
commit 4b34eb1414
6 changed files with 31 additions and 4 deletions


@@ -41,7 +41,7 @@ callback_list: Optional[List[str]] = []
user_logger_fn = None
additional_details: Optional[Dict[str, str]] = {}
local_cache: Optional[Dict[str, str]] = {}
last_fetched_at = None
######## Model Response #########################
# All liteLLM Model responses will be in this format, Follows the OpenAI Format
# https://docs.litellm.ai/docs/completion/output
@@ -1007,6 +1007,24 @@ def handle_success(args, kwargs, result, start_time, end_time):
)
pass
def get_model_list():
    global last_fetched_at
    # if user is using hosted product -> get their updated model list - refresh every 5 minutes
    user_email = (os.getenv("LITELLM_EMAIL") or litellm.email)
    if user_email:
        time_delta = 0
        if last_fetched_at != None:
            current_time = time.time()
            time_delta = current_time - last_fetched_at
        if time_delta > 300 or last_fetched_at == None:
            # make the api call
            last_fetched_at = time.time()
            response = requests.get(url="http://api.litellm.ai/get_model_list", headers={"content-type": "application/json"}, data=json.dumps({"user_email": user_email}))
            print_verbose(f"get_model_list response: {response.text}")
            model_list = response.json()["model_list"]
            return model_list
    # return litellm model list by default
    return litellm.model_list
def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
return litellm.acompletion(*args, **kwargs)
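
For reference, a minimal sketch of how the new helper might be called from user code. It assumes get_model_list is re-exported at the package level (presumably via one of the other files touched in this commit) and uses an illustrative email address; both are assumptions, not part of this diff.

import os
import litellm

# Without LITELLM_EMAIL (or litellm.email) set, the helper simply
# returns the static litellm.model_list shipped with the package.
print(litellm.get_model_list()[:5])

# With a hosted-product email set, the list is fetched from
# http://api.litellm.ai/get_model_list and refreshed at most once
# every 5 minutes (300 seconds).
os.environ["LITELLM_EMAIL"] = "user@example.com"  # hypothetical account email
print(litellm.get_model_list()[:5])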