cache the model list response

Krrish Dholakia 2023-08-22 07:21:24 -07:00
parent fd2eeaa6bc
commit c4a4db2b34
4 changed files with 7 additions and 3 deletions

@@ -3,6 +3,9 @@ import os, sys, traceback
sys.path.insert(
    0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import get_model_list
print(get_model_list())
print(get_model_list())
# print(litellm.model_list)
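
The two back-to-back get_model_list() calls exercise the new caching path: the first call is expected to fetch the hosted model list, the second to be served from the module-level copy. A rough way to observe this, assuming LITELLM_EMAIL (or litellm.email) is set so the first call actually hits the API, is to time both calls; this is only an illustrative sketch, not part of the commit:

import time
import litellm
from litellm import get_model_list

start = time.time()
first = get_model_list()   # expected to hit http://api.litellm.ai/get_model_list
fetch_secs = time.time() - start

start = time.time()
second = get_model_list()  # within the 300s window -> should return litellm.model_list
cached_secs = time.time() - start

print(f"fetch: {fetch_secs:.3f}s, cached: {cached_secs:.3f}s")
print(second == litellm.model_list)  # True once the fetched list is stored on the module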

@@ -1019,10 +1019,11 @@ def get_model_list():
        if time_delta > 300 or last_fetched_at == None:
            # make the api call
            last_fetched_at = time.time()
            print(f"last_fetched_at: {last_fetched_at}")
            response = requests.get(url="http://api.litellm.ai/get_model_list", headers={"content-type": "application/json"}, data=json.dumps({"user_email": user_email}))
            print_verbose(f"get_model_list response: {response.text}")
            model_list = response.json()["model_list"]
            return model_list
            litellm.model_list = model_list # update the user's current litellm model list
    # return litellm model list by default
    return litellm.model_list
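
The change above implements a simple time-based cache: within the 300-second window the function skips the HTTP request and returns the list already stored on litellm.model_list, refreshing it otherwise. A minimal standalone sketch of the same pattern, using hypothetical names rather than litellm's actual module state, could look like this:

import json
import time
import requests

_cached_model_list = []   # stands in for litellm.model_list
_last_fetched_at = None   # timestamp of the last successful fetch

def get_cached_model_list(user_email, ttl_seconds=300):
    global _cached_model_list, _last_fetched_at
    now = time.time()
    if _last_fetched_at is None or now - _last_fetched_at > ttl_seconds:
        # stale (or never fetched) -> refresh from the hosted endpoint used in the diff
        response = requests.get(
            url="http://api.litellm.ai/get_model_list",
            headers={"content-type": "application/json"},
            data=json.dumps({"user_email": user_email}),
        )
        _cached_model_list = response.json()["model_list"]
        _last_fetched_at = now
    # fresh or stale, always hand back the cached copy
    return _cached_model_list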

@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "0.1.449"
version = "0.1.450"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"