diff --git a/litellm/__init__.py b/litellm/__init__.py
index 2e9a77496..a9fd32634 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -4,7 +4,7 @@ input_callback: List[str] = []
 success_callback: List[str] = []
 failure_callback: List[str] = []
 set_verbose = False
-debugger_email = None # for debugging dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
+email = None # for hosted dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
 telemetry = True
 max_tokens = 256 # OpenAI Defaults
 retry = True
@@ -263,7 +263,8 @@ from .utils import (
     completion_cost,
     get_litellm_params,
     Logging,
-    acreate
+    acreate,
+    get_model_list
 )
 from .main import *  # type: ignore
 from .integrations import *
diff --git a/litellm/__pycache__/__init__.cpython-311.pyc b/litellm/__pycache__/__init__.cpython-311.pyc
index a833b03a9..e1fe4f2c9 100644
Binary files a/litellm/__pycache__/__init__.cpython-311.pyc and b/litellm/__pycache__/__init__.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index a8c9457cb..89d8eddd5 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/tests/test_get_model_list.py b/litellm/tests/test_get_model_list.py
new file mode 100644
index 000000000..570a0e207
--- /dev/null
+++ b/litellm/tests/test_get_model_list.py
@@ -0,0 +1,8 @@
+import os, sys, traceback
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+) # Adds the parent directory to the system path
+from litellm import get_model_list
+
+print(get_model_list())
\ No newline at end of file
diff --git a/litellm/utils.py b/litellm/utils.py
index 4c4ec2aba..ad6af63c4 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -41,7 +41,7 @@ callback_list: Optional[List[str]] = []
 user_logger_fn = None
 additional_details: Optional[Dict[str, str]] = {}
 local_cache: Optional[Dict[str, str]] = {}
-
+last_fetched_at = None
 ######## Model Response #########################
 # All liteLLM Model responses will be in this format, Follows the OpenAI Format
 # https://docs.litellm.ai/docs/completion/output
@@ -1007,6 +1007,24 @@ def handle_success(args, kwargs, result, start_time, end_time):
         )
         pass

+def get_model_list():
+    global last_fetched_at
+    # if user is using hosted product -> get their updated model list - refresh every 5 minutes
+    user_email = (os.getenv("LITELLM_EMAIL") or litellm.email)
+    if user_email:
+        time_delta = 0
+        if last_fetched_at != None:
+            current_time = time.time()
+            time_delta = current_time - last_fetched_at
+        if time_delta > 300 or last_fetched_at == None:
+            # make the api call
+            last_fetched_at = time.time()
+            response = requests.get(url="http://api.litellm.ai/get_model_list", headers={"content-type": "application/json"}, data=json.dumps({"user_email": user_email}))
+            print_verbose(f"get_model_list response: {response.text}")
+            model_list = response.json()["model_list"]
+            return model_list
+    # return litellm model list by default
+    return litellm.model_list

 def acreate(*args, **kwargs):  ## Thin client to handle the acreate langchain call
     return litellm.acompletion(*args, **kwargs)
diff --git a/pyproject.toml b/pyproject.toml
index 857b07b2f..43d85c960 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.448"
+version = "0.1.449"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
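For anyone trying this patch locally, a minimal usage sketch of the new get_model_list helper follows. The email address is a placeholder, and the commented behavior is read directly off the patch above: with no email configured the function returns the package's static litellm.model_list, and the 5-minute refresh window is tracked via the module-level last_fetched_at.

import os
from litellm import get_model_list

# With neither LITELLM_EMAIL nor litellm.email set, the helper skips the
# hosted-product branch entirely and returns the static litellm.model_list.
print(get_model_list())

# With a hosted-dashboard email set (placeholder address), the first call
# fetches the user's model list from http://api.litellm.ai/get_model_list.
# Per the patch, calls within the next 300 seconds skip the HTTP request
# and fall back to litellm.model_list rather than a cached remote copy.
os.environ["LITELLM_EMAIL"] = "user@example.com"
print(get_model_list())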