adding hosted get model list

Krrish Dholakia 2023-08-22 07:11:49 -07:00
parent 909383bfb3
commit fd2eeaa6bc
6 changed files with 31 additions and 4 deletions

litellm/__init__.py

@@ -4,7 +4,7 @@ input_callback: List[str] = []
 success_callback: List[str] = []
 failure_callback: List[str] = []
 set_verbose = False
-debugger_email = None # for debugging dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
+email = None # for hosted dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
 telemetry = True
 max_tokens = 256 # OpenAI Defaults
 retry = True
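
The rename means the hosted dashboard is now enabled via litellm.email rather than litellm.debugger_email. A minimal usage sketch, assuming any registered email address works here (the address below is a placeholder); the get_model_list function added later in this commit also reads the LITELLM_EMAIL environment variable as a fallback:

    import litellm

    # Formerly litellm.debugger_email; enables the hosted dashboard and,
    # as of this commit, per-user model-list fetching in get_model_list().
    litellm.email = "you@example.com"  # placeholder address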
@@ -263,7 +263,8 @@ from .utils import (
     completion_cost,
     get_litellm_params,
     Logging,
-    acreate
+    acreate,
+    get_model_list
 )
 from .main import * # type: ignore
 from .integrations import *

New file (get_model_list test script; path not shown)

@@ -0,0 +1,8 @@
+import os, sys, traceback
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+) # Adds the parent directory to the system path
+
+from litellm import get_model_list
+print(get_model_list())

litellm/utils.py

@@ -41,7 +41,8 @@ callback_list: Optional[List[str]] = []
 user_logger_fn = None
 additional_details: Optional[Dict[str, str]] = {}
 local_cache: Optional[Dict[str, str]] = {}
+last_fetched_at = None
 ######## Model Response #########################
 # All liteLLM Model responses will be in this format, Follows the OpenAI Format
 # https://docs.litellm.ai/docs/completion/output
@@ -1007,6 +1007,24 @@ def handle_success(args, kwargs, result, start_time, end_time):
         )
         pass

+def get_model_list():
+    global last_fetched_at
+    # if user is using the hosted product -> get their updated model list; refresh at most every 5 minutes
+    user_email = os.getenv("LITELLM_EMAIL") or litellm.email
+    if user_email:
+        time_delta = 0
+        if last_fetched_at is not None:
+            current_time = time.time()
+            time_delta = current_time - last_fetched_at
+        if time_delta > 300 or last_fetched_at is None:
+            # make the api call
+            last_fetched_at = time.time()
+            response = requests.get(url="http://api.litellm.ai/get_model_list", headers={"content-type": "application/json"}, data=json.dumps({"user_email": user_email}))
+            print_verbose(f"get_model_list response: {response.text}")
+            model_list = response.json()["model_list"]
+            return model_list
+    # return the static litellm model list by default
+    return litellm.model_list
 def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
     return litellm.acompletion(*args, **kwargs)
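
last_fetched_at and the 300-second check give get_model_list a simple time-based throttle: the hosted endpoint is queried at most once every 5 minutes. Note that the fetched list is not stored anywhere, so a second call inside that window falls through to the static litellm.model_list rather than the previously fetched result. A minimal usage sketch, assuming the package is installed and the email is a placeholder:

    import os
    import litellm

    os.environ["LITELLM_EMAIL"] = "you@example.com"  # placeholder; enables the hosted fetch
    models = litellm.get_model_list()  # first call queries api.litellm.ai
    again = litellm.get_model_list()   # within 5 minutes: returns litellm.model_list instead
    print(models)

One design note: sending a JSON body with a GET request is unconventional, and many servers and proxies ignore GET bodies, so passing user_email as a query parameter (or switching to POST) would be the more robust choice.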

pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.448"
+version = "0.1.449"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"