fix(utils.py): allow url to be passed to model cost map

This commit is contained in:
Krrish Dholakia 2023-10-24 15:38:05 -07:00
parent 62b053e089
commit 558d2582b5
3 changed files with 27 additions and 12 deletions

View file

@@ -9,10 +9,10 @@ failure_callback: List[Union[str, Callable]] = []
set_verbose = False set_verbose = False
email: Optional[ email: Optional[
str str
] = None # for hosted dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging ] = None # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
token: Optional[ token: Optional[
str str
] = None # for hosted dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging ] = None # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
telemetry = True telemetry = True
max_tokens = 256 # OpenAI Defaults max_tokens = 256 # OpenAI Defaults
drop_params = False drop_params = False
@@ -34,9 +34,9 @@ aleph_alpha_key: Optional[str] = None
nlp_cloud_key: Optional[str] = None nlp_cloud_key: Optional[str] = None
use_client: bool = False use_client: bool = False
logging: bool = True logging: bool = True
caching: bool = False # deprecated son caching: bool = False # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
caching_with_models: bool = False # if you want the caching key to be model + prompt # deprecated soon caching_with_models: bool = False # # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
cache: Optional[Cache] = None # cache object cache: Optional[Cache] = None # cache object <- use this - https://docs.litellm.ai/docs/caching
model_alias_map: Dict[str, str] = {} model_alias_map: Dict[str, str] = {}
max_budget: float = 0.0 # set the max budget across all providers max_budget: float = 0.0 # set the max budget across all providers
_current_cost = 0 # private variable, used if max budget is set _current_cost = 0 # private variable, used if max budget is set
@@ -44,11 +44,10 @@ error_logs: Dict = {}
add_function_to_prompt: bool = False # if function calling not supported by api, append function call details to system prompt add_function_to_prompt: bool = False # if function calling not supported by api, append function call details to system prompt
client_session: Optional[requests.Session] = None client_session: Optional[requests.Session] = None
model_fallbacks: Optional[List] = None model_fallbacks: Optional[List] = None
model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
############################################# #############################################
def get_model_cost_map(url: Optional[str]=None): def get_model_cost_map(url: Optional[str]=None):
if url is None:
url = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
try: try:
response = requests.get(url) response = requests.get(url)
response.raise_for_status() # Raise an exception if request is unsuccessful response.raise_for_status() # Raise an exception if request is unsuccessful
@@ -60,7 +59,7 @@ def get_model_cost_map(url: Optional[str]=None):
with importlib.resources.open_text("litellm", "model_prices_and_context_window_backup.json") as f: with importlib.resources.open_text("litellm", "model_prices_and_context_window_backup.json") as f:
content = json.load(f) content = json.load(f)
return content return content
model_cost = get_model_cost_map() model_cost = get_model_cost_map(url=model_cost_map_url)
custom_prompt_dict:Dict[str, dict] = {} custom_prompt_dict:Dict[str, dict] = {}
####### THREAD-SPECIFIC DATA ################### ####### THREAD-SPECIFIC DATA ###################
class MyLocal(threading.local): class MyLocal(threading.local):

View file

@@ -24,4 +24,13 @@ def test_update_model_cost():
except Exception as e: except Exception as e:
pytest.fail(f"An error occurred: {e}") pytest.fail(f"An error occurred: {e}")
test_update_model_cost() # test_update_model_cost()
def test_update_model_cost_map_url():
try:
litellm.register_model(model_cost="https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json")
assert litellm.model_cost["gpt-4"]["input_cost_per_token"] == 0.00003
except Exception as e:
pytest.fail(f"An error occurred: {e}")
test_update_model_cost_map_url()

View file

@@ -1049,9 +1049,10 @@ def completion_cost(
return 0.0 # this should not block a users execution path return 0.0 # this should not block a users execution path
####### HELPER FUNCTIONS ################ ####### HELPER FUNCTIONS ################
def register_model(model_cost: dict): def register_model(model_cost: Union[str, dict]):
""" """
Register new / Override existing models (and their pricing) to specific providers. Register new / Override existing models (and their pricing) to specific providers.
Provide EITHER a model cost dictionary or a url to a hosted json blob
Example usage: Example usage:
model_cost_dict = { model_cost_dict = {
"gpt-4": { "gpt-4": {
@@ -1063,9 +1064,15 @@ def register_model(model_cost: dict):
}, },
} }
""" """
for key, value in model_cost.items(): loaded_model_cost = {}
if isinstance(model_cost, dict):
loaded_model_cost = model_cost
elif isinstance(model_cost, str):
loaded_model_cost = litellm.get_model_cost_map(url=model_cost)
for key, value in loaded_model_cost.items():
## override / add new keys to the existing model cost dictionary ## override / add new keys to the existing model cost dictionary
litellm.model_cost[key] = model_cost[key] litellm.model_cost[key] = loaded_model_cost[key]
# add new model names to provider lists # add new model names to provider lists
if value.get('litellm_provider') == 'openai': if value.get('litellm_provider') == 'openai':