fix(utils.py): allow url to be passed to model cost map

This commit is contained in:
Krrish Dholakia 2023-10-24 15:38:05 -07:00
parent cb100d19da
commit 5c8a4f51d5
3 changed files with 27 additions and 12 deletions

View file

@@ -9,10 +9,10 @@ failure_callback: List[Union[str, Callable]] = []
set_verbose = False
email: Optional[
str
] = None # for hosted dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
] = None # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
token: Optional[
str
] = None # for hosted dashboard. Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
] = None # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
telemetry = True
max_tokens = 256 # OpenAI Defaults
drop_params = False
@@ -34,9 +34,9 @@ aleph_alpha_key: Optional[str] = None
nlp_cloud_key: Optional[str] = None
use_client: bool = False
logging: bool = True
caching: bool = False # deprecated soon
caching_with_models: bool = False # if you want the caching key to be model + prompt # deprecated soon
cache: Optional[Cache] = None # cache object
caching: bool = False # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
caching_with_models: bool = False # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
cache: Optional[Cache] = None # cache object <- use this - https://docs.litellm.ai/docs/caching
model_alias_map: Dict[str, str] = {}
max_budget: float = 0.0 # set the max budget across all providers
_current_cost = 0 # private variable, used if max budget is set
@@ -44,11 +44,10 @@ error_logs: Dict = {}
add_function_to_prompt: bool = False # if function calling not supported by api, append function call details to system prompt
client_session: Optional[requests.Session] = None
model_fallbacks: Optional[List] = None
model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
#############################################
def get_model_cost_map(url: Optional[str]=None):
if url is None:
url = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
try:
response = requests.get(url)
response.raise_for_status() # Raise an exception if request is unsuccessful
@@ -60,7 +59,7 @@ def get_model_cost_map(url: Optional[str]=None):
with importlib.resources.open_text("litellm", "model_prices_and_context_window_backup.json") as f:
content = json.load(f)
return content
model_cost = get_model_cost_map()
model_cost = get_model_cost_map(url=model_cost_map_url)
custom_prompt_dict:Dict[str, dict] = {}
####### THREAD-SPECIFIC DATA ###################
class MyLocal(threading.local):

View file

@@ -24,4 +24,13 @@ def test_update_model_cost():
except Exception as e:
pytest.fail(f"An error occurred: {e}")
test_update_model_cost()
# test_update_model_cost()
def test_update_model_cost_map_url():
    """Register models from a hosted JSON cost map and check gpt-4 pricing was loaded.

    Passes a URL (rather than a dict) to litellm.register_model, exercising the
    new string-accepting code path, then verifies the gpt-4 input token price
    from the fetched map is present in litellm.model_cost.
    """
    cost_map_url = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
    try:
        litellm.register_model(model_cost=cost_map_url)
        # NOTE(review): pins the upstream gpt-4 price; will need updating if the hosted map changes.
        assert litellm.model_cost["gpt-4"]["input_cost_per_token"] == 0.00003
    except Exception as e:
        pytest.fail(f"An error occurred: {e}")
test_update_model_cost_map_url()

View file

@@ -1049,9 +1049,10 @@ def completion_cost(
return 0.0 # this should not block a users execution path
####### HELPER FUNCTIONS ################
def register_model(model_cost: dict):
def register_model(model_cost: Union[str, dict]):
"""
Register new / Override existing models (and their pricing) to specific providers.
Provide EITHER a model cost dictionary or a url to a hosted json blob
Example usage:
model_cost_dict = {
"gpt-4": {
@@ -1063,9 +1064,15 @@ def register_model(model_cost: dict):
},
}
"""
for key, value in model_cost.items():
loaded_model_cost = {}
if isinstance(model_cost, dict):
loaded_model_cost = model_cost
elif isinstance(model_cost, str):
loaded_model_cost = litellm.get_model_cost_map(url=model_cost)
for key, value in loaded_model_cost.items():
## override / add new keys to the existing model cost dictionary
litellm.model_cost[key] = model_cost[key]
litellm.model_cost[key] = loaded_model_cost[key]
# add new model names to provider lists
if value.get('litellm_provider') == 'openai':