diff --git a/litellm/main.py b/litellm/main.py
index 746c26a38d..9412f46f59 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -356,7 +356,10 @@ def completion(
         if model_list is not None:
             deployments = [m["litellm_params"] for m in model_list if m["model_name"] == model]
             return batch_completion_models(deployments=deployments, **args)
-
+        if litellm.model_alias_map and model in litellm.model_alias_map:
+            model = litellm.model_alias_map[
+                model
+            ]  # update the model to the actual value if an alias has been passed in
         model_response = ModelResponse()
         if kwargs.get('azure', False) == True: # don't remove flag check, to remain backwards compatible for repos like Codium
diff --git a/litellm/utils.py b/litellm/utils.py
index 1a2009672a..0268bf99a6 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2246,11 +2246,6 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_
     dynamic_api_key = None
     # check if llm provider provided
-    if litellm.model_alias_map and model in litellm.model_alias_map:
-        model = litellm.model_alias_map[
-            model
-        ]  # update the model to the actual value if an alias has been passed in
-
     if custom_llm_provider:
         return model, custom_llm_provider, dynamic_api_key, api_base
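
Note on the change above: alias resolution via litellm.model_alias_map now happens once in completion() instead of inside get_llm_provider(). A minimal usage sketch under that reading; the alias name "my-gpt-alias" and the mapped model are illustrative values, not part of this diff:

    import litellm
    from litellm import completion

    # Illustrative alias mapping: the key is hypothetical, the value is a real model name.
    litellm.model_alias_map = {"my-gpt-alias": "gpt-3.5-turbo"}

    # With this patch, completion() rewrites "my-gpt-alias" to "gpt-3.5-turbo" up front,
    # so downstream helpers such as get_llm_provider() only see the resolved model name.
    response = completion(
        model="my-gpt-alias",
        messages=[{"role": "user", "content": "Hello"}],
    )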