diff --git a/litellm/__init__.py b/litellm/__init__.py
index d0b5cb6de9..e69063f62d 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -258,6 +258,7 @@ provider_list: List = [
     "oobabooga",
     "ollama",
     "deepinfra",
+    "perplexity",
     "custom", # custom apis
 ]
 
diff --git a/litellm/main.py b/litellm/main.py
index fa3bbc49cf..ab038186f8 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -263,7 +263,7 @@ def completion(
     if deployment_id != None: # azure llms
         model=deployment_id
         custom_llm_provider="azure"
-    model, custom_llm_provider, dynamic_api_key = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base)
+    model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base)
     model_api_key = get_api_key(llm_provider=custom_llm_provider, dynamic_api_key=api_key) # get the api key from the environment if required for the model
     if model_api_key and "sk-litellm" in model_api_key:
         api_base = "https://proxy.litellm.ai"
@@ -1497,7 +1497,7 @@ def embedding(
     Raises:
     - exception_type: If an exception occurs during the API call.
     """
-    model, custom_llm_provider, dynamic_api_key = get_llm_provider(model, custom_llm_provider)
+    model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model, custom_llm_provider)
     try:
         response = None
         logging = litellm_logging_obj