diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index c75deb7212..e84e64bc54 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 3c19ed7225..8d25a43261 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/main.py b/litellm/main.py
index ad197a9191..7803de2a96 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -164,7 +164,6 @@ def completion(
        os.environ["REPLICATE_API_TOKEN"] = api_key
      elif litellm.replicate_key:
        os.environ["REPLICATE_API_TOKEN"] = litellm.replicate_key
-
      prompt = " ".join([message["content"] for message in messages])
      input = {"prompt": prompt}
      if max_tokens != float('inf'):
@@ -199,7 +198,7 @@ def completion(
      if api_key:
        os.environ["ANTHROPIC_API_KEY"] = api_key
      elif litellm.anthropic_key:
-       os.environ["ANTHROPIC_API_TOKEN"] = litellm.anthropic_key
+       os.environ["ANTHROPIC_API_KEY"] = litellm.anthropic_key
      prompt = f"{HUMAN_PROMPT}"
      for message in messages:
        if "role" in message:
diff --git a/litellm/tests/test_api_key_param.py b/litellm/tests/test_api_key_param.py
index 7ffffcda88..6213730f51 100644
--- a/litellm/tests/test_api_key_param.py
+++ b/litellm/tests/test_api_key_param.py
@@ -15,14 +15,27 @@ def logger_fn(model_call_object: dict):
 
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
-print(os.environ)
-temp_key = os.environ.get("OPENAI_API_KEY")
-os.environ["OPENAI_API_KEY"] = "bad-key"
+## Test 1: Setting key dynamically
+temp_key = os.environ.get("ANTHROPIC_API_KEY")
+os.environ["ANTHROPIC_API_KEY"] = "bad-key"
 # test on openai completion call
 try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn, api_key=temp_key)
+    response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn, api_key=temp_key)
     print(f"response: {response}")
 except:
     print(f"error occurred: {traceback.format_exc()}")
     pass
-os.environ["OPENAI_API_KEY"] = temp_key
+os.environ["ANTHROPIC_API_KEY"] = temp_key
+
+
+## Test 2: Setting key via __init__ params
+litellm.anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
+os.environ.pop("ANTHROPIC_API_KEY")
+# test on openai completion call
+try:
+    response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
+    print(f"response: {response}")
+except:
+    print(f"error occurred: {traceback.format_exc()}")
+    pass
+os.environ["ANTHROPIC_API_KEY"] = temp_key
diff --git a/litellm/utils.py b/litellm/utils.py
index 4f671a2314..411c21f245 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -417,7 +417,7 @@ def exception_type(model, original_exception):
     exception_type = ""
     logging(model=model, additional_args={"error_str": error_str, "exception_type": exception_type, "original_exception": original_exception}, logger_fn=user_logger_fn)
     if "claude" in model: #one of the anthropics
-      if "status_code" in original_exception:
+      if hasattr(original_exception, "status_code"):
        print_verbose(f"status_code: {original_exception.status_code}")
        if original_exception.status_code == 401:
          exception_mapping_worked = True
@@ -428,6 +428,9 @@ def exception_type(model, original_exception):
        elif original_exception.status_code == 429:
          exception_mapping_worked = True
          raise RateLimitError(f"AnthropicException - {original_exception.message}")
+      elif "Could not resolve authentication method. Expected either api_key or auth_token to be set." in error_str:
+        exception_mapping_worked = True
+        raise AuthenticationError(f"AnthropicException - {error_str}")
     elif "replicate" in model:
       if "Incorrect authentication token" in error_str:
         exception_mapping_worked = True
diff --git a/pyproject.toml b/pyproject.toml
index 98ace7d78b..15d3b592e3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.353"
+version = "0.1.354"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
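Note on the litellm/utils.py change above: "status_code" in original_exception is a
membership test, and exception objects are not containers or iterables, so the old
check raised a TypeError before any Anthropic error could be mapped. hasattr() checks
for the attribute directly, and exceptions without a status_code (such as Anthropic's
"Could not resolve authentication method" error) now fall through to the new error_str
match. A minimal sketch of the difference; the APIError class here is a hypothetical
stand-in, not the real Anthropic SDK exception type:

# Hypothetical stand-in for an SDK exception carrying a status_code.
class APIError(Exception):
    def __init__(self, message, status_code):
        super().__init__(message)
        self.message = message
        self.status_code = status_code

exc = APIError("invalid x-api-key", status_code=401)

# Old check: `in` requires a container/iterable, so it raises
# "TypeError: argument of type 'APIError' is not iterable"
# before any status-code mapping can run.
try:
    "status_code" in exc
except TypeError as e:
    print(f"membership test failed: {e}")

# New check: hasattr() works on any object, so the mapping logic runs,
# and attribute-less exceptions fall through to the error_str branch.
if hasattr(exc, "status_code"):
    print(f"status_code: {exc.status_code}")  # 401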