Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 18:24:20 +00:00)
fixing claude exception-mapping + set key bug

parent 1a4044091a
commit dd6b4db090
6 changed files with 24 additions and 9 deletions
2 binary files changed (contents not shown).
```diff
@@ -164,7 +164,6 @@ def completion(
         os.environ["REPLICATE_API_TOKEN"] = api_key
       elif litellm.replicate_key:
         os.environ["REPLICATE_API_TOKEN"] = litellm.replicate_key
-
       prompt = " ".join([message["content"] for message in messages])
       input = {"prompt": prompt}
       if max_tokens != float('inf'):
```
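The context lines above show how the Replicate branch flattens a chat-style messages list into the single prompt string Replicate expects. A standalone sketch of that flattening (the sample messages are illustrative, not from the repo):

```python
# Standalone illustration of the flattening done in the hunk above.
messages = [
    {"role": "user", "content": "Hello, how are you?"},
    {"role": "assistant", "content": "Doing well, thanks for asking."},
]

# Join every message's content into one string; role information is dropped.
prompt = " ".join([message["content"] for message in messages])
input = {"prompt": prompt}
print(input)  # {'prompt': 'Hello, how are you? Doing well, thanks for asking.'}
```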
```diff
@@ -199,7 +198,7 @@ def completion(
       if api_key:
         os.environ["ANTHROPIC_API_KEY"] = api_key
       elif litellm.anthropic_key:
-        os.environ["ANTHROPIC_API_TOKEN"] = litellm.anthropic_key
+        os.environ["ANTHROPIC_API_KEY"] = litellm.anthropic_key
       prompt = f"{HUMAN_PROMPT}"
       for message in messages:
         if "role" in message:
```
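This one-word change is the "set key bug" from the commit message: the Anthropic client looks for ANTHROPIC_API_KEY, so writing litellm.anthropic_key into ANTHROPIC_API_TOKEN meant the module-level key was silently ignored. A minimal sketch of the precedence the patched branch implements (resolve_anthropic_key is an illustrative helper, not a litellm function):

```python
import os

def resolve_anthropic_key(api_key=None, module_level_key=None):
    """Illustrative helper: an explicit api_key argument wins, then the
    module-level key (litellm.anthropic_key), else whatever is already
    set in the environment."""
    if api_key:
        os.environ["ANTHROPIC_API_KEY"] = api_key
    elif module_level_key:
        # Pre-fix this wrote to ANTHROPIC_API_TOKEN, which the Anthropic
        # client never reads, so the key effectively vanished.
        os.environ["ANTHROPIC_API_KEY"] = module_level_key
    return os.environ.get("ANTHROPIC_API_KEY")
```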
```diff
@@ -15,14 +15,27 @@ def logger_fn(model_call_object: dict):
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
 
-print(os.environ)
-temp_key = os.environ.get("OPENAI_API_KEY")
-os.environ["OPENAI_API_KEY"] = "bad-key"
+## Test 1: Setting key dynamically
+temp_key = os.environ.get("ANTHROPIC_API_KEY")
+os.environ["ANTHROPIC_API_KEY"] = "bad-key"
 # test on openai completion call
 try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn, api_key=temp_key)
+    response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn, api_key=temp_key)
     print(f"response: {response}")
 except:
     print(f"error occurred: {traceback.format_exc()}")
     pass
-os.environ["OPENAI_API_KEY"] = temp_key
+os.environ["ANTHROPIC_API_KEY"] = temp_key
+
+
+## Test 2: Setting key via __init__ params
+litellm.anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
+os.environ.pop("ANTHROPIC_API_KEY")
+# test on openai completion call
+try:
+    response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
+    print(f"response: {response}")
+except:
+    print(f"error occurred: {traceback.format_exc()}")
+    pass
+os.environ["ANTHROPIC_API_KEY"] = temp_key
```
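The rewritten test saves the real key, swaps in a bad or module-level key, and restores the environment afterwards so later tests are unaffected. From a caller's point of view, the two key-setting paths it exercises look like this (a sketch assuming a valid ANTHROPIC_API_KEY is present in the environment; the model name is taken from the test):

```python
import os
import litellm
from litellm import completion

messages = [{"role": "user", "content": "Hello, how are you?"}]

# Path 1: pass the key per call; completion() exports it for the provider SDK.
response = completion(model="claude-instant-1", messages=messages,
                      api_key=os.environ["ANTHROPIC_API_KEY"])

# Path 2: set it once on the module; completion() falls back to litellm.anthropic_key.
litellm.anthropic_key = os.environ["ANTHROPIC_API_KEY"]
response = completion(model="claude-instant-1", messages=messages)
```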
```diff
@@ -417,7 +417,7 @@ def exception_type(model, original_exception):
     exception_type = ""
     logging(model=model, additional_args={"error_str": error_str, "exception_type": exception_type, "original_exception": original_exception}, logger_fn=user_logger_fn)
     if "claude" in model: #one of the anthropics
-      if "status_code" in original_exception:
+      if hasattr(original_exception, "status_code"):
         print_verbose(f"status_code: {original_exception.status_code}")
         if original_exception.status_code == 401:
           exception_mapping_worked = True
```
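The other half of the commit message, the claude exception-mapping fix, is the hasattr change above: `"status_code" in original_exception` treats the exception object as a container and raises TypeError on a typical (non-iterable) exception instance, so the mapping below it never ran; `hasattr` probes the attribute directly. A small self-contained illustration (FakeAnthropicError is a stand-in, not an Anthropic SDK class):

```python
class FakeAnthropicError(Exception):
    """Stand-in for an SDK exception that carries an HTTP status code."""
    def __init__(self, status_code, message):
        super().__init__(message)
        self.status_code = status_code
        self.message = message

err = FakeAnthropicError(401, "invalid x-api-key")

# Old check: raises TypeError ("argument of type 'FakeAnthropicError' is not iterable")
try:
    "status_code" in err
except TypeError as e:
    print(f"old check blew up: {e}")

# New check: probes the attribute without relying on iteration at all.
print(hasattr(err, "status_code"))  # True
print(err.status_code)              # 401
```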
```diff
@@ -428,6 +428,9 @@ def exception_type(model, original_exception):
         elif original_exception.status_code == 429:
           exception_mapping_worked = True
           raise RateLimitError(f"AnthropicException - {original_exception.message}")
+      elif "Could not resolve authentication method. Expected either api_key or auth_token to be set." in error_str:
+        exception_mapping_worked = True
+        raise AuthenticationError(f"AnthropicException - {error_str}")
     elif "replicate" in model:
       if "Incorrect authentication token" in error_str:
         exception_mapping_worked = True
```
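The three added lines cover the failure mode where the Anthropic client refuses to start a request because no credentials were set at all; that exception carries no status_code, only the quoted message, so it is matched on error_str and surfaced as an authentication error just like a 401. A condensed, self-contained sketch of the mapping pattern (AuthError and RateLimitErr are local stand-ins, not litellm's exception classes):

```python
class AuthError(Exception): ...
class RateLimitErr(Exception): ...

def map_anthropic_exception(model: str, original_exception: Exception):
    """Condensed version of the pattern used in exception_type() above."""
    error_str = str(original_exception)
    if "claude" in model:
        if hasattr(original_exception, "status_code"):
            if original_exception.status_code == 401:
                raise AuthError(f"AnthropicException - {original_exception}")
            if original_exception.status_code == 429:
                raise RateLimitErr(f"AnthropicException - {original_exception}")
        elif "Could not resolve authentication method" in error_str:
            # No HTTP call was made, so there is no status_code; match the message.
            raise AuthError(f"AnthropicException - {error_str}")
    raise original_exception  # anything unmapped is re-raised as-is
```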
```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.353"
+version = "0.1.354"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
```