mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 44s
* feat(base_llm): initial commit for common base config class Addresses code qa critique https://github.com/andrewyng/aisuite/issues/113#issuecomment-2512369132 * feat(base_llm/): add transform request/response abstract methods to base config class * feat(cohere-+-clarifai): refactor integrations to use common base config class * fix: fix linting errors * refactor(anthropic/): move anthropic + vertex anthropic to use base config * test: fix xai test * test: fix tests * fix: fix linting errors * test: comment out WIP test * fix(transformation.py): fix is pdf used check * fix: fix linting error
46 lines
1.4 KiB
Python
46 lines
1.4 KiB
Python
"""
|
|
This file contains common utils for anthropic calls.
|
|
"""
|
|
|
|
from typing import Optional, Union
|
|
|
|
import httpx
|
|
|
|
from litellm.llms.base_llm.transformation import BaseLLMException
|
|
|
|
|
|
class AnthropicError(BaseLLMException):
    """Error raised for failed Anthropic API calls.

    A thin subclass of the shared ``BaseLLMException`` so that callers can
    catch Anthropic-specific failures distinctly; all state handling is
    delegated to the base class.
    """

    def __init__(
        self,
        status_code: int,
        message,
        headers: Optional[httpx.Headers] = None,
    ):
        # No Anthropic-specific state — forward everything to the base class.
        super().__init__(status_code=status_code, message=message, headers=headers)
|
|
|
|
|
|
def process_anthropic_headers(headers: Union[httpx.Headers, dict]) -> dict:
    """Translate Anthropic response headers into OpenAI-compatible ones.

    Args:
        headers: The raw response headers from an Anthropic API call
            (``httpx.Headers`` or a plain dict-like mapping).

    Returns:
        A dict containing every original header re-keyed with an
        ``llm_provider-`` prefix, plus OpenAI-style ``x-ratelimit-*``
        entries for any Anthropic rate-limit headers that were present.
    """
    # Anthropic rate-limit header -> its OpenAI-compatible equivalent.
    # Data-driven mapping replaces four copy-pasted `if` blocks.
    anthropic_to_openai = {
        "anthropic-ratelimit-requests-limit": "x-ratelimit-limit-requests",
        "anthropic-ratelimit-requests-remaining": "x-ratelimit-remaining-requests",
        "anthropic-ratelimit-tokens-limit": "x-ratelimit-limit-tokens",
        "anthropic-ratelimit-tokens-remaining": "x-ratelimit-remaining-tokens",
    }
    openai_headers = {
        openai_key: headers[anthropic_key]
        for anthropic_key, openai_key in anthropic_to_openai.items()
        if anthropic_key in headers
    }

    # Preserve every raw header under a provider-scoped name so callers can
    # still inspect the original values.
    llm_response_headers = {
        "{}-{}".format("llm_provider", k): v for k, v in headers.items()
    }

    # openai_headers merged last: OpenAI-style keys win on any collision.
    additional_headers = {**llm_response_headers, **openai_headers}
    return additional_headers
|