Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
anthropic prompt caching cost tracking (#5453)
* fix(utils.py): support 'drop_params' for embedding requests (Fixes https://github.com/BerriAI/litellm/issues/5444)
* feat(anthropic/cost_calculation.py): Support calculating cost for prompt caching on anthropic
* feat(types/utils.py): allows us to migrate to openai's equivalent, once that comes out
* fix: fix linting errors
* test: mark flaky test
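Of these, the anthropic/cost_calculation.py change is the headline. Anthropic reports cache writes and cache reads as separate usage counters and bills them at different rates from ordinary input tokens, so the cost tracker has to weight each bucket separately. Below is a minimal sketch of that arithmetic, assuming Anthropic's published multipliers (cache writes at 1.25x the base input rate, cache reads at 0.1x); the function name and signature are illustrative, not litellm's actual cost_calculation API:

```python
# Illustrative sketch only; not litellm's anthropic/cost_calculation.py API.
# Anthropic bills cache writes at 1.25x the base input-token rate and
# cache reads at 0.1x the base input-token rate.

def anthropic_prompt_cost(usage: dict, input_cost_per_token: float) -> float:
    """Input-side cost of one Anthropic call with prompt caching.

    `usage` mirrors the `usage` block of an Anthropic response, e.g.
    {"input_tokens": 120, "cache_creation_input_tokens": 2048,
     "cache_read_input_tokens": 0, "output_tokens": 350}.
    """
    uncached = usage.get("input_tokens", 0)
    cache_write = usage.get("cache_creation_input_tokens", 0)
    cache_read = usage.get("cache_read_input_tokens", 0)
    return (
        uncached * input_cost_per_token
        + cache_write * input_cost_per_token * 1.25  # cache-write premium
        + cache_read * input_cost_per_token * 0.10   # cache-read discount
    )


# Example: a model priced at $3 per 1M input tokens.
print(anthropic_prompt_cost(
    {"input_tokens": 120, "cache_creation_input_tokens": 2048,
     "cache_read_input_tokens": 0, "output_tokens": 350},
    input_cost_per_token=3e-6,
))
```

On a later call that hits the cache, the same tokens reappear under cache_read_input_tokens and cost a tenth of the base rate; that gap is what the new cost tracking surfaces.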
parent: 6aaa7a75cd
commit: aa9f1896c6
17 changed files with 432 additions and 84 deletions
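The utils.py fix is smaller but user-facing: 'drop_params' now applies to embedding requests as well, so a parameter the target provider does not support is dropped instead of raising an error. A hedged usage sketch follows; litellm.embedding and the global litellm.drop_params flag exist in litellm, but the model name and the dimensions parameter below are only an example of a provider/parameter mismatch:

```python
import litellm

# With drop_params enabled, litellm strips request parameters the target
# provider does not support instead of raising an error; after this commit
# that also covers embedding requests, not just chat completions.
litellm.drop_params = True

response = litellm.embedding(
    model="huggingface/BAAI/bge-large-en-v1.5",  # example non-OpenAI embedding model
    input=["prompt caching makes long system prompts cheap to reuse"],
    dimensions=256,  # OpenAI-style parameter; dropped for providers that reject it
)
print(response.usage)
```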
litellm/llms/base.py

@@ -1,11 +1,14 @@
 ## This is a template base class to be used for adding new LLM providers via API calls
+from typing import Any, Optional, Union
+
+import httpx
+import requests
+
 import litellm
-import httpx, requests
-from typing import Optional, Union
-from litellm.litellm_core_utils.litellm_logging import Logging
 
 
 class BaseLLM:
+
     _client_session: Optional[httpx.Client] = None
 
     def process_response(
@@ -14,7 +17,7 @@ class BaseLLM:
         response: Union[requests.Response, httpx.Response],
         model_response: litellm.utils.ModelResponse,
         stream: bool,
-        logging_obj: Logging,
+        logging_obj: Any,
         optional_params: dict,
         api_key: str,
         data: Union[dict, str],
@@ -33,7 +36,7 @@ class BaseLLM:
         response: Union[requests.Response, httpx.Response],
         model_response: litellm.utils.TextCompletionResponse,
         stream: bool,
-        logging_obj: Logging,
+        logging_obj: Any,
         optional_params: dict,
         api_key: str,
         data: Union[dict, str],
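Only the BaseLLM template from llms/base.py is shown above: its imports are regrouped and logging_obj is loosened from the concrete Logging class to Any, which is what lets the module drop the litellm_core_utils.litellm_logging import. The types/utils.py bullet from the commit message is the forward-looking half: Anthropic's cache counters are carried on the usage object so they can later be mapped onto OpenAI's equivalent (OpenAI has since exposed cache hits as usage.prompt_tokens_details.cached_tokens). A rough sketch of that shape, using a standalone dataclass rather than litellm's actual Usage type:

```python
from dataclasses import dataclass


@dataclass
class CacheAwareUsage:
    """Illustrative container; not litellm's actual Usage type."""
    input_tokens: int                     # uncached prompt tokens
    output_tokens: int
    cache_creation_input_tokens: int = 0  # Anthropic: tokens written to the cache
    cache_read_input_tokens: int = 0      # Anthropic: tokens served from the cache


def usage_from_anthropic(raw: dict) -> CacheAwareUsage:
    """Map the 'usage' block of an Anthropic response onto the container above."""
    return CacheAwareUsage(
        input_tokens=raw.get("input_tokens", 0),
        output_tokens=raw.get("output_tokens", 0),
        cache_creation_input_tokens=raw.get("cache_creation_input_tokens", 0),
        cache_read_input_tokens=raw.get("cache_read_input_tokens", 0),
    )
```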