Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 03:04:13 +00:00.
* test(tests): add unit testing for litellm_proxy integration * fix(cost_calculator.py): fix tracking cost in sdk when calling proxy * fix(main.py): respect litellm.api_base on `vertex_ai/` and `gemini/` routes * fix(main.py): consistently support custom api base across gemini + vertexai on embedding + completion * feat(vertex_ai/): test * fix: fix linting error * test: set api base as None before starting loadtest
34 lines · 774 B · Python
# Standard-library imports.
import json
import os
import sys

import pytest

# Make the repository root importable so `litellm` resolves from the source
# checkout rather than any installed copy.
sys.path.insert(0, os.path.abspath("../.."))

from unittest.mock import MagicMock, patch

from pydantic import BaseModel

from litellm.cost_calculator import response_cost_calculator
|
def test_cost_calculator_with_response_cost_in_additional_headers():
    """Verify that a provider-reported response cost is honored.

    When the response object carries a
    ``llm_provider-x-litellm-response-cost`` entry inside
    ``_hidden_params["additional_headers"]``, ``response_cost_calculator``
    should return that value directly instead of computing a cost from the
    model/token information.
    """
    expected_cost = 1000

    class MockResponse(BaseModel):
        # NOTE(review): leading-underscore class attributes on a pydantic
        # BaseModel are treated as private attrs; litellm reads this via
        # `response_object._hidden_params` — confirm against pydantic version.
        _hidden_params = {
            "additional_headers": {
                "llm_provider-x-litellm-response-cost": expected_cost
            }
        }

    actual_cost = response_cost_calculator(
        response_object=MockResponse(),
        model="",
        custom_llm_provider=None,
        call_type="",
        optional_params={},
        cache_hit=None,
        base_model=None,
    )

    assert actual_cost == expected_cost