litellm-mirror/tests/logging_callback_tests/test_standard_logging_payload.py
Krish Dholakia 1e403a8447
Litellm dev 10 29 2024 (#6502)
* fix(core_helpers.py): return None instead of raising a "kwargs is None" error

Closes https://github.com/BerriAI/litellm/issues/6500

* docs(cost_tracking.md): cleanup doc

* fix(vertex_and_google_ai_studio.py): handle function call with no params passed in

Closes https://github.com/BerriAI/litellm/issues/6495

* test(test_router_timeout.py): add test for router timeout + retry logic

* test: update test to use module level values

* (fix) Prometheus - log Postgres DB latency and status on Prometheus (#6484)

* fix logging of DB failures on Prometheus

* unit testing log to otel wrapper

* unit testing for service logger + prometheus

* use LATENCY buckets for service logging

* fix service logging

* docs clarify vertex vs gemini

* (router_strategy/) ensure all async functions use async cache methods (#6489)

* fix router strategy

* use async set / get cache in router_strategy

* add coverage for router strategy

* fix imports

* fix batch_get_cache

* use async methods for least busy

* fix least-busy strategy to use async methods

* fix test_dual_cache_increment

* test async_get_available_deployment when routing_strategy="least-busy"

* (fix) proxy - fix when `STORE_MODEL_IN_DB` should be set (#6492)

* set store_model_in_db at the top

* correctly use store_model_in_db global

* (fix) `PrometheusServicesLogger` `_get_metric` should return metric in Registry  (#6486)

* fix logging of DB failures on Prometheus

* unit testing log to otel wrapper

* unit testing for service logger + prometheus

* use LATENCY buckets for service logging

* fix service logging

* fix _get_metric in prom services logger

* add clear doc string

* unit testing for prom service logger

* bump: version 1.51.0 → 1.51.1

* Add `azure/gpt-4o-mini-2024-07-18` to model_prices_and_context_window.json (#6477)

* Update utils.py (#6468)

Fixed missing keys

* (perf) Litellm redis router fix - ~100ms improvement (#6483)

* docs(exception_mapping.md): add missing exception types

Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183

* fix(main.py): register custom model pricing with specific key

Ensure custom model pricing is registered to the specific model+provider key combination
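
A hedged example of registering such pricing with litellm's `register_model`
API (model name and cost values below are illustrative):

    import litellm

    # Illustrative values; the point is the provider-qualified key
    # ("azure/my-deployment", not a bare model name), so custom pricing
    # cannot collide with a same-named model from another provider.
    litellm.register_model(
        {
            "azure/my-deployment": {
                "input_cost_per_token": 0.00001,
                "output_cost_per_token": 0.00003,
                "litellm_provider": "azure",
                "mode": "chat",
            }
        }
    )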

* test: make testing more robust for custom pricing

* fix(redis_cache.py): instrument otel logging for sync redis calls

ensures complete coverage for all redis cache calls

* refactor: pass parent_otel_span for redis caching calls in router

allows for more observability into what calls are causing latency issues

* test: update tests with new params

* refactor: ensure e2e otel tracing for router

* refactor(router.py): add more otel tracing across router

catch all latency issues for router requests

* fix: fix linting error

* fix(router.py): fix linting error

* fix: fix test

* test: fix tests

* fix(dual_cache.py): pass ttl to redis cache

* fix: fix param

* perf(cooldown_cache.py): improve cooldown cache to store results in memory for 5s, preventing a redis call on each request

reduces 100ms latency per call with caching enabled on router
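
A rough sketch of the pattern described, an in-memory TTL layer in front of
Redis (names hypothetical, not litellm's actual code):

    import time

    # Hypothetical sketch: serve cooldown reads from a short-lived in-memory
    # copy so Redis is hit at most once per 5-second window instead of on
    # every request.
    class CooldownReadCache:
        def __init__(self, redis_client, ttl_seconds=5.0):
            self.redis_client = redis_client
            self.ttl_seconds = ttl_seconds
            self._store = {}  # key -> (expires_at, value)

        def get(self, key):
            hit = self._store.get(key)
            if hit is not None and hit[0] > time.monotonic():
                return hit[1]  # fresh in-memory result, no Redis round trip
            value = self.redis_client.get(key)  # fall through to Redis
            self._store[key] = (time.monotonic() + self.ttl_seconds, value)
            return value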

* fix: fix test

* fix(cooldown_cache.py): handle if a result is None

* fix(cooldown_cache.py): add debug statements

* refactor(dual_cache.py): move to using an in-memory check for batch get cache, to prevent redis from being hit for every call

* fix(cooldown_cache.py): fix linting error

* refactor(prometheus.py): move to using the standard logging payload for reading remaining requests / tokens

Ensures Prometheus token tracking works for Anthropic as well

* fix: fix linting error

* fix(redis_cache.py): make sure ttl is always int (handle float values)

Fixes issue where redis_client.ex was not working correctly due to float ttl
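
A minimal sketch of the described guard (function name hypothetical):

    # Redis expiry (e.g. the `ex` argument to SET) expects whole seconds,
    # so a float ttl such as 5.0 is coerced to int before the call.
    def normalize_ttl(ttl):
        if isinstance(ttl, float):
            return int(ttl)
        return ttl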

* fix: fix linting error

* test: update test

* fix: fix linting error

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: Xingyao Wang <xingyao@all-hands.dev>
Co-authored-by: vibhanshu-ob <115142120+vibhanshu-ob@users.noreply.github.com>
2024-10-29 22:04:16 -07:00


"""
Unit tests for StandardLoggingPayloadSetup
"""
import json
import os
import sys
from datetime import datetime
from unittest.mock import AsyncMock
from pydantic.main import Model
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system-path
import pytest
import litellm
from litellm.types.utils import Usage
from litellm.litellm_core_utils.litellm_logging import StandardLoggingPayloadSetup


@pytest.mark.parametrize(
    "response_obj,expected_values",
    [
        # Test None input
        (None, (0, 0, 0)),
        # Test empty dict
        ({}, (0, 0, 0)),
        # Test valid usage dict
        (
            {
                "usage": {
                    "prompt_tokens": 10,
                    "completion_tokens": 20,
                    "total_tokens": 30,
                }
            },
            (10, 20, 30),
        ),
        # Test with litellm.Usage object
        (
            {"usage": Usage(prompt_tokens=15, completion_tokens=25, total_tokens=40)},
            (15, 25, 40),
        ),
        # Test invalid usage type
        ({"usage": "invalid"}, (0, 0, 0)),
        # Test None usage
        ({"usage": None}, (0, 0, 0)),
    ],
)
def test_get_usage(response_obj, expected_values):
    """
    Make sure values returned from get_usage are always integers
    """
    usage = StandardLoggingPayloadSetup.get_usage_from_response_obj(response_obj)

    # Check types
    assert isinstance(usage.prompt_tokens, int)
    assert isinstance(usage.completion_tokens, int)
    assert isinstance(usage.total_tokens, int)

    # Check values
    assert usage.prompt_tokens == expected_values[0]
    assert usage.completion_tokens == expected_values[1]
    assert usage.total_tokens == expected_values[2]
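

# For reference, a minimal sketch of the normalization contract the test above
# pins down. Illustrative only -- NOT the actual litellm implementation: any
# usage payload (None, dict, Usage object, or garbage) must come back as a
# Usage with plain-int token counts, defaulting to zero.
def _sketch_get_usage(response_obj):
    usage = (response_obj or {}).get("usage")
    if isinstance(usage, Usage):
        return usage
    if isinstance(usage, dict):
        return Usage(
            prompt_tokens=int(usage.get("prompt_tokens") or 0),
            completion_tokens=int(usage.get("completion_tokens") or 0),
            total_tokens=int(usage.get("total_tokens") or 0),
        )
    # None, missing, or non-dict usage falls back to all-zero counts
    return Usage(prompt_tokens=0, completion_tokens=0, total_tokens=0)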


def test_get_additional_headers():
    additional_headers = {
        "x-ratelimit-limit-requests": "2000",
        "x-ratelimit-remaining-requests": "1999",
        "x-ratelimit-limit-tokens": "160000",
        "x-ratelimit-remaining-tokens": "160000",
        "llm_provider-date": "Tue, 29 Oct 2024 23:57:37 GMT",
        "llm_provider-content-type": "application/json",
        "llm_provider-transfer-encoding": "chunked",
        "llm_provider-connection": "keep-alive",
        "llm_provider-anthropic-ratelimit-requests-limit": "2000",
        "llm_provider-anthropic-ratelimit-requests-remaining": "1999",
        "llm_provider-anthropic-ratelimit-requests-reset": "2024-10-29T23:57:40Z",
        "llm_provider-anthropic-ratelimit-tokens-limit": "160000",
        "llm_provider-anthropic-ratelimit-tokens-remaining": "160000",
        "llm_provider-anthropic-ratelimit-tokens-reset": "2024-10-29T23:57:36Z",
        "llm_provider-request-id": "req_01F6CycZZPSHKRCCctcS1Vto",
        "llm_provider-via": "1.1 google",
        "llm_provider-cf-cache-status": "DYNAMIC",
        "llm_provider-x-robots-tag": "none",
        "llm_provider-server": "cloudflare",
        "llm_provider-cf-ray": "8da71bdbc9b57abb-SJC",
        "llm_provider-content-encoding": "gzip",
        "llm_provider-x-ratelimit-limit-requests": "2000",
        "llm_provider-x-ratelimit-remaining-requests": "1999",
        "llm_provider-x-ratelimit-limit-tokens": "160000",
        "llm_provider-x-ratelimit-remaining-tokens": "160000",
    }
    additional_logging_headers = StandardLoggingPayloadSetup.get_additional_headers(
        additional_headers
    )
    assert additional_logging_headers == {
        "x_ratelimit_limit_requests": 2000,
        "x_ratelimit_remaining_requests": 1999,
        "x_ratelimit_limit_tokens": 160000,
        "x_ratelimit_remaining_tokens": 160000,
    }
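

# A hedged sketch of the mapping exercised above. Illustrative only -- NOT the
# actual litellm implementation: provider-prefixed and miscellaneous headers
# are dropped, and the four tracked rate-limit headers come back snake_cased
# with integer values.
_TRACKED_RATELIMIT_HEADERS = [
    "x-ratelimit-limit-requests",
    "x-ratelimit-remaining-requests",
    "x-ratelimit-limit-tokens",
    "x-ratelimit-remaining-tokens",
]


def _sketch_get_additional_headers(headers):
    return {
        key.replace("-", "_"): int(headers[key])
        for key in _TRACKED_RATELIMIT_HEADERS
        if key in headers
    }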