mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
* fix(proxy_server.py): use default azure credentials to support azure non-client secret kms * fix(langsmith.py): raise error if credentials missing * feat(langsmith.py): support error logging for langsmith + standard logging payload Fixes https://github.com/BerriAI/litellm/issues/5738 * Fix hardcoding of schema in view check (#5749) * fix - deal with case when check view exists returns None (#5740) * Revert "fix - deal with case when check view exists returns None (#5740)" (#5741) This reverts commit535228159b
. * test(test_router_debug_logs.py): move to mock response * Fix hardcoding of schema --------- Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com> Co-authored-by: Krrish Dholakia <krrishdholakia@gmail.com> * fix(proxy_server.py): allow admin to disable ui via `DISABLE_ADMIN_UI` flag * fix(router.py): fix default model name value Fixes55db19a1e4 (r1763712148)
* fix(utils.py): fix unbound variable error * feat(rerank/main.py): add azure ai rerank endpoints Closes https://github.com/BerriAI/litellm/issues/5667 * feat(secret_detection.py): Allow configuring secret detection params Allows admin to control what plugins to run for secret detection. Prevents overzealous secret detection. * docs(secret_detection.md): add secret detection guardrail docs * fix: fix linting errors * fix - deal with case when check view exists returns None (#5740) * Revert "fix - deal with case when check view exists returns None (#5740)" (#5741) This reverts commit535228159b
. * Litellm fix router testing (#5748) * test: fix testing - azure changed content policy error logic * test: fix tests to use mock responses * test(test_image_generation.py): handle api instability * test(test_image_generation.py): handle azure api instability * fix(utils.py): fix unbounded variable error * fix(utils.py): fix unbounded variable error * test: refactor test to use mock response * test: mark flaky azure tests * Bump next from 14.1.1 to 14.2.10 in /ui/litellm-dashboard (#5753) Bumps [next](https://github.com/vercel/next.js) from 14.1.1 to 14.2.10. - [Release notes](https://github.com/vercel/next.js/releases) - [Changelog](https://github.com/vercel/next.js/blob/canary/release.js) - [Commits](https://github.com/vercel/next.js/compare/v14.1.1...v14.2.10) --- updated-dependencies: - dependency-name: next dependency-type: direct:production ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * [Fix] o1-mini causes pydantic warnings on `reasoning_tokens` (#5754) * add requester_metadata in standard logging payload * log requester_metadata in metadata * use StandardLoggingPayload for logging * docs StandardLoggingPayload * fix import * include standard logging object in failure * add test for requester metadata * handle completion_tokens_details * add test for completion_tokens_details * [Feat-Proxy-DataDog] Log Redis, Postgres Failure events on DataDog (#5750) * dd - start tracking redis status on dd * add async_service_succes_hook / failure hook in custom logger * add async_service_failure_hook * log service failures on dd * fix import error * add test for redis errors / warning * [Fix] Router/ Proxy - Tag Based routing, raise correct error when no deployments found and tag filtering is on (#5745) * fix tag routing - raise correct error when no model with tag based routing * fix error string from tag based routing * test router tag based routing * raise 401 error when no 
tags available for deployment * linting fix * [Feat] Log Request metadata on gcs bucket logging (#5743) * add requester_metadata in standard logging payload * log requester_metadata in metadata * use StandardLoggingPayload for logging * docs StandardLoggingPayload * fix import * include standard logging object in failure * add test for requester metadata * fix(litellm_logging.py): fix logging message * fix(rerank_api/main.py): fix linting errors * fix(custom_guardrails.py): maintain backwards compatibility for older guardrails * fix(rerank_api/main.py): fix cost tracking for rerank endpoints --------- Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: steffen-sbt <148480574+steffen-sbt@users.noreply.github.com> Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
74 lines
2.2 KiB
Python
74 lines
2.2 KiB
Python
# What is this?
|
|
## Unit Tests for guardrails config
|
|
import asyncio
|
|
import inspect
|
|
import os
|
|
import sys
|
|
import time
|
|
import traceback
|
|
import uuid
|
|
from datetime import datetime
|
|
|
|
import pytest
|
|
from pydantic import BaseModel
|
|
|
|
import litellm.litellm_core_utils
|
|
import litellm.litellm_core_utils.litellm_logging
|
|
|
|
sys.path.insert(0, os.path.abspath("../.."))
|
|
from typing import Any, List, Literal, Optional, Tuple, Union
|
|
from unittest.mock import AsyncMock, MagicMock, patch
|
|
|
|
import litellm
|
|
from litellm import Cache, completion, embedding
|
|
from litellm.integrations.custom_logger import CustomLogger
|
|
from litellm.types.utils import LiteLLMCommonStrings
|
|
|
|
|
|
class CustomLoggingIntegration(CustomLogger):
    """Test logger that masks PII in the logged copy of a request.

    The response returned to the caller is left untouched; only the
    kwargs handed to logging callbacks get their message content
    replaced with a redacted placeholder.
    """

    def __init__(self) -> None:
        super().__init__()

    def logging_hook(
        self, kwargs: dict, result: Any, call_type: str
    ) -> Tuple[dict, Any]:
        """Redact the first message's content before it reaches the log.

        Returns the (possibly mutated) kwargs together with the original
        result, per the CustomLogger hook contract.
        """
        masked_text = "Hey, my name is [NAME]."
        raw_input: Optional[Any] = kwargs.get("input", None)
        raw_messages: Optional[List] = kwargs.get("messages", None)

        if call_type == "completion":
            # For completion calls both `input` and `messages` may carry the
            # chat messages; redact whichever is present. isinstance() on
            # None is False, so no separate None guard is needed.
            if isinstance(raw_input, list):
                raw_input[0]["content"] = masked_text
            if isinstance(raw_messages, list):
                raw_messages[0]["content"] = masked_text

        kwargs["input"] = raw_input
        kwargs["messages"] = raw_messages
        return kwargs, result
|
|
|
|
|
|
def test_guardrail_masking_logging_only():
    """
    Assert response is unmasked.

    Assert logged response is masked.
    """
    handler = CustomLoggingIntegration()

    with patch.object(handler, "log_success_event", new=MagicMock()) as mock_call:
        litellm.callbacks = [handler]
        messages = [{"role": "user", "content": "Hey, my name is Peter."}]
        response = completion(
            model="gpt-3.5-turbo", messages=messages, mock_response="Hi Peter!"
        )

        # The caller-visible response must remain unmasked.
        assert response.choices[0].message.content == "Hi Peter!"  # type: ignore

        # Success callbacks fire on a background thread; give them a
        # moment before inspecting the mock.
        time.sleep(3)
        mock_call.assert_called_once()

        logged_content = mock_call.call_args.kwargs["kwargs"]["messages"][0]["content"]
        print(logged_content)

        # The logged copy of the request must have been redacted by the hook.
        assert logged_content == "Hey, my name is [NAME]."
|