Mirror of https://github.com/BerriAI/litellm.git
Latest commit:
* BaseRequestResponseFetchFromCustomLogger
* get_active_base_request_response_fetch_from_custom_logger
* get_request_response_payload
* ui_view_request_response_for_request_id
* fix uiSpendLogDetailsCall
* fix get_request_response_payload
* ui fix RequestViewer
* use 1 class AdditionalLoggingUtils
* ui_view_request_response_for_request_id
* cache the prefetch logs details
* refactor prefetch
* test view request/resp logs
* fix code quality
* fix get_request_response_payload
* uninstall posthog prevent it from being added in ci/cd
* fix posthog
* fix traceloop test
* fix linting error
import os
import sys
import time

import pytest
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

import litellm

sys.path.insert(0, os.path.abspath("../.."))


@pytest.fixture()
def exporter():
    # Initialize Traceloop with an in-memory exporter so spans can be
    # inspected without a real backend. (A skip marker on a fixture has no
    # effect and newer pytest versions reject it, so the skip lives on the
    # test below instead.)
    from traceloop.sdk import Traceloop

    exporter = InMemorySpanExporter()
    Traceloop.init(
        app_name="test_litellm",
        disable_batch=True,
        exporter=exporter,
    )
    litellm.success_callback = ["traceloop"]
    litellm.set_verbose = True

    return exporter


@pytest.mark.parametrize("model", ["claude-3-5-haiku-20241022", "gpt-3.5-turbo"])
@pytest.mark.skip(reason="Traceloop use `otel` integration instead")
def test_traceloop_logging(exporter, model):
    # Mocked completion call; the Traceloop success callback should still fire.
    litellm.completion(
        model=model,
        messages=[{"role": "user", "content": "This is a test"}],
        max_tokens=1000,
        temperature=0.7,
        timeout=5,
        mock_response="hi",
    )
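

# --- Added sketch, not part of the original file: if the skip marker were
# removed, the fixture's InMemorySpanExporter could be used to assert that the
# Traceloop callback actually recorded a span. `get_finished_spans()` is the
# standard InMemorySpanExporter API; the test name and the flush wait below
# are assumptions, not litellm's own test code.
@pytest.mark.skip(reason="Traceloop use `otel` integration instead")
def test_traceloop_span_recorded(exporter):
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "This is a test"}],
        mock_response="hi",
        timeout=5,
    )
    time.sleep(1)  # allow the success callback to run before reading spans
    assert len(exporter.get_finished_spans()) > 0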