litellm-mirror/tests/local_testing/test_traceloop.py
Ishaan Jaff 00c596a852
(Feat) - Allow viewing Request/Response Logs stored in GCS Bucket (#8449)
* BaseRequestResponseFetchFromCustomLogger

* get_active_base_request_response_fetch_from_custom_logger

* get_request_response_payload

* ui_view_request_response_for_request_id

* fix uiSpendLogDetailsCall

* fix get_request_response_payload

* ui fix RequestViewer

* use 1 class AdditionalLoggingUtils

* ui_view_request_response_for_request_id

* cache the prefetch logs details

* refactor prefetch

* test view request/resp logs

* fix code quality

* fix get_request_response_payload

* uninstall posthog
prevent it from being added in ci/cd

* fix posthog

* fix traceloop test

* fix linting error
2025-02-10 20:38:55 -08:00
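
The commit bullets above describe a UI path for fetching stored request/response payloads back from a custom logger (e.g. a GCS bucket logger). The snippet below is only an illustrative sketch of what such an interface could look like: the class and method names echo the bullets, but every signature, return shape, and the in-memory backend used here are assumptions made for illustration, not litellm's actual implementation.

# Hypothetical sketch only: names echo the commit bullets above, but the
# signatures, return shape, and storage layout are assumptions, not litellm's API.
import json
from datetime import datetime
from typing import Optional


class AdditionalLoggingUtils:
    """Assumed base interface: custom loggers that can serve a stored
    request/response payload back to the UI for a given request id."""

    async def get_request_response_payload(
        self,
        request_id: str,
        start_time_utc: Optional[datetime] = None,
        end_time_utc: Optional[datetime] = None,
    ) -> Optional[dict]:
        raise NotImplementedError


class InMemoryRequestResponseLogger(AdditionalLoggingUtils):
    """Stand-in backend for illustration; the PR itself targets a GCS bucket."""

    def __init__(self) -> None:
        self._store: dict[str, str] = {}

    def log_payload(self, request_id: str, payload: dict) -> None:
        # Store the JSON-serialized request/response pair keyed by request id.
        self._store[request_id] = json.dumps(payload)

    async def get_request_response_payload(
        self, request_id, start_time_utc=None, end_time_utc=None
    ):
        raw = self._store.get(request_id)
        return json.loads(raw) if raw is not None else None

A GCS-backed implementation would presumably replace the in-memory dict with object reads keyed by request id, which is what the "prefetch" and caching bullets above appear to address.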


import os
import sys
import time

import pytest
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

import litellm

# Make the repo root importable when running this file directly.
sys.path.insert(0, os.path.abspath("../.."))


@pytest.fixture()
@pytest.mark.skip(reason="Traceloop use `otel` integration instead")
def exporter():
    from traceloop.sdk import Traceloop

    # Capture spans in memory so the test can inspect them without a real backend.
    exporter = InMemorySpanExporter()
    Traceloop.init(
        app_name="test_litellm",
        disable_batch=True,
        exporter=exporter,
    )
    litellm.success_callback = ["traceloop"]
    litellm.set_verbose = True

    return exporter


@pytest.mark.parametrize("model", ["claude-3-5-haiku-20241022", "gpt-3.5-turbo"])
@pytest.mark.skip(reason="Traceloop use `otel` integration instead")
def test_traceloop_logging(exporter, model):
    # Mocked completion call; the traceloop success callback should emit a span.
    litellm.completion(
        model=model,
        messages=[{"role": "user", "content": "This is a test"}],
        max_tokens=1000,
        temperature=0.7,
        timeout=5,
        mock_response="hi",
    )
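
The test above only drives the call path and does not assert on the captured spans. If the skip markers were ever removed, a follow-up check against the in-memory exporter could look like the sketch below: get_finished_spans() is the real InMemorySpanExporter method, while the attribute check is a deliberately loose assumption about what Traceloop records.

# Hedged sketch: assert that the mocked completion emitted at least one span.
# Only get_finished_spans() is guaranteed OpenTelemetry SDK API; the attribute
# expectations are assumptions about Traceloop's semantic conventions.
def _assert_completion_span_emitted(exporter, model):
    time.sleep(1)  # belt-and-suspenders flush wait, even with disable_batch=True
    spans = exporter.get_finished_spans()
    assert len(spans) >= 1
    # Avoid hard-coding exact attribute keys, which may vary across versions.
    attrs = dict(spans[-1].attributes or {})
    assert any(model in str(value) for value in attrs.values())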