fix(traceloop): log errors

This commit is contained in:
Nir Gazit 2024-05-22 12:51:26 +03:00
parent b9a7b72f02
commit 43c30a4489
3 changed files with 168 additions and 135 deletions

View file

@@ -1,49 +1,35 @@
# Commented out for now - since traceloop break ci/cd
# import sys
# import os
# import io, asyncio
import sys
import os
import time
import pytest
import litellm
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from traceloop.sdk import Traceloop
# sys.path.insert(0, os.path.abspath('../..'))
# from litellm import completion
# import litellm
# litellm.num_retries = 3
# litellm.success_callback = [""]
# import time
# import pytest
# from traceloop.sdk import Traceloop
# Traceloop.init(app_name="test-litellm", disable_batch=True)
sys.path.insert(0, os.path.abspath("../.."))
# def test_traceloop_logging():
# try:
# litellm.set_verbose = True
# response = litellm.completion(
# model="gpt-3.5-turbo",
# messages=[{"role": "user", "content":"This is a test"}],
# max_tokens=1000,
# temperature=0.7,
# timeout=5,
# )
# print(f"response: {response}")
# except Exception as e:
# pytest.fail(f"An exception occurred - {e}")
# # test_traceloop_logging()
@pytest.fixture()
def exporter():
    """Set up tracing for the tests and hand back an in-memory span exporter.

    Initializes the Traceloop SDK with batching disabled (so spans are
    exported synchronously) and routes them into an OpenTelemetry
    ``InMemorySpanExporter`` that tests can inspect. Also registers the
    ``traceloop`` success callback on litellm and enables verbose logging.
    """
    span_exporter = InMemorySpanExporter()

    # disable_batch=True makes spans available immediately after each call,
    # which is what lets the tests read them back without a long wait.
    Traceloop.init(
        app_name="test_litellm",
        disable_batch=True,
        exporter=span_exporter,
    )

    litellm.success_callback = ["traceloop"]
    litellm.set_verbose = True

    return span_exporter
# # def test_traceloop_logging_async():
# # try:
# # litellm.set_verbose = True
# # async def test_acompletion():
# # return await litellm.acompletion(
# # model="gpt-3.5-turbo",
# # messages=[{"role": "user", "content":"This is a test"}],
# # max_tokens=1000,
# # temperature=0.7,
# # timeout=5,
# # )
# # response = asyncio.run(test_acompletion())
# # print(f"response: {response}")
# # except Exception as e:
# # pytest.fail(f"An exception occurred - {e}")
# # test_traceloop_logging_async()
@pytest.mark.parametrize("model", ["claude-instant-1.2", "gpt-3.5-turbo"])
def test_traceloop_logging(exporter, model):
    """Smoke-test that a litellm completion produces Traceloop spans.

    The original version injected the ``exporter`` fixture but never used
    it and made no assertion, so the test passed even when nothing was
    exported. We now wait briefly for the (batch-disabled) exporter to
    flush and assert that at least one span was recorded.
    """
    litellm.completion(
        model=model,
        messages=[{"role": "user", "content": "This is a test"}],
        max_tokens=1000,
        temperature=0.7,
        timeout=5,
    )

    # Even with disable_batch=True, give the exporter a moment to settle.
    time.sleep(1)

    spans = exporter.get_finished_spans()
    # NOTE(review): exact span names come from the Traceloop litellm
    # instrumentation and aren't visible here, so we only assert presence.
    assert len(spans) >= 1, f"expected at least one exported span, got {spans}"