Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00
Revert "Log errors in Traceloop Integration"
parent c73099b5bb
commit b16c58d521

3 changed files with 135 additions and 168 deletions
@@ -1,35 +1,49 @@
import sys
import os
import time
import pytest
import litellm
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from traceloop.sdk import Traceloop

# Commented out for now - since traceloop break ci/cd
# import sys
# import os
# import io, asyncio

sys.path.insert(0, os.path.abspath("../.."))
# sys.path.insert(0, os.path.abspath('../..'))

# from litellm import completion
# import litellm
# litellm.num_retries = 3
# litellm.success_callback = [""]
# import time
# import pytest
# from traceloop.sdk import Traceloop
# Traceloop.init(app_name="test-litellm", disable_batch=True)


@pytest.fixture()
def exporter():
    exporter = InMemorySpanExporter()
    Traceloop.init(
        app_name="test_litellm",
        disable_batch=True,
        exporter=exporter,
    )
    litellm.success_callback = ["traceloop"]
    litellm.set_verbose = True

    return exporter


# def test_traceloop_logging():
#     try:
#         litellm.set_verbose = True
#         response = litellm.completion(
#             model="gpt-3.5-turbo",
#             messages=[{"role": "user", "content": "This is a test"}],
#             max_tokens=1000,
#             temperature=0.7,
#             timeout=5,
#         )
#         print(f"response: {response}")
#     except Exception as e:
#         pytest.fail(f"An exception occurred - {e}")
# # test_traceloop_logging()


@pytest.mark.parametrize("model", ["claude-instant-1.2", "gpt-3.5-turbo"])
def test_traceloop_logging(exporter, model):
    litellm.completion(
        model=model,
        messages=[{"role": "user", "content": "This is a test"}],
        max_tokens=1000,
        temperature=0.7,
        timeout=5,
    )


# # def test_traceloop_logging_async():
# #     try:
# #         litellm.set_verbose = True
# #         async def test_acompletion():
# #             return await litellm.acompletion(
# #                 model="gpt-3.5-turbo",
# #                 messages=[{"role": "user", "content": "This is a test"}],
# #                 max_tokens=1000,
# #                 temperature=0.7,
# #                 timeout=5,
# #             )
# #         response = asyncio.run(test_acompletion())
# #         print(f"response: {response}")
# #     except Exception as e:
# #         pytest.fail(f"An exception occurred - {e}")
# # test_traceloop_logging_async()
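The test above only exercises the happy path of the Traceloop callback. As a rough, hypothetical follow-up (not part of this commit), the same exporter fixture could be used to assert that spans actually reached the injected InMemorySpanExporter, assuming Traceloop.init(exporter=...) wires it up as the active span exporter; the test name and parameters below are illustrative only.

# Hypothetical follow-up test, not part of this commit.
# Assumes Traceloop.init(exporter=...) routes spans through the injected
# InMemorySpanExporter; span names vary by Traceloop SDK version, so only
# the presence of exported spans is checked.
def test_traceloop_spans_exported(exporter):
    exporter.clear()  # discard spans left over from earlier tests
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "This is a test"}],
        max_tokens=10,
        timeout=5,
    )
    assert len(exporter.get_finished_spans()) > 0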