Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 19:54:13 +00:00.
Commit 6652227c25 ("simple test"), parent 541336a001.
2 changed files with 26 additions and 27 deletions.
litellm/tests/test_opentelemetry.py — new file, 26 lines added (hunk @@ -0,0 +1,26 @@):
|
||||||
|
import asyncio
|
||||||
|
import litellm
|
||||||
|
|
||||||
|
from litellm.integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfig
|
||||||
|
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
|
||||||
|
|
||||||
|
|
||||||
|
def test_otel_callback():
    """Smoke-test the OpenTelemetry logging integration end to end.

    Registers an OpenTelemetry logger backed by an in-memory span exporter,
    runs one synchronous and one asynchronous completion, and asserts that
    exactly two spans (one per call) were finished.
    """
    span_exporter = InMemorySpanExporter()
    otel_logger = OpenTelemetry(OpenTelemetryConfig(exporter=span_exporter))
    litellm.callbacks = [otel_logger]

    # One sync and one async call; each should finish exactly one span.
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
    )
    asyncio.run(
        litellm.acompletion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "hi"}],
        )
    )

    finished_spans = span_exporter.get_finished_spans()
    assert len(finished_spans) == 2
|
|
@ -602,30 +602,3 @@ def test_load_router_config(mock_cache, fake_env_vars):
|
||||||
|
|
||||||
|
|
||||||
# test_load_router_config()
|
# test_load_router_config()
|
||||||
|
|
||||||
from litellm.integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfig
|
|
||||||
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
|
|
||||||
|
|
||||||
@mock_patch_acompletion()
def test_otel_with_proxy_server(mock_acompletion, client_no_auth):
    """Exercise the proxy's /v1/chat/completions route with OTEL enabled.

    Registers an OpenTelemetry logger backed by an in-memory span exporter,
    posts a chat-completion request through the (unauthenticated) proxy test
    client, and checks that the request was forwarded to the patched
    ``acompletion`` exactly once with the proxy's bookkeeping kwargs attached.
    """
    span_exporter = InMemorySpanExporter()
    litellm.callbacks = [OpenTelemetry(OpenTelemetryConfig(exporter=span_exporter))]

    payload = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]}
    response = client_no_auth.post("/v1/chat/completions", json=payload)

    # The proxy must hand the request to acompletion exactly once, with the
    # caller's model/messages plus its own injected kwargs.
    mock_acompletion.assert_called_once_with(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
        litellm_call_id=mock.ANY,
        litellm_logging_obj=mock.ANY,
        request_timeout=mock.ANY,
        specific_deployment=True,
        metadata=mock.ANY,
        proxy_server_request=mock.ANY,
    )
    assert response.status_code == 200
    assert response.json() == example_completion_result

    # NOTE(review): zero spans are expected here — presumably because
    # acompletion is mocked out, the OTEL success path never runs; confirm
    # against the integration's callback wiring.
    assert len(span_exporter.get_finished_spans()) == 0
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue