Litellm Minor Fixes & Improvements (10/03/2024) (#6049)
* fix(proxy_server.py): remove spendlog fixes from proxy startup logic

  Moves https://github.com/BerriAI/litellm/pull/4794 to `/db_scripts` and cleans up some caching-related debug info (easier to trace debug logs)

* fix(langfuse_endpoints.py): Fixes https://github.com/BerriAI/litellm/issues/6041

* fix(azure.py): fix health checks for azure audio transcription models

  Fixes https://github.com/BerriAI/litellm/issues/5999

* Feat: Add Literal AI Integration (#5653)

  * feat: add Literal AI integration
  * update readme
  * Update README.md
  * fix: address comments
  * fix: remove literalai sdk
  * fix: use HTTPHandler
  * chore: add test
  * fix: add asyncio lock
  * fix(literal_ai.py): fix linting errors
  * fix(literal_ai.py): fix linting errors
  * refactor: cleanup

Co-authored-by: Willy Douhard <willy.douhard@gmail.com>
parent f9d0bcc5a1
commit 5c33d1c9af

14 changed files with 557 additions and 44 deletions
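The headline change is the Literal AI integration, which is wired in as a litellm callback. A minimal usage sketch, based on the test file added in this commit; the LITERAL_API_KEY environment variable name is an assumption (it is what the Literal AI SDK conventionally uses), so check litellm/integrations/literal_ai.py for the exact configuration the logger expects:

    import asyncio
    import os

    import litellm
    from litellm.integrations.literal_ai import LiteralAILogger

    # Assumption: the logger authenticates via an environment variable;
    # the exact name may differ in the integration module.
    os.environ["LITERAL_API_KEY"] = "your-api-key"

    litellm.callbacks = [LiteralAILogger()]


    async def main():
        # mock_response avoids a real provider call, as in the test below
        response = await litellm.acompletion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],
            mock_response="This is a mock response",
        )
        print(response.choices[0].message.content)


    asyncio.run(main())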
tests/local_testing/test_literalai.py (new file, 72 lines)

@@ -0,0 +1,72 @@
import os
import sys

sys.path.insert(0, os.path.abspath("../.."))

import asyncio
import logging

import pytest

import litellm
from litellm._logging import verbose_logger
from litellm.integrations.literal_ai import LiteralAILogger

verbose_logger.setLevel(logging.DEBUG)

litellm.set_verbose = True


@pytest.mark.asyncio
async def test_literalai_queue_logging():
    try:
        # Initialize LiteralAILogger
        test_literalai_logger = LiteralAILogger()

        litellm.callbacks = [test_literalai_logger]
        test_literalai_logger.batch_size = 6
        litellm.set_verbose = True

        # Make multiple calls to ensure we don't hit the batch size
        for _ in range(5):
            response = await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Test message"}],
                max_tokens=10,
                temperature=0.2,
                mock_response="This is a mock response",
            )

        await asyncio.sleep(3)

        # Check that logs are in the queue
        assert len(test_literalai_logger.log_queue) == 5

        # Now make calls to exceed the batch size
        for _ in range(3):
            await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Test message"}],
                max_tokens=10,
                temperature=0.2,
                mock_response="This is a mock response",
            )

        # Wait a short time for any asynchronous operations to complete
        await asyncio.sleep(1)

        print(
            "Length of literalai log queue: {}".format(
                len(test_literalai_logger.log_queue)
            )
        )
        # Check that the queue was flushed after exceeding batch size
        assert len(test_literalai_logger.log_queue) < 5

        # Clean up
        for cb in litellm.callbacks:
            if isinstance(cb, LiteralAILogger):
                await cb.async_httpx_client.client.aclose()

    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
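The assertions above rely on the logger's batching behavior: events accumulate in log_queue and are flushed once batch_size is reached. A minimal sketch of that pattern with hypothetical names (BatchLogger, log_event, _send are illustrative, not the real API; the actual implementation lives in litellm/integrations/literal_ai.py and, per the commit message, uses litellm's HTTPHandler and an asyncio lock):

    import asyncio
    from typing import Any, Dict, List


    class BatchLogger:
        """Sketch of a batch-and-flush logger; not the actual LiteralAILogger."""

        def __init__(self, batch_size: int = 6):
            self.batch_size = batch_size
            self.log_queue: List[Dict[str, Any]] = []
            # Lock so concurrent callbacks don't flush the same batch twice
            # (the commit message notes an asyncio lock was added for this).
            self._lock = asyncio.Lock()

        async def log_event(self, payload: Dict[str, Any]) -> None:
            async with self._lock:
                self.log_queue.append(payload)
                if len(self.log_queue) >= self.batch_size:
                    batch, self.log_queue = self.log_queue, []
                    await self._send(batch)

        async def _send(self, batch: List[Dict[str, Any]]) -> None:
            # Placeholder: the real integration POSTs the batch over HTTP.
            print(f"flushing {len(batch)} events")


    async def demo():
        logger = BatchLogger(batch_size=3)
        for i in range(5):
            await logger.log_event({"event": i})
        # 3 events were flushed, 2 remain queued
        print(f"{len(logger.log_queue)} events still queued")


    asyncio.run(demo())

This is why the test first stays under batch_size (the queue holds all 5 events) and then crosses it (the queue is flushed and shrinks below 5).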