(Feat) Add langsmith key based logging (#6682)

* add langsmith_api_key to StandardCallbackDynamicParams

* create a file for langsmith types

* langsmith add key / team based logging

* add key based logging for langsmith

* fix langsmith key based logging

* fix linting langsmith

* remove NOQA violation

* add unit test coverage for all helpers in test langsmith

* test_langsmith_key_based_logging

* docs langsmith key based logging

* run langsmith tests in logging callback tests

* fix logging testing

* test_langsmith_key_based_logging

* test_add_callback_via_key_litellm_pre_call_utils_langsmith

* add debug statement langsmith key based logging

* test_langsmith_key_based_logging
This commit is contained in:
Ishaan Jaff 2024-11-11 13:58:06 -08:00 committed by GitHub
parent 1e2ba3e045
commit c3bc9e6b12
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 810 additions and 179 deletions

View file

@@ -22,61 +22,6 @@ litellm.set_verbose = True
import time
@pytest.mark.asyncio
async def test_langsmith_queue_logging():
    """Verify LangsmithLogger batches log entries and flushes past batch_size.

    Flow:
      1. Register a LangsmithLogger with batch_size=6.
      2. Make 5 mocked completion calls (below the batch size) and assert
         they all remain queued.
      3. Make 3 more calls to exceed the batch size and assert the queue
         was flushed (shrank below 5).

    Any unexpected exception is converted into a test failure via
    ``pytest.fail``. The logger's HTTP client is closed in ``finally`` so
    it is never leaked across tests, even when an assertion fails.
    """
    try:
        # Initialize LangsmithLogger and install it as the only callback.
        test_langsmith_logger = LangsmithLogger()
        litellm.callbacks = [test_langsmith_logger]
        test_langsmith_logger.batch_size = 6
        litellm.set_verbose = True

        # Make multiple calls to ensure we don't hit the batch size.
        # (Responses are mocked; the return value is irrelevant here.)
        for _ in range(5):
            await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Test message"}],
                max_tokens=10,
                temperature=0.2,
                mock_response="This is a mock response",
            )

        await asyncio.sleep(3)

        # Check that logs are still in the queue (batch size not reached).
        assert len(test_langsmith_logger.log_queue) == 5

        # Now make calls to exceed the batch size and trigger a flush.
        for _ in range(3):
            await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Test message"}],
                max_tokens=10,
                temperature=0.2,
                mock_response="This is a mock response",
            )

        # Wait a short time for any asynchronous operations to complete.
        await asyncio.sleep(1)

        print(
            "Length of langsmith log queue: {}".format(
                len(test_langsmith_logger.log_queue)
            )
        )

        # Check that the queue was flushed after exceeding batch size.
        assert len(test_langsmith_logger.log_queue) < 5
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
    finally:
        # Always close the logger's HTTP client — previously this ran only
        # on success, leaking the client when an assertion failed.
        for cb in litellm.callbacks:
            if isinstance(cb, LangsmithLogger):
                await cb.async_httpx_client.client.aclose()
# test_langsmith_queue_logging()