import io
import os
import sys

sys.path.insert(0, os.path.abspath("../.."))

import asyncio
import logging
import time
import uuid

import pytest

import litellm
from litellm import completion
from litellm._logging import verbose_logger
from litellm.integrations.langsmith import LangsmithLogger
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler

verbose_logger.setLevel(logging.DEBUG)
litellm.set_verbose = True


@pytest.mark.asyncio
async def test_langsmith_queue_logging():
    try:
        # Initialize LangsmithLogger
        test_langsmith_logger = LangsmithLogger()

        litellm.callbacks = [test_langsmith_logger]
        test_langsmith_logger.batch_size = 6
        litellm.set_verbose = True

        # Make fewer calls than the batch size so nothing gets flushed yet
        for _ in range(5):
            response = await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Test message"}],
                max_tokens=10,
                temperature=0.2,
                mock_response="This is a mock response",
            )

        await asyncio.sleep(3)

        # Check that the logs are still sitting in the queue
        assert len(test_langsmith_logger.log_queue) == 5

        # Now make calls to exceed the batch size
        for _ in range(3):
            response = await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Test message"}],
                max_tokens=10,
                temperature=0.2,
                mock_response="This is a mock response",
            )

        # Wait a short time for any asynchronous operations to complete
        await asyncio.sleep(1)

        print(
            "Length of langsmith log queue: {}".format(
                len(test_langsmith_logger.log_queue)
            )
        )
        # Check that the queue was flushed after exceeding the batch size
        assert len(test_langsmith_logger.log_queue) < 5

        # Clean up the logger's async HTTP client
        for cb in litellm.callbacks:
            if isinstance(cb, LangsmithLogger):
                await cb.async_httpx_client.client.aclose()

    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


# test_langsmith_logging()


@pytest.mark.skip(reason="Flaky test. covered by unit tests on custom logger.")
def test_async_langsmith_logging_with_metadata():
    try:
        litellm.success_callback = ["langsmith"]
        litellm.set_verbose = True

        # Log a single non-streaming completion via the "langsmith" success callback
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "what llm are u"}],
            max_tokens=10,
            temperature=0.2,
        )
        print(response)
        time.sleep(3)

        # Close the logger's async HTTP client to avoid leaking connections
        for cb in litellm.callbacks:
            if isinstance(cb, LangsmithLogger):
                cb.async_httpx_client.close()

    except Exception as e:
        print(e)
        pytest.fail(f"Error occurred: {e}")


@pytest.mark.skip(reason="Flaky test. covered by unit tests on custom logger.")
@pytest.mark.parametrize("sync_mode", [False, True])
@pytest.mark.asyncio
async def test_async_langsmith_logging_with_streaming_and_metadata(sync_mode):
    try:
        test_langsmith_logger = LangsmithLogger()
        litellm.success_callback = ["langsmith"]
        litellm.set_verbose = True
        run_id = str(uuid.uuid4())

        messages = [{"role": "user", "content": "what llm are u"}]
        if sync_mode is True:
            # Sync streaming call; consume the stream so the success callback fires
            response = completion(
                model="gpt-3.5-turbo",
                messages=messages,
                max_tokens=10,
                temperature=0.2,
                stream=True,
                metadata={"id": run_id},
            )
            for cb in litellm.callbacks:
                if isinstance(cb, LangsmithLogger):
                    cb.async_httpx_client = AsyncHTTPHandler()
            for chunk in response:
                continue
            time.sleep(3)
        else:
            # Async streaming call with a mocked response; consume the stream fully
            response = await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=messages,
                max_tokens=10,
                temperature=0.2,
                mock_response="This is a mock request",
                stream=True,
                metadata={"id": run_id},
            )
            for cb in litellm.callbacks:
                if isinstance(cb, LangsmithLogger):
                    cb.async_httpx_client = AsyncHTTPHandler()
            async for chunk in response:
                continue
            await asyncio.sleep(3)

        print("run_id", run_id)

        # Fetch the run back from Langsmith using the id passed in metadata
        logged_run_on_langsmith = test_langsmith_logger.get_run_by_id(run_id=run_id)

        print("logged_run_on_langsmith", logged_run_on_langsmith)
        print("fields in logged_run_on_langsmith", logged_run_on_langsmith.keys())

        input_fields_on_langsmith = logged_run_on_langsmith.get("inputs")
        extra_fields_on_langsmith = logged_run_on_langsmith.get("extra").get(
            "invocation_params"
        )

        assert logged_run_on_langsmith.get("run_type") == "llm"
        print("\nLogged INPUT ON LANGSMITH", input_fields_on_langsmith)
        print("\nextra fields on langsmith", extra_fields_on_langsmith)

        assert isinstance(input_fields_on_langsmith, dict)
    except Exception as e:
        print(e)
        pytest.fail(f"Error occurred: {e}")
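

# NOTE (sketch, not part of the original suite): these tests mutate module-level
# litellm state (litellm.callbacks / litellm.success_callback), which can leak
# between tests. A minimal autouse fixture like the one below could reset that
# state after each test; it assumes clearing these lists is sufficient isolation.
@pytest.fixture(autouse=True)
def _reset_litellm_callbacks():
    yield
    # Restore global callback lists so one test's logger setup does not affect the next
    litellm.callbacks = []
    litellm.success_callback = []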