diff --git a/enterprise/callbacks/api_callback.py b/enterprise/callbacks/generic_api_callback.py
similarity index 97%
rename from enterprise/callbacks/api_callback.py
rename to enterprise/callbacks/generic_api_callback.py
index 8a8ea2617..309001e1b 100644
--- a/enterprise/callbacks/api_callback.py
+++ b/enterprise/callbacks/generic_api_callback.py
@@ -57,7 +57,9 @@ class GenericAPILogger:
 
     # This is sync, because we run this in a separate thread. Running in a sepearate thread ensures it will never block an LLM API call
     # Experience with s3, Langfuse shows that async logging events are complicated and can block LLM calls
-    def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
+    def log_event(
+        self, kwargs, response_obj, start_time, end_time, user_id, print_verbose
+    ):
         try:
             verbose_logger.debug(
                 f"GenericAPILogger Logging - Enters logging function for model {kwargs}"
diff --git a/litellm/tests/test_custom_api_logger.py b/litellm/tests/test_custom_api_logger.py
new file mode 100644
index 000000000..063dfa3e3
--- /dev/null
+++ b/litellm/tests/test_custom_api_logger.py
@@ -0,0 +1,45 @@
+import sys
+import os
+import io, asyncio
+
+# import logging
+# logging.basicConfig(level=logging.DEBUG)
+sys.path.insert(0, os.path.abspath("../.."))
+print("Modified sys.path:", sys.path)
+
+
+from litellm import completion
+import litellm
+
+litellm.num_retries = 3
+
+import time, random
+import pytest
+
+
+@pytest.mark.asyncio
+async def test_custom_api_logging():
+    try:
+        litellm.success_callback = ["generic"]
+        litellm.set_verbose = True
+        os.environ["GENERIC_LOGGER_ENDPOINT"] = "http://localhost:8000/log-event"
+
+        print("Testing generic api logging")
+
+        await litellm.acompletion(
+            model="gpt-3.5-turbo",
+            messages=[{"role": "user", "content": "This is a test"}],
+            max_tokens=10,
+            temperature=0.7,
+            user="ishaan-2",
+        )
+
+    except Exception as e:
+        pytest.fail(f"An exception occurred - {e}")
+    finally:
+        # post, close log file and verify
+        # Reset stdout to the original value
+        print("Passed! Testing generic api logging")
+
+
+# test_custom_api_logging()
diff --git a/litellm/utils.py b/litellm/utils.py
index 37d5b7507..d42e5f655 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -12,6 +12,7 @@ import litellm
 import dotenv, json, traceback, threading, base64, ast
 import subprocess, os
+from os.path import abspath, join, dirname
 import litellm, openai
 import itertools
 import random, uuid, requests
@@ -33,11 +34,11 @@ from dataclasses import (
 
 # import pkg_resources
 from importlib import resources
 
-# filename = pkg_resources.resource_filename(__name__, "llms/tokenizers")
-filename = str(resources.files("llms").joinpath("tokenizers"))
-os.environ[
-    "TIKTOKEN_CACHE_DIR"
-] = filename  # use local copy of tiktoken b/c of - https://github.com/BerriAI/litellm/issues/1071
+# # filename = pkg_resources.resource_filename(__name__, "llms/tokenizers")
+# filename = str(resources.files().joinpath("llms/tokenizers"))
+# os.environ[
+#     "TIKTOKEN_CACHE_DIR"
+# ] = filename  # use local copy of tiktoken b/c of - https://github.com/BerriAI/litellm/issues/1071
 encoding = tiktoken.get_encoding("cl100k_base")
 import importlib.metadata
 from ._logging import verbose_logger
@@ -76,8 +77,13 @@ from .exceptions import (
     UnprocessableEntityError,
 )
 
-# import enterprise features
-from ..enterprise.callbacks.api_callback import GenericAPILogger
+# Import Enterprise features
+project_path = abspath(join(dirname(__file__), "..", ".."))
+# Add the "enterprise" directory to sys.path
+enterprise_path = abspath(join(project_path, "enterprise"))
+sys.path.append(enterprise_path)
+from enterprise.callbacks.generic_api_callback import GenericAPILogger
+
 
 from typing import cast, List, Dict, Union, Optional, Literal, Any
 from .caching import Cache