diff --git a/litellm/__init__.py b/litellm/__init__.py
index 6093c5375..5792f9cbd 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -4,6 +4,8 @@ input_callback: List[str] = []
 success_callback: List[str] = []
 failure_callback: List[str] = []
 set_verbose = False
+debugger = False # Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
+email = None # for debugging dashboard
 telemetry = True
 max_tokens = 256 # OpenAI Defaults
 retry = True
@@ -20,7 +22,6 @@ vertex_location: Optional[str] = None
 togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
-debugger = False
 model_cost = {
     "gpt-3.5-turbo": {
         "max_tokens": 4000,
@@ -262,7 +263,8 @@ from .utils import (
     cost_per_token,
     completion_cost,
     get_litellm_params,
-    Logging
+    Logging,
+    acreate
 )
 from .main import * # type: ignore
 from .integrations import *
diff --git a/litellm/__pycache__/__init__.cpython-311.pyc b/litellm/__pycache__/__init__.cpython-311.pyc
index 6e75350c8..a833b03a9 100644
Binary files a/litellm/__pycache__/__init__.cpython-311.pyc and b/litellm/__pycache__/__init__.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 5bb67cb55..a8c9457cb 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/integrations/litedebugger.py b/litellm/integrations/litedebugger.py
index afd70eefb..1880c4694 100644
--- a/litellm/integrations/litedebugger.py
+++ b/litellm/integrations/litedebugger.py
@@ -3,14 +3,14 @@
 import requests, traceback, json, os
 class LiteDebugger:
     user_email = None
     dashboard_url = None
-    def __init__(self):
+    def __init__(self, email=None):
         self.api_url = "https://api.litellm.ai/debugger"
-        self.validate_environment()
+        self.validate_environment(email)
         pass
-    def validate_environment(self):
+    def validate_environment(self, email):
         try:
-            self.user_email = os.getenv("LITELLM_EMAIL")
+            self.user_email = os.getenv("LITELLM_EMAIL") or email
             self.dashboard_url = 'https://admin.litellm.ai/' + self.user_email
             print(f"Here's your free Dashboard 👉 {self.dashboard_url}")
             if self.user_email == None:
diff --git a/litellm/tests/test_async_fn.py b/litellm/tests/test_async_fn.py
index c20c5cde6..da6004c73 100644
--- a/litellm/tests/test_async_fn.py
+++ b/litellm/tests/test_async_fn.py
@@ -9,7 +9,7 @@ import asyncio
 sys.path.insert(
     0, os.path.abspath("../..")
 ) # Adds the parent directory to the system path
-from litellm import acompletion
+from litellm import acompletion, acreate
 
 
 async def test_get_response():
@@ -24,3 +24,16 @@
 
 response = asyncio.run(test_get_response())
 print(response)
+
+# async def test_get_response():
+#     user_message = "Hello, how are you?"
+#     messages = [{"content": user_message, "role": "user"}]
+#     try:
+#         response = await acreate(model="gpt-3.5-turbo", messages=messages)
+#     except Exception as e:
+#         pytest.fail(f"error occurred: {e}")
+#     return response
+
+
+# response = asyncio.run(test_get_response())
+# print(response)
diff --git a/litellm/utils.py b/litellm/utils.py
index 108f43e57..37e367728 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -718,7 +718,7 @@ def set_callbacks(callback_list):
                 supabaseClient = Supabase()
             elif callback == "lite_debugger":
                 print(f"instantiating lite_debugger")
-                liteDebuggerClient = LiteDebugger()
+                liteDebuggerClient = LiteDebugger(email=litellm.email)
     except Exception as e:
         raise e
 
@@ -1008,6 +1008,9 @@ def handle_success(args, kwargs, result, start_time, end_time):
         pass
 
 
+def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
+    return litellm.acompletion(*args, **kwargs)
+
 def prompt_token_calculator(model, messages):
     # use tiktoken or anthropic's tokenizer depending on the model
     text = " ".join(message["content"] for message in messages)
@@ -1022,7 +1025,6 @@ def prompt_token_calculator(model, messages):
         num_tokens = len(encoding.encode(text))
     return num_tokens
 
-
 def valid_model(model):
     try:
         # for a given model name, check if the user has the right permissions to access the model
diff --git a/pyproject.toml b/pyproject.toml
index 54daedd69..94ec5ccde 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.446"
+version = "0.1.447"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"