diff --git a/dist/litellm-0.1.446-py3-none-any.whl b/dist/litellm-0.1.446-py3-none-any.whl
new file mode 100644
index 000000000..45196f85f
Binary files /dev/null and b/dist/litellm-0.1.446-py3-none-any.whl differ
diff --git a/dist/litellm-0.1.446.tar.gz b/dist/litellm-0.1.446.tar.gz
new file mode 100644
index 000000000..812343977
Binary files /dev/null and b/dist/litellm-0.1.446.tar.gz differ
diff --git a/docs/my-website/docs/debugging/hosted_debugging.md b/docs/my-website/docs/debugging/hosted_debugging.md
index 709918751..aeefa7b48 100644
--- a/docs/my-website/docs/debugging/hosted_debugging.md
+++ b/docs/my-website/docs/debugging/hosted_debugging.md
@@ -5,7 +5,7 @@ LiteLLM offers a free hosted debugger UI for your api calls (https://admin.litel
 
 **Needs litellm>=0.1.438***
 
-You can enable this setting `lite_debugger` as a callback.
+You can enable this setting `litellm.debugger=True`.
 
 Dashboard
 
diff --git a/litellm/__init__.py b/litellm/__init__.py
index 2271d60f4..6093c5375 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -17,7 +17,6 @@ openrouter_key: Optional[str] = None
 huggingface_key: Optional[str] = None
 vertex_project: Optional[str] = None
 vertex_location: Optional[str] = None
-hugging_api_token: Optional[str] = None
 togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False  # if you want the caching key to be model + prompt
diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index c656f1519..05ed66e1b 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/main.py b/litellm/main.py
index f2dc89eb4..3fb8da19f 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -166,6 +166,7 @@ def completion(
             response = openai.ChatCompletion.create(
                 engine=model, messages=messages, **optional_params
             )
+            ## LOGGING
             logging.post_call(input=messages, api_key=openai.api_key, original_response=response, additional_args={"headers": litellm.headers, "api_version": openai.api_version, "api_base": openai.api_base})
         elif (
diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py
index ef2063828..10910a500 100644
--- a/litellm/tests/test_streaming.py
+++ b/litellm/tests/test_streaming.py
@@ -22,6 +22,32 @@ def logger_fn(model_call_object: dict):
 
 user_message = "Hello, how are you?"
 messages = [{"content": user_message, "role": "user"}]
+# test on openai completion call
+try:
+    response = completion(
+        model="gpt-3.5-turbo", messages=messages, stream=True, logger_fn=logger_fn
+    )
+    for chunk in response:
+        print(chunk["choices"][0]["delta"])
+    score += 1
+except:
+    print(f"error occurred: {traceback.format_exc()}")
+    pass
+
+
+# test on azure completion call
+try:
+    response = completion(
+        model="azure/chatgpt-test", messages=messages, stream=True, logger_fn=logger_fn
+    )
+    for chunk in response:
+        print(chunk["choices"][0]["delta"])
+    score += 1
+except:
+    print(f"error occurred: {traceback.format_exc()}")
+    pass
+
+
 # test on anthropic completion call
 try:
     response = completion(
@@ -35,19 +61,19 @@ except:
     pass
 
 
-# test on anthropic completion call
-try:
-    response = completion(
-        model="meta-llama/Llama-2-7b-chat-hf",
-        messages=messages,
-        custom_llm_provider="huggingface",
-        custom_api_base="https://s7c7gytn18vnu4tw.us-east-1.aws.endpoints.huggingface.cloud",
-        stream=True,
-        logger_fn=logger_fn,
-    )
-    for chunk in response:
-        print(chunk["choices"][0]["delta"])
-    score += 1
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+# # test on huggingface completion call
+# try:
+#     response = completion(
+#         model="meta-llama/Llama-2-7b-chat-hf",
+#         messages=messages,
+#         custom_llm_provider="huggingface",
+#         custom_api_base="https://s7c7gytn18vnu4tw.us-east-1.aws.endpoints.huggingface.cloud",
+#         stream=True,
+#         logger_fn=logger_fn,
+#     )
+#     for chunk in response:
+#         print(chunk["choices"][0]["delta"])
+#     score += 1
+# except:
+#     print(f"error occurred: {traceback.format_exc()}")
+#     pass
diff --git a/pyproject.toml b/pyproject.toml
index f16c8287d..54daedd69 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.445"
+version = "0.1.446"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
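For reviewers, a minimal usage sketch of what this PR documents and tests: enabling the hosted debugger via the new `litellm.debugger=True` flag (per the hosted_debugging.md change) and making a streamed `completion` call like the new test_streaming.py cases. Assumes litellm>=0.1.446 and an OpenAI API key in the environment; the model name and prompt are illustrative only.

# sketch only: enable the hosted debugger flag introduced in this PR's docs change
import litellm
from litellm import completion

litellm.debugger = True  # replaces registering "lite_debugger" as a callback

# illustrative prompt, mirroring the streaming tests added above
messages = [{"role": "user", "content": "Hello, how are you?"}]

response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
for chunk in response:
    print(chunk["choices"][0]["delta"])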