diff --git a/litellm/tests/test_proxy.py b/litellm/tests/test_proxy.py
deleted file mode 100644
index 3a2d95e77..000000000
--- a/litellm/tests/test_proxy.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#### What this tests ####
-# This tests the OpenAI-proxy server
-
-import sys, os
-import traceback
-sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
-from dotenv import load_dotenv
-
-load_dotenv()
-import unittest
-from unittest.mock import patch
-from click.testing import CliRunner
-import pytest
-import litellm
-from litellm.proxy.llm import litellm_completion
-from litellm.proxy.proxy_server import initialize
-def test_azure_call():
-    try:
-        data = {
-            "model": "azure/chatgpt-v-2",
-            "messages": [{"role": "user", "content": "Hey!"}]
-        }
-        result = litellm_completion(data=data, user_api_base=os.getenv("AZURE_API_BASE"), type="chat_completion", user_temperature=None, user_max_tokens=None, user_model=None, user_headers=None, user_debug=False, model_router=None)
-        return result
-    except Exception as e:
-        pytest.fail(f"An error occurred: {e}")
-
-# test_azure_call()
-## test debug
-def test_debug():
-    try:
-        initialize(model=None, alias=None, api_base=None, debug=True, temperature=None, max_tokens=None, max_budget=None, telemetry=None, drop_params=None, add_function_to_prompt=None, headers=None, save=None, api_version=None)
-        assert litellm.set_verbose == True
-    except Exception as e:
-        pytest.fail(f"An error occurred: {e}")
-
-# test_debug()
-## test logs
\ No newline at end of file
diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py
index 9bf202929..ca6748dc8 100644
--- a/litellm/tests/test_streaming.py
+++ b/litellm/tests/test_streaming.py
@@ -661,36 +661,36 @@ def test_completion_replicate_stream_bad_key():
 # test_completion_replicate_stream_bad_key()
 
-def test_completion_bedrock_claude_stream():
-    try:
-        litellm.set_verbose=False
-        response = completion(
-            model="bedrock/anthropic.claude-instant-v1",
-            messages=[{"role": "user", "content": "Be as verbose as possible and give as many details as possible, how does a court case get to the Supreme Court?"}],
-            temperature=1,
-            max_tokens=20,
-            stream=True,
-        )
-        print(response)
-        complete_response = ""
-        has_finish_reason = False
-        # Add any assertions here to check the response
-        for idx, chunk in enumerate(response):
-            # print
-            chunk, finished = streaming_format_tests(idx, chunk)
-            has_finish_reason = finished
-            complete_response += chunk
-            if finished:
-                break
-        if has_finish_reason is False:
-            raise Exception("finish reason not set for last chunk")
-        if complete_response.strip() == "":
-            raise Exception("Empty response received")
-        print(f"completion_response: {complete_response}")
-    except RateLimitError:
-        pass
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
+# def test_completion_bedrock_claude_stream():
+#     try:
+#         litellm.set_verbose=False
+#         response = completion(
+#             model="bedrock/anthropic.claude-instant-v1",
+#             messages=[{"role": "user", "content": "Be as verbose as possible and give as many details as possible, how does a court case get to the Supreme Court?"}],
+#             temperature=1,
+#             max_tokens=20,
+#             stream=True,
+#         )
+#         print(response)
+#         complete_response = ""
+#         has_finish_reason = False
+#         # Add any assertions here to check the response
+#         for idx, chunk in enumerate(response):
+#             # print
+#             chunk, finished = streaming_format_tests(idx, chunk)
+#             has_finish_reason = finished
+#             complete_response += chunk
+#             if finished:
+#                 break
+#         if has_finish_reason is False:
+#             raise Exception("finish reason not set for last chunk")
+#         if complete_response.strip() == "":
+#             raise Exception("Empty response received")
+#         print(f"completion_response: {complete_response}")
+#     except RateLimitError:
+#         pass
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
 
 # test_completion_bedrock_claude_stream()