test(test_proxy.py): deprecating cli tool

This commit is contained in:
Krrish Dholakia 2023-10-21 15:53:35 -07:00
parent 883bef109f
commit 9c0b475a14
2 changed files with 30 additions and 68 deletions

View file

@@ -1,38 +0,0 @@
#### What this tests ####
# This tests the OpenAI-proxy server
import sys, os
import traceback
sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
from dotenv import load_dotenv
load_dotenv()
import unittest
from unittest.mock import patch
from click.testing import CliRunner
import pytest
import litellm
from litellm.proxy.llm import litellm_completion
from litellm.proxy.proxy_server import initialize
def test_azure_call():
    """Smoke-test a chat completion routed through the proxy to Azure.

    Builds a minimal chat payload and forwards it via ``litellm_completion``
    with the Azure API base taken from the environment. Any exception is
    converted into a pytest failure.
    """
    payload = {
        "model": "azure/chatgpt-v-2",
        "messages": [{"role": "user", "content": "Hey!"}],
    }
    try:
        return litellm_completion(
            data=payload,
            user_api_base=os.getenv("AZURE_API_BASE"),
            type="chat_completion",
            user_temperature=None,
            user_max_tokens=None,
            user_model=None,
            user_headers=None,
            user_debug=False,
            model_router=None,
        )
    except Exception as e:
        pytest.fail(f"An error occurred: {e}")
# test_azure_call()
## test debug
def test_debug():
    """Check that initializing the proxy with debug=True enables verbose mode.

    Calls ``initialize`` with every option disabled except ``debug`` and then
    asserts that ``litellm.set_verbose`` was flipped on. Any exception is
    converted into a pytest failure.
    """
    try:
        initialize(
            model=None,
            alias=None,
            api_base=None,
            debug=True,
            temperature=None,
            max_tokens=None,
            max_budget=None,
            telemetry=None,
            drop_params=None,
            add_function_to_prompt=None,
            headers=None,
            save=None,
            api_version=None,
        )
        assert litellm.set_verbose == True
    except Exception as e:
        pytest.fail(f"An error occurred: {e}")
# test_debug()
## test logs

View file

@@ -661,36 +661,36 @@ def test_completion_replicate_stream_bad_key():
# test_completion_replicate_stream_bad_key()
def test_completion_bedrock_claude_stream():
    """Stream a Bedrock Claude completion and validate the chunk protocol.

    Issues a streaming ``completion`` call against
    ``bedrock/anthropic.claude-instant-v1`` and iterates the chunks,
    checking each with ``streaming_format_tests``. Fails the test if the
    final chunk carries no finish reason or the accumulated response is
    empty. A ``RateLimitError`` from the live API is tolerated.
    """
    try:
        litellm.set_verbose=False
        response = completion(
            model="bedrock/anthropic.claude-instant-v1",
            messages=[{"role": "user", "content": "Be as verbose as possible and give as many details as possible, how does a court case get to the Supreme Court?"}],
            temperature=1,
            max_tokens=20,
            stream=True,
        )
        print(response)
        complete_response = ""
        has_finish_reason = False
        # Accumulate streamed chunks; streaming_format_tests validates each
        # chunk's shape and reports whether it carried a finish reason.
        for idx, chunk in enumerate(response):
            chunk, finished = streaming_format_tests(idx, chunk)
            has_finish_reason = finished
            complete_response += chunk
            if finished:
                break
        if has_finish_reason is False:
            raise Exception("finish reason not set for last chunk")
        if complete_response.strip() == "":
            raise Exception("Empty response received")
        print(f"completion_response: {complete_response}")
    except RateLimitError:
        # Rate limiting is an acceptable outcome for this live-API test.
        pass
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
# test_completion_bedrock_claude_stream()