test(test_stream_chunk_builder.py): fix setting api key

Krrish Dholakia 2023-11-24 11:47:48 -08:00
parent 2e8d582a34
commit c75e90663c
2 changed files with 277 additions and 278 deletions

@@ -1,294 +1,294 @@
#### What this tests ####
# This tests calling batch_completions by running 100 messages together
import sys, os, time
import traceback, asyncio
import pytest
sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import litellm
from litellm import Router
from concurrent.futures import ThreadPoolExecutor
from dotenv import load_dotenv
load_dotenv()

def test_exception_raising():
    # this tests if the router raises an exception when invalid params are set
    # in this test both deployments have bad keys - Keep this test. It validates if the router raises the most recent exception
    litellm.set_verbose = True
    import openai
    try:
        print("testing if router raises an exception")
        old_api_key = os.environ["AZURE_API_KEY"]
        os.environ["AZURE_API_KEY"] = ""
        model_list = [
            {
                "model_name": "gpt-3.5-turbo", # openai model name
                "litellm_params": { # params for litellm completion/embedding call
                    "model": "azure/chatgpt-v-2",
                    "api_key": "bad-key",
                    "api_version": os.getenv("AZURE_API_VERSION"),
                    "api_base": os.getenv("AZURE_API_BASE")
                },
                "tpm": 240000,
                "rpm": 1800
            },
            {
                "model_name": "gpt-3.5-turbo", # openai model name
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_key": "bad-key",
                },
                "tpm": 240000,
                "rpm": 1800
            }
        ]
        router = Router(model_list=model_list,
                        redis_host=os.getenv("REDIS_HOST"),
                        redis_password=os.getenv("REDIS_PASSWORD"),
                        redis_port=int(os.getenv("REDIS_PORT")),
                        routing_strategy="simple-shuffle",
                        set_verbose=False,
                        num_retries=1) # type: ignore
        response = router.completion(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": "hello this request will fail"
                }
            ]
        )
        os.environ["AZURE_API_KEY"] = old_api_key
        pytest.fail("Should have raised an Auth Error")
    except openai.AuthenticationError:
        print("Test Passed: Caught an OPENAI AUTH Error, Good job. This is what we needed!")
        os.environ["AZURE_API_KEY"] = old_api_key
        router.flush_cache()
    except Exception as e:
        os.environ["AZURE_API_KEY"] = old_api_key
        print("Got unexpected exception on router!", e)
# test_exception_raising()

def test_reading_key_from_model_list():
    # this tests whether the router reads the api key from the model_list, not the environment
    # DO NOT REMOVE THIS TEST. It's an IMP ONE. Speak to Ishaan, if you are trying to remove this
    litellm.set_verbose = False
    import openai
    try:
        print("testing if router reads the key from the model list")
        old_api_key = os.environ["AZURE_API_KEY"]
        os.environ.pop("AZURE_API_KEY", None)
        model_list = [
            {
                "model_name": "gpt-3.5-turbo", # openai model name
                "litellm_params": { # params for litellm completion/embedding call
                    "model": "azure/chatgpt-v-2",
                    "api_key": old_api_key,
                    "api_version": os.getenv("AZURE_API_VERSION"),
                    "api_base": os.getenv("AZURE_API_BASE")
                },
                "tpm": 240000,
                "rpm": 1800
            }
        ]
        router = Router(model_list=model_list,
                        redis_host=os.getenv("REDIS_HOST"),
                        redis_password=os.getenv("REDIS_PASSWORD"),
                        redis_port=int(os.getenv("REDIS_PORT")),
                        routing_strategy="simple-shuffle",
                        set_verbose=True,
                        num_retries=1) # type: ignore
        response = router.completion(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": "hello this request will fail"
                }
            ]
        )
        os.environ["AZURE_API_KEY"] = old_api_key
        router.flush_cache()
    except Exception as e:
        os.environ["AZURE_API_KEY"] = old_api_key
        print("FAILED TEST")
        pytest.fail(f"Got unexpected exception on router! {e}")
# test_reading_key_from_model_list()

### FUNCTION CALLING
def test_function_calling():
    model_list = [
        {
            "model_name": "gpt-3.5-turbo-0613",
            "litellm_params": {
                "model": "gpt-3.5-turbo-0613",
                "api_key": os.getenv("OPENAI_API_KEY"),
            },
            "tpm": 100000,
            "rpm": 10000,
        },
    ]
    messages = [
        {"role": "user", "content": "What is the weather like in Boston?"}
    ]
    functions = [
        {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"]
                    }
                },
                "required": ["location"]
            }
        }
    ]
    router = Router(model_list=model_list, routing_strategy="latency-based-routing")
    response = router.completion(model="gpt-3.5-turbo-0613", messages=messages, functions=functions)
    print(response)

def test_acompletion_on_router():
    try:
        litellm.set_verbose = False
        model_list = [
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo-0613",
                    "api_key": os.getenv("OPENAI_API_KEY"),
                },
                "tpm": 100000,
                "rpm": 10000,
            },
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "azure/chatgpt-v-2",
                    "api_key": os.getenv("AZURE_API_KEY"),
                    "api_base": os.getenv("AZURE_API_BASE"),
                    "api_version": os.getenv("AZURE_API_VERSION")
                },
                "tpm": 100000,
                "rpm": 10000,
            }
        ]
        messages = [
            {"role": "user", "content": f"write a one sentence poem {time.time()}?"}
        ]
        start_time = time.time()
        router = Router(model_list=model_list,
                        redis_host=os.environ["REDIS_HOST"],
                        redis_password=os.environ["REDIS_PASSWORD"],
                        redis_port=os.environ["REDIS_PORT"],
                        cache_responses=True,
                        timeout=30,
                        routing_strategy="simple-shuffle")
        async def get_response():
            # with cache_responses=True the second call should be served from
            # the cache, so the two responses must be identical
            response1 = await router.acompletion(model="gpt-3.5-turbo", messages=messages)
            print(f"response1: {response1}")
            response2 = await router.acompletion(model="gpt-3.5-turbo", messages=messages)
            print(f"response2: {response2}")
            assert response1.id == response2.id
            assert len(response1.choices[0].message.content) > 0
            assert response1.choices[0].message.content == response2.choices[0].message.content
        asyncio.run(get_response())
    except litellm.Timeout as e:
        end_time = time.time()
        print(f"timeout error occurred: {end_time - start_time}")
        pass
    except Exception as e:
        traceback.print_exc()
        pytest.fail(f"Error occurred: {e}")

test_acompletion_on_router()

def test_function_calling_on_router():
    try:
        litellm.set_verbose = True
        model_list = [
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo-0613",
                    "api_key": os.getenv("OPENAI_API_KEY"),
                },
            },
        ]
        function1 = [
            {
                "name": "get_current_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                    },
                    "required": ["location"],
                },
            }
        ]
        router = Router(
            model_list=model_list,
            redis_host=os.getenv("REDIS_HOST"),
            redis_password=os.getenv("REDIS_PASSWORD"),
            redis_port=os.getenv("REDIS_PORT")
        )
        messages = [
            {
                "role": "user",
                "content": "what's the weather in boston"
            }
        ]
        response = router.completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
        print(f"final returned response: {response}")
        assert isinstance(response["choices"][0]["message"]["function_call"], dict)
    except Exception as e:
        print(f"An exception occurred: {e}")
# test_function_calling_on_router()

def test_aembedding_on_router():
    try:
        model_list = [
            {
                "model_name": "text-embedding-ada-002",
                "litellm_params": {
                    "model": "text-embedding-ada-002",
                },
                "tpm": 100000,
                "rpm": 10000,
            },
        ]
        async def embedding_call():
            router = Router(model_list=model_list)
            response = await router.aembedding(
                model="text-embedding-ada-002",
                input=["good morning from litellm", "this is another item"],
            )
            print(response)
        asyncio.run(embedding_call())
    except Exception as e:
        traceback.print_exc()
        pytest.fail(f"Error occurred: {e}")

@@ -33,7 +33,6 @@ function_schema = {
 def test_stream_chunk_builder():
     try:
         litellm.set_verbose = False
-        litellm.api_key = os.environ["OPENAI_API_KEY"]
         response = completion(
             model="gpt-3.5-turbo",
             messages=messages,
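
This removal is the fix named in the commit title: test_stream_chunk_builder was assigning os.environ["OPENAI_API_KEY"] to the module-level global litellm.api_key, which can bleed into every other test running in the same process, including the router tests above that deliberately blank or pop keys. A minimal sketch of the alternative, passing the key per call instead of mutating the global (illustrative only, not part of this diff):

import os
from litellm import completion

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
    api_key=os.environ["OPENAI_API_KEY"],  # per-call key; leaving it unset falls back to the env var
)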