Mirror of https://github.com/BerriAI/litellm.git
test: test_router.py
fix router testing
commit dfba305508 (parent a350b80d28)
2 changed files with 7 additions and 6 deletions
@@ -20,7 +20,7 @@ from litellm.caching import Cache
 messages = [{"role": "user", "content": f"who is ishaan {time.time()}"}]
 def test_caching_v2(): # test in memory cache
     try:
-        litellm.cache = Cache(type="redis", host="os.environ/REDIS_HOST_2", port="os.environ/REDIS_PORT_2", password="os.environ/REDIS_PASSWORD_2", ssl="os.environ/REDIS_SSL")
+        litellm.cache = Cache(type="redis", host="os.environ/REDIS_HOST_2", port="os.environ/REDIS_PORT_2", password="os.environ/REDIS_PASSWORD_2", ssl="os.environ/REDIS_SSL_2")
         response1 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
         response2 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
         print(f"response1: {response1}")
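The fix in this hunk points `ssl` at `REDIS_SSL_2`, matching the other `_2`-suffixed settings for this second Redis instance. The `"os.environ/VAR"` strings are litellm's convention for deferring a value to an environment variable at runtime; a minimal sketch of that lookup, as an illustration of the convention rather than litellm's internal code:

```python
import os

def resolve_env_ref(value):
    # Resolve a litellm-style "os.environ/VAR" reference against the
    # process environment; any other value passes through unchanged.
    # (Sketch of the convention only, not litellm's implementation.)
    if isinstance(value, str) and value.startswith("os.environ/"):
        return os.environ.get(value.split("/", 1)[1])
    return value

# e.g. ssl="os.environ/REDIS_SSL_2" resolves to the REDIS_SSL_2 env var
print(resolve_env_ref("os.environ/REDIS_SSL_2"))
```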
@@ -55,7 +55,7 @@ def test_caching_router():
             "rpm": 1800
         }
     ]
-    litellm.cache = Cache(type="redis", host="os.environ/REDIS_HOST_2", port="os.environ/REDIS_PORT_2", password="os.environ/REDIS_PASSWORD_2", ssl="os.environ/REDIS_SSL")
+    litellm.cache = Cache(type="redis", host="os.environ/REDIS_HOST_2", port="os.environ/REDIS_PORT_2", password="os.environ/REDIS_PASSWORD_2", ssl="os.environ/REDIS_SSL_2")
     router = Router(model_list=model_list,
                     routing_strategy="simple-shuffle",
                     set_verbose=False,
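The `Router` being configured here pairs a `model_list` of deployments (its closing lines appear above) with the `simple-shuffle` strategy. A self-contained sketch of a comparable setup; only `"rpm": 1800`, `routing_strategy="simple-shuffle"`, and `set_verbose=False` come from the diff, while the `model_name`/`litellm_params` entry is an illustrative assumption:

```python
from litellm import Router

model_list = [
    {
        "model_name": "gpt-3.5-turbo",                 # assumed for illustration
        "litellm_params": {"model": "gpt-3.5-turbo"},  # assumed for illustration
        "rpm": 1800,                                   # from the diff
    }
]
router = Router(model_list=model_list,
                routing_strategy="simple-shuffle",
                set_verbose=False)
```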
@@ -71,4 +71,4 @@ def test_caching_router():
         print(f"error occurred: {traceback.format_exc()}")
         pytest.fail(f"Error occurred: {e}")

-test_caching_router()
+# test_caching_router()
@@ -626,13 +626,14 @@ def test_openai_completion_on_router():
             messages=[
                 {
                     "role": "user",
-                    "content": "hello from litellm test",
+                    "content": f"hello from litellm test {time.time()}",
                 }
             ],
             stream=True
         )
         complete_response = ""
         print(response)
+        # if you want to see all the attributes and methods
         async for chunk in response:
             print(chunk)
             complete_response += chunk.choices[0].delta.content or ""
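Two details in this hunk are worth spelling out: suffixing the message with `time.time()` makes every run's prompt unique, so the Redis cache configured earlier cannot return a stale completion to this streaming test; and the `or ""` guard covers chunks whose `delta.content` is `None` (typically the final chunk). A sketch of the accumulation loop in isolation, assuming `response` is any async iterator of OpenAI-style streaming chunks:

```python
async def collect_stream(response) -> str:
    # Mirrors the accumulation in the hunk above: a chunk's
    # delta.content may be None (e.g. the final chunk), hence `or ""`.
    complete_response = ""
    async for chunk in response:
        complete_response += chunk.choices[0].delta.content or ""
    return complete_response
```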
@@ -675,7 +676,7 @@ def test_openai_completion_on_router():
     except Exception as e:
         traceback.print_exc()
         pytest.fail(f"Error occurred: {e}")
-# test_openai_completion_on_router()
+test_openai_completion_on_router()


 def test_reading_keys_os_environ():
@@ -742,7 +743,7 @@ def test_reading_keys_os_environ():
         traceback.print_exc()
         pytest.fail(f"Error occurred: {e}")

-test_reading_keys_os_environ()
+# test_reading_keys_os_environ()


 def test_reading_openai_keys_os_environ():