test: testing fixes

This commit is contained in:
Krrish Dholakia 2023-12-12 10:57:51 -08:00
parent 9cf5ab468f
commit edbf97adf2
4 changed files with 9 additions and 3 deletions

View file

@@ -1,5 +1,5 @@
Task exception was never retrieved Task exception was never retrieved
future: <Task finished name='Task-336' coro=<QueryEngine.aclose() done, defined at /opt/homebrew/lib/python3.11/site-packages/prisma/engine/query.py:110> exception=RuntimeError('Event loop is closed')> future: <Task finished name='Task-308' coro=<QueryEngine.aclose() done, defined at /opt/homebrew/lib/python3.11/site-packages/prisma/engine/query.py:110> exception=RuntimeError('Event loop is closed')>
Traceback (most recent call last): Traceback (most recent call last):
File "/opt/homebrew/lib/python3.11/site-packages/prisma/engine/query.py", line 112, in aclose File "/opt/homebrew/lib/python3.11/site-packages/prisma/engine/query.py", line 112, in aclose
await self._close_session() await self._close_session()

View file

@@ -67,3 +67,9 @@ model_list:
description: this is a test openai model description: this is a test openai model
id: f6f74e14-ac64-4403-9365-319e584dcdc5 id: f6f74e14-ac64-4403-9365-319e584dcdc5
model_name: test_openai_models model_name: test_openai_models
- litellm_params:
model: gpt-3.5-turbo
model_info:
description: this is a test openai model
id: 9b1ef341-322c-410a-8992-903987fef439
model_name: test_openai_models

View file

@@ -17,10 +17,10 @@ model_alias_map = {
"good-model": "anyscale/meta-llama/Llama-2-7b-chat-hf" "good-model": "anyscale/meta-llama/Llama-2-7b-chat-hf"
} }
litellm.model_alias_map = model_alias_map
def test_model_alias_map(): def test_model_alias_map():
try: try:
litellm.model_alias_map = model_alias_map
response = completion( response = completion(
"good-model", "good-model",
messages=[{"role": "user", "content": "Hey, how's it going?"}], messages=[{"role": "user", "content": "Hey, how's it going?"}],

View file

@@ -366,7 +366,7 @@ def test_function_calling():
} }
] ]
router = Router(model_list=model_list, routing_strategy="latency-based-routing") router = Router(model_list=model_list)
response = router.completion(model="gpt-3.5-turbo-0613", messages=messages, functions=functions) response = router.completion(model="gpt-3.5-turbo-0613", messages=messages, functions=functions)
router.reset() router.reset()
print(response) print(response)