From d409ffbaa969fa20a4e255cc7419a7389a5ec888 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Mon, 17 Jun 2024 23:04:48 -0700
Subject: [PATCH] fix test_chat_completion_different_deployments

---
 proxy_server_config.yaml       | 4 ++--
 tests/test_openai_endpoints.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/proxy_server_config.yaml b/proxy_server_config.yaml
index f1853dc83..e87452869 100644
--- a/proxy_server_config.yaml
+++ b/proxy_server_config.yaml
@@ -73,14 +73,14 @@ model_list:
       api_key: my-fake-key
       api_base: https://exampleopenaiendpoint-production.up.railway.app/
       stream_timeout: 0.001
-      rpm: 100
+      rpm: 1000
   - model_name: fake-openai-endpoint-3
     litellm_params:
       model: openai/my-fake-model-2
       api_key: my-fake-key
       api_base: https://exampleopenaiendpoint-production.up.railway.app/
       stream_timeout: 0.001
-      rpm: 100
+      rpm: 1000
   - model_name: "*"
     litellm_params:
       model: openai/*

diff --git a/tests/test_openai_endpoints.py b/tests/test_openai_endpoints.py
index e2f600b76..07bb74441 100644
--- a/tests/test_openai_endpoints.py
+++ b/tests/test_openai_endpoints.py
@@ -309,7 +309,7 @@ async def test_chat_completion_different_deployments():
     # key_gen = await generate_key(session=session)
     key = "sk-1234"
     results = []
-    for _ in range(5):
+    for _ in range(20):
         results.append(
             await chat_completion_with_headers(
                 session=session, key=key, model="fake-openai-endpoint-3"
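
For context (not part of the patch itself): the change raises the rpm limit on the two fake deployments from 100 to 1000 and sends 20 requests instead of 5, giving the router enough headroom and enough samples to reach both deployments behind the fake-openai-endpoint-3 model group. Below is a minimal sketch of the tallying pattern such a routing test relies on; the count_deployments helper, its fetch_headers parameter, and the x-litellm-model-id header name are illustrative assumptions, not taken from the patch (the real test uses chat_completion_with_headers).

# Sketch: tally which deployment served each of n requests through the proxy.
# count_deployments, fetch_headers, and "x-litellm-model-id" are assumptions
# made for illustration only.
from collections import Counter
from typing import Awaitable, Callable, Mapping


async def count_deployments(
    fetch_headers: Callable[[], Awaitable[Mapping[str, str]]],
    n: int = 20,
) -> Counter:
    seen: Counter = Counter()
    for _ in range(n):
        headers = await fetch_headers()
        # Attribute each response to the deployment id reported by the proxy.
        seen[headers.get("x-litellm-model-id", "unknown")] += 1
    # With rpm raised to 1000, 20 requests stay well under the limit, so more
    # than one deployment is expected to appear in the tally.
    return seen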