From 05858cb249bdd594a9dd084b6d71bf79b9449199 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 25 Jul 2024 18:54:30 -0700
Subject: [PATCH] test proxy all model

---
 tests/test_openai_endpoints.py | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/tests/test_openai_endpoints.py b/tests/test_openai_endpoints.py
index 59ac10552..a77da8d52 100644
--- a/tests/test_openai_endpoints.py
+++ b/tests/test_openai_endpoints.py
@@ -7,6 +7,9 @@ from openai import OpenAI, AsyncOpenAI
 from typing import Optional, List, Union
 
 
+LITELLM_MASTER_KEY = "sk-1234"
+
+
 def response_header_check(response):
     """
     - assert if response headers < 4kb (nginx limit).
@@ -467,6 +470,22 @@ async def test_openai_wildcard_chat_completion():
         await chat_completion(session=session, key=key, model="gpt-3.5-turbo-0125")
 
 
+@pytest.mark.asyncio
+async def test_proxy_all_models():
+    """
+    - proxy_server_config.yaml has model = */*
+    - Make chat completion call
+    - groq is NOT defined on /models
+
+
+    """
+    async with aiohttp.ClientSession() as session:
+        # call chat/completions with a model that the key was not created for + the model is not on the config.yaml
+        await chat_completion(
+            session=session, key=LITELLM_MASTER_KEY, model="groq/llama3-8b-8192"
+        )
+
+
 @pytest.mark.asyncio
 async def test_batch_chat_completions():
     """
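
For reference, the chat_completion helper called by the new test is defined
earlier in tests/test_openai_endpoints.py and is not part of this patch. A
minimal sketch of what it plausibly does, assuming the proxy listens on
http://0.0.0.0:4000 (the exact URL, payload, and assertions in the real
helper may differ):

import aiohttp


async def chat_completion(session: aiohttp.ClientSession, key: str, model: str):
    # POST to the proxy's OpenAI-compatible /chat/completions route,
    # authenticating with the supplied virtual key or master key.
    url = "http://0.0.0.0:4000/chat/completions"
    headers = {
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
    }
    data = {
        "model": model,
        "messages": [{"role": "user", "content": "Hello!"}],
    }
    async with session.post(url, headers=headers, json=data) as response:
        # Surface the proxy's error body if the key/model combination is rejected.
        assert response.status == 200, await response.text()
        return await response.json()

The test passes only because proxy_server_config.yaml routes every model via a
wildcard model_list entry, so the proxy forwards groq/llama3-8b-8192 to the
groq provider even though it never appears in the /models listing.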