forked from phoenix/litellm-mirror
test proxy all model
parent 9863520376
commit 05858cb249
1 changed file with 19 additions and 0 deletions
@@ -7,6 +7,9 @@ from openai import OpenAI, AsyncOpenAI
 from typing import Optional, List, Union
 
 
+LITELLM_MASTER_KEY = "sk-1234"
+
+
 def response_header_check(response):
     """
     - assert if response headers < 4kb (nginx limit).
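
The body of response_header_check is cut off by this hunk; only its signature and docstring appear as context. A minimal sketch of what an assertion against the 4 KB nginx header limit could look like, assuming aiohttp-style response headers (an illustration, not the file's actual implementation):

    def response_header_check(response):
        """
        - assert if response headers < 4kb (nginx limit).
        """
        # sum the raw size of every header name/value pair
        headers_size = sum(len(k) + len(v) for k, v in response.headers.items())
        assert headers_size < 4096, f"response headers are {headers_size} bytes; nginx limit is 4kb"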
@@ -467,6 +470,22 @@ async def test_openai_wildcard_chat_completion():
         await chat_completion(session=session, key=key, model="gpt-3.5-turbo-0125")
 
 
+@pytest.mark.asyncio
+async def test_proxy_all_models():
+    """
+    - proxy_server_config.yaml has model = */*
+    - Make chat completion call
+    - groq is NOT defined on /models
+
+
+    """
+    async with aiohttp.ClientSession() as session:
+        # call chat/completions with a model that the key was not created for + the model is not on the config.yaml
+        await chat_completion(
+            session=session, key=LITELLM_MASTER_KEY, model="groq/llama3-8b-8192"
+        )
+
+
 @pytest.mark.asyncio
 async def test_batch_chat_completions():
     """
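
The chat_completion helper the new test calls is defined earlier in the test file and is not part of this diff. A rough sketch of such a helper, assuming the proxy listens on http://0.0.0.0:4000 and accepts a minimal chat payload (the URL, payload, and return shape are all assumptions for illustration):

    import aiohttp

    async def chat_completion(session: aiohttp.ClientSession, key: str, model: str):
        # send a minimal chat request to the proxy, authenticated with the given key
        url = "http://0.0.0.0:4000/chat/completions"
        headers = {"Authorization": f"Bearer {key}", "Content-Type": "application/json"}
        data = {
            "model": model,
            "messages": [{"role": "user", "content": "Hello!"}],
        }
        async with session.post(url, headers=headers, json=data) as response:
            # the wildcard route should accept groq/llama3-8b-8192 and return 200
            assert response.status == 200, await response.text()
            return await response.json()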
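
For context, the wildcard route the docstring points at would look something like the following in proxy_server_config.yaml; the config file itself is not touched by this commit, so this entry is an assumption based on LiteLLM's model_list schema:

    model_list:
      - model_name: "*"        # match any requested model name
        litellm_params:
          model: "*"           # pass the requested model straight through to the provider

With such an entry in place, the new test exercises exactly the gap it describes: groq/llama3-8b-8192 is absent from /models, yet the master key can still complete a chat call through the wildcard route, e.g. via pytest -k test_proxy_all_models against a running proxy.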