#### What this tests ####
# This tests mock request calls to litellm
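
# Note: litellm.mock_completion (and litellm.acompletion with mock_response=...)
# returns a canned response without calling any provider, so these tests run
# offline. The default mock content is "This is a mock request" (asserted in the
# n>1 test below).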

import os
import sys
import traceback

import pytest

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

import litellm


def test_mock_request():
    # Non-streaming mock call: should return a complete response object.
    try:
        model = "gpt-3.5-turbo"
        messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
        response = litellm.mock_completion(
            model=model, messages=messages, stream=False
        )
        print(response)
        print(type(response))
    except Exception as e:
        traceback.print_exc()
        pytest.fail(f"An exception occurred: {e}")


# test_mock_request()
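
# With stream=True, mock_completion yields OpenAI-style streaming chunks: the
# text arrives incrementally in chunk["choices"][0]["delta"]["content"], as the
# accumulation loop below shows.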
def test_streaming_mock_request():
    try:
        model = "gpt-3.5-turbo"
        messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
        response = litellm.mock_completion(
            model=model, messages=messages, stream=True
        )
        complete_response = ""
        for chunk in response:
            complete_response += chunk["choices"][0]["delta"]["content"] or ""
        if complete_response == "":
            raise Exception("Empty response received")
    except Exception as e:
        traceback.print_exc()
        pytest.fail(f"An exception occurred: {e}")


# test_streaming_mock_request()


@pytest.mark.asyncio()
async def test_async_mock_streaming_request():
    generator = await litellm.acompletion(
        messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
        mock_response="LiteLLM is awesome",
        stream=True,
        model="gpt-3.5-turbo",
    )
    complete_response = ""
    async for chunk in generator:
        print(chunk)
        complete_response += chunk["choices"][0]["delta"]["content"] or ""

    assert (
        complete_response == "LiteLLM is awesome"
    ), f"Unexpected response got {complete_response}"
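

# The `n` parameter requests multiple completions; the mock response is fanned
# out into one choice per completion, which the assertions below check.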
def test_mock_request_n_greater_than_1():
    try:
        model = "gpt-3.5-turbo"
        messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
        response = litellm.mock_completion(model=model, messages=messages, n=5)
        print("response: ", response)

        assert len(response.choices) == 5
        for choice in response.choices:
            assert choice.message.content == "This is a mock request"

    except Exception as e:
        traceback.print_exc()
        pytest.fail(f"An exception occurred: {e}")


@pytest.mark.asyncio()
async def test_async_mock_streaming_request_n_greater_than_1():
    generator = await litellm.acompletion(
        messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
        mock_response="LiteLLM is awesome",
        stream=True,
        model="gpt-3.5-turbo",
        n=5,
    )
    complete_response = ""
    async for chunk in generator:
        print(chunk)
        # With n > 1, each chunk may carry a different choice index, so the
        # single-index aggregation below is left disabled for now.
        # complete_response += chunk["choices"][0]["delta"]["content"] or ""

    # assert (
    #     complete_response == "LiteLLM is awesome"
    # ), f"Unexpected response got {complete_response}"