litellm/tests/old_proxy_tests/tests/load_test_completion.py

import time
import asyncio
import os
from openai import AsyncOpenAI, AsyncAzureOpenAI
import uuid
import traceback
from large_text import text
from dotenv import load_dotenv
from statistics import mean, median

# Async client pointed at the local litellm proxy
litellm_client = AsyncOpenAI(base_url="http://0.0.0.0:4000/", api_key="sk-1234")


async def litellm_completion():
    try:
        start_time = time.time()
        response = await litellm_client.chat.completions.create(
            model="fake-openai-endpoint",
            messages=[
                {
                    "role": "user",
                    "content": f"This is a test{uuid.uuid4()}",
                }
            ],
            user="my-new-end-user-1",
        )
        end_time = time.time()
        latency = end_time - start_time
        print("response time=", latency)
        return response, latency
    except Exception as e:
        # Log the failure and return a sentinel so asyncio.gather() keeps going
        with open("error_log.txt", "a") as error_log:
            error_log.write(f"Error during completion: {str(e)}\n")
        return None, 0


async def main():
    latencies = []
    for i in range(5):
        start = time.time()
        n = 100  # Number of concurrent tasks
        tasks = [litellm_completion() for _ in range(n)]
        chat_completions = await asyncio.gather(*tasks)
        successful_completions = [c for c, l in chat_completions if c is not None]
        completion_latencies = [l for c, l in chat_completions if c is not None]
        latencies.extend(completion_latencies)

        with open("error_log.txt", "a") as error_log:
            for completion, latency in chat_completions:
                if isinstance(completion, str):
                    error_log.write(completion + "\n")

        print(n, time.time() - start, len(successful_completions))

    if latencies:
        average_latency = mean(latencies)
        median_latency = median(latencies)
        print(f"Average Latency per Response: {average_latency} seconds")
        print(f"Median Latency per Response: {median_latency} seconds")


if __name__ == "__main__":
    # Truncate the error log at the start of each run
    open("error_log.txt", "w").close()
    asyncio.run(main())
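
# Usage note (a sketch of assumptions, not part of the original script): this
# test expects a litellm proxy already running at http://0.0.0.0:4000 with a
# model alias named "fake-openai-endpoint" configured, plus a large_text.py
# module importable from the same directory. With those in place it is
# typically run directly, e.g. `python load_test_completion.py`. Per-request
# errors are appended to error_log.txt instead of raising, so each run drives
# all 5 batches of 100 concurrent requests to completion and then prints
# average and median latency.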