Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00

Commit c6ffd456ff (parent 9bd53cec6a): (fix) add some better load testing
6 changed files with 270 additions and 14 deletions

The diff below, for the first of the changed files, splits the combined import line into separate imports, switches the model from fake_openai to fake-openai-endpoint, replaces the large-text prompt with a short unique test message, runs 5 rounds of 100 concurrent requests instead of 3 rounds of 10, and adds per-request latency tracking with mean/median reporting.
@@ -1,56 +1,68 @@
-import time, asyncio, os
+import time
+import asyncio
+import os
 from openai import AsyncOpenAI, AsyncAzureOpenAI
 import uuid
 import traceback
 from large_text import text
 from dotenv import load_dotenv
+from statistics import mean, median
 
-litellm_client = AsyncOpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234")
+litellm_client = AsyncOpenAI(base_url="http://0.0.0.0:4000/", api_key="sk-1234")
 
 
 async def litellm_completion():
     # Your existing code for litellm_completion goes here
     try:
+        start_time = time.time()
         response = await litellm_client.chat.completions.create(
-            model="fake_openai",
+            model="fake-openai-endpoint",
             messages=[
                 {
                     "role": "user",
-                    "content": f"{text}. Who was alexander the great? {uuid.uuid4()}",
+                    "content": f"This is a test{uuid.uuid4()}",
                 }
             ],
             user="my-new-end-user-1",
         )
-        return response
+        end_time = time.time()
+        latency = end_time - start_time
+        print("response time=", latency)
+        return response, latency
 
     except Exception as e:
         # If there's an exception, log the error message
         with open("error_log.txt", "a") as error_log:
             error_log.write(f"Error during completion: {str(e)}\n")
-        pass
+        return None, 0
 
 
 async def main():
-    for i in range(3):
+    latencies = []
+    for i in range(5):
         start = time.time()
-        n = 10  # Number of concurrent tasks
+        n = 100  # Number of concurrent tasks
         tasks = [litellm_completion() for _ in range(n)]
 
         chat_completions = await asyncio.gather(*tasks)
 
-        successful_completions = [c for c in chat_completions if c is not None]
+        successful_completions = [c for c, l in chat_completions if c is not None]
+        completion_latencies = [l for c, l in chat_completions if c is not None]
+        latencies.extend(completion_latencies)
 
         # Write errors to error_log.txt
         with open("error_log.txt", "a") as error_log:
-            for completion in chat_completions:
+            for completion, latency in chat_completions:
                 if isinstance(completion, str):
                     error_log.write(completion + "\n")
 
         print(n, time.time() - start, len(successful_completions))
 
+    if latencies:
+        average_latency = mean(latencies)
+        median_latency = median(latencies)
+        print(f"Average Latency per Response: {average_latency} seconds")
+        print(f"Median Latency per Response: {median_latency} seconds")
 
 
 if __name__ == "__main__":
     # Blank out contents of error_log.txt
     open("error_log.txt", "w").close()
 
     asyncio.run(main())
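For readers who want the new measurement pattern in isolation: each coroutine times its own request and returns a (response, latency) tuple, asyncio.gather collects the tuples, and mean/median are computed over the successful calls only. Below is a minimal, self-contained sketch of that pattern; fake_completion and its simulated sleep are illustrative stand-ins for litellm_completion, not code from the repo.

import asyncio
import random
import time
from statistics import mean, median


async def fake_completion():
    # Stand-in for litellm_completion(): time a simulated request and
    # return (response, latency), or (None, 0) if it fails.
    start = time.time()
    try:
        await asyncio.sleep(random.uniform(0.05, 0.15))  # pretend network call
        return "ok", time.time() - start
    except Exception:
        return None, 0


async def main():
    n = 100  # number of concurrent tasks, matching the diff
    results = await asyncio.gather(*(fake_completion() for _ in range(n)))

    # Keep only the successful calls, then aggregate their latencies,
    # mirroring the mean/median reporting this commit adds.
    latencies = [latency for response, latency in results if response is not None]
    print(f"successful: {len(latencies)}/{n}")
    print(f"mean latency:   {mean(latencies):.3f}s")
    print(f"median latency: {median(latencies):.3f}s")


if __name__ == "__main__":
    asyncio.run(main())

Returning (None, 0) on failure keeps gather's result shape uniform, which is why the diff's list comprehensions can filter out failed requests without any extra error handling.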