(test) sustained load test proxy

This commit is contained in:
ishaan-jaff 2024-01-02 12:10:19 +05:30
parent 31a896908b
commit 0acaaf8f8f


@@ -4,14 +4,14 @@ import uuid
 import traceback
 
-litellm_client = AsyncOpenAI(api_key="sk-1234", base_url="http://0.0.0.0:8000")
+litellm_client = AsyncOpenAI(base_url="http://0.0.0.0:8001", api_key="any")
 
 
 async def litellm_completion():
     # Your existing code for litellm_completion goes here
     try:
         response = await litellm_client.chat.completions.create(
-            model="gpt-3.5-turbo",
+            model="azure-gpt-3.5",
             messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
         )
         return response
@@ -24,8 +24,9 @@ async def litellm_completion():
 async def main():
+    for i in range(1000000):
         start = time.time()
-        n = 1000  # Number of concurrent tasks
+        n = 500  # Number of concurrent tasks
         tasks = [litellm_completion() for _ in range(n)]
         chat_completions = await asyncio.gather(*tasks)
@@ -39,6 +40,7 @@ async def main():
                 error_log.write(completion + "\n")
         print(n, time.time() - start, len(successful_completions))
+        time.sleep(10)
 
 if __name__ == "__main__":
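
For context, a minimal self-contained sketch of what the full load-test script plausibly looks like after this commit. The imports, the error-handling in litellm_completion, the successful_completions filtering, and the error_log.txt filename are assumptions filled in from the visible diff context; only the client setup, model name, batch loop, batch size, and sleep are confirmed by the diff itself.

import asyncio
import time
import traceback
import uuid

from openai import AsyncOpenAI

# Point the client at the locally running litellm proxy (per this commit).
litellm_client = AsyncOpenAI(base_url="http://0.0.0.0:8001", api_key="any")


async def litellm_completion():
    try:
        response = await litellm_client.chat.completions.create(
            model="azure-gpt-3.5",
            messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
        )
        return response
    except Exception:
        # Assumed: failures are returned as traceback strings so main()
        # can tell them apart from successful response objects.
        return traceback.format_exc()


async def main():
    for i in range(1000000):  # run batches indefinitely for a sustained load test
        start = time.time()
        n = 500  # Number of concurrent tasks per batch
        tasks = [litellm_completion() for _ in range(n)]
        chat_completions = await asyncio.gather(*tasks)

        # Assumed bookkeeping: anything that is a string is a logged error.
        successful_completions = [c for c in chat_completions if not isinstance(c, str)]
        with open("error_log.txt", "a") as error_log:  # filename not visible in the diff
            for completion in chat_completions:
                if isinstance(completion, str):
                    error_log.write(completion + "\n")

        print(n, time.time() - start, len(successful_completions))
        time.sleep(10)  # pause between batches; the event loop is idle here anyway


if __name__ == "__main__":
    asyncio.run(main())

With n dropped from 1000 to 500 and a 10-second pause between batches, the change trades peak concurrency for a steady, repeatable request rate, which is what a sustained (rather than burst) load test of the proxy calls for.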