From 40d9e8ab232be5f77e8d72b24193e7162af9a4d2 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Mon, 27 Nov 2023 18:08:28 -0800
Subject: [PATCH] (test) load test

---
 litellm/proxy/tests/load_test_completion.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/litellm/proxy/tests/load_test_completion.py b/litellm/proxy/tests/load_test_completion.py
index dfb40850d1..16f949e8d7 100644
--- a/litellm/proxy/tests/load_test_completion.py
+++ b/litellm/proxy/tests/load_test_completion.py
@@ -13,10 +13,13 @@ litellm_client = AsyncOpenAI(
 async def litellm_completion():
     # Your existing code for litellm_completion goes here
     try:
-        return await litellm_client.chat.completions.create(
+        response = await litellm_client.chat.completions.create(
             model="gpt-3.5-turbo",
             messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
         )
+        print(response)
+        return response
+
     except Exception as e:
         # If there's an exception, log the error message
         with open("error_log.txt", "a") as error_log:
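
For readability, here is a minimal sketch of what the patched `litellm_completion` helper looks like once the hunk above is applied. The `try` block is taken verbatim from the patch; the imports, the `AsyncOpenAI` client configuration (the hunk header only shows that `litellm_client = AsyncOpenAI(` appears near line 13), and the body of the `except` branch (the hunk's context cuts off at the `with open(...)` line) are assumptions added to make the sketch self-contained.

```python
# Sketch of litellm/proxy/tests/load_test_completion.py after this patch.
# Only litellm_completion's try block is verbatim from the diff; everything
# else is an assumption inferred from the surrounding context lines.
import uuid

from openai import AsyncOpenAI

# Assumed client setup: the real file constructs AsyncOpenAI around line 13,
# presumably pointed at a locally running LiteLLM proxy.
litellm_client = AsyncOpenAI(
    api_key="anything",              # placeholder key, assumed
    base_url="http://0.0.0.0:8000",  # assumed local proxy address
)


async def litellm_completion():
    try:
        # Send a uniquely tagged request so repeated load-test calls are
        # distinguishable; print the response before returning it (the
        # behavior this patch adds).
        response = await litellm_client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
        )
        print(response)
        return response

    except Exception as e:
        # On failure, append the error to a local log file. The write call
        # is assumed: the diff hunk ends at the `with open(...)` line.
        with open("error_log.txt", "a") as error_log:
            error_log.write(f"{e}\n")
```

Capturing the response in a local variable before returning (rather than returning the awaited call directly) is what lets the load test print each completion as it arrives, which is the whole point of this change.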