(chore) update load test

author ishaan-jaff
date   2024-01-19 08:52:17 -08:00
parent cd08a02764
commit e6b5152e63
2 changed files with 7 additions and 8 deletions

@@ -57,12 +57,13 @@ model_list:
      mode: embedding
litellm_settings:
  fallbacks: [{"openai-gpt-3.5": ["azure-gpt-3.5"]}]
  success_callback: ['langfuse']
  # cache: True
  # setting callback class
  # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance]
general_settings:
  master_key: sk-1234
# general_settings:
#   master_key: sk-1234
#   database_type: "dynamo_db"
#   database_args: { # 👈 all args - https://github.com/BerriAI/litellm/blob/befbcbb7ac8f59835ce47415c128decf37aac328/litellm/proxy/_types.py#L190
#     "billing_mode": "PAY_PER_REQUEST",

@@ -11,12 +11,10 @@ async def litellm_completion():
    # Your existing code for litellm_completion goes here
    try:
        response = await litellm_client.chat.completions.create(
            model="Azure OpenAI GPT-4 Canada-East (External)",
            stream=True,
            model="azure-gpt-3.5",
            messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
        )
        async for chunk in response:
            print(chunk)
        print(response)
        return response
    except Exception as e:
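
This hunk swaps the hard-coded "Azure OpenAI GPT-4 Canada-East (External)" deployment for the "azure-gpt-3.5" alias and drops stream=True, so the test no longer iterates over chunks. As a rough illustration of the difference (assuming litellm_client is an openai.AsyncOpenAI instance pointed at the proxy, which this diff does not show), the two call styles look like this:

```python
import asyncio
import uuid

from openai import AsyncOpenAI  # assumption: the load test drives the proxy via the OpenAI SDK

# Assumed client setup; the real file defines litellm_client outside this hunk.
litellm_client = AsyncOpenAI(api_key="sk-1234", base_url="http://0.0.0.0:8000")


async def compare_call_styles() -> None:
    # Old style (removed): stream=True returns an async iterator of ChatCompletionChunk objects.
    stream = await litellm_client.chat.completions.create(
        model="azure-gpt-3.5",
        stream=True,
        messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
    )
    async for chunk in stream:
        print(chunk)

    # New style (added): a single ChatCompletion object comes back in one await.
    response = await litellm_client.chat.completions.create(
        model="azure-gpt-3.5",
        messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
    )
    print(response)


if __name__ == "__main__":
    asyncio.run(compare_call_styles())
```
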
@@ -27,9 +25,9 @@ async def litellm_completion():
async def main():
    for i in range(1000000):
    for i in range(150):
        start = time.time()
        n = 1000 # Number of concurrent tasks
        n = 150 # Number of concurrent tasks
        tasks = [litellm_completion() for _ in range(n)]
        chat_completions = await asyncio.gather(*tasks)
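
Putting the pieces together, a self-contained version of the updated load test might look like the sketch below: 150 batches of 150 concurrent, non-streaming completions against the proxy. The client construction, error logging, and the timing printout are assumptions filled in around this diff; the model name, the 150 x 150 request shape, and the gather-based fan-out come from the commit itself.

```python
import asyncio
import time
import uuid

from openai import AsyncOpenAI  # assumption: the test drives the proxy through the OpenAI SDK

# Assumption: the proxy runs locally and the master_key from the config is the client API key.
litellm_client = AsyncOpenAI(api_key="sk-1234", base_url="http://0.0.0.0:8000")


async def litellm_completion():
    try:
        response = await litellm_client.chat.completions.create(
            model="azure-gpt-3.5",
            messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
        )
        print(response)
        return response
    except Exception as e:
        # Assumption: failures are recorded to a local file so error rates can be checked later.
        with open("error_log.txt", "a") as error_log:
            error_log.write(f"Error during completion: {e}\n")
        return None


async def main():
    for i in range(150):
        start = time.time()
        n = 150  # Number of concurrent tasks per batch
        tasks = [litellm_completion() for _ in range(n)]
        chat_completions = await asyncio.gather(*tasks)
        successful = [c for c in chat_completions if c is not None]
        print(f"batch {i}: {len(successful)}/{n} succeeded in {time.time() - start:.2f}s")


if __name__ == "__main__":
    asyncio.run(main())
```

Note that 150 batches of 150 requests is 22,500 calls per run, down from the effectively unbounded 1,000,000 x 1,000 shape the test had before this commit.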