Mirror of https://github.com/BerriAI/litellm.git
(chore) update load test
commit e6b5152e63
parent cd08a02764
2 changed files with 7 additions and 8 deletions
|
@@ -57,12 +57,13 @@ model_list:
       mode: embedding
 
 litellm_settings:
   fallbacks: [{"openai-gpt-3.5": ["azure-gpt-3.5"]}]
+  success_callback: ['langfuse']
   # cache: True
   # setting callback class
   # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance]
 
-general_settings:
-  master_key: sk-1234
+# general_settings:
+  # master_key: sk-1234
   # database_type: "dynamo_db"
   # database_args: { # 👈 all args - https://github.com/BerriAI/litellm/blob/befbcbb7ac8f59835ce47415c128decf37aac328/litellm/proxy/_types.py#L190
   #   "billing_mode": "PAY_PER_REQUEST",
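The config hunk above enables the langfuse success callback and comments out general_settings, so the proxy no longer enforces the sk-1234 master key. A minimal sketch of calling the fallback route it defines, assuming the proxy runs locally and is reached through the OpenAI Python SDK (the base_url, port, and api_key below are assumptions, not part of the commit):

# Sketch: exercise the fallback route defined in litellm_settings above.
# The proxy address and api_key are assumptions, not taken from the diff.
import openai

client = openai.OpenAI(
    api_key="sk-1234",               # placeholder; master_key is commented out in this commit
    base_url="http://0.0.0.0:8000",  # assumed local litellm proxy address
)

# Per the fallbacks entry, a failed call to "openai-gpt-3.5"
# is retried against "azure-gpt-3.5".
response = client.chat.completions.create(
    model="openai-gpt-3.5",
    messages=[{"role": "user", "content": "ping"}],
)
print(response)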
@@ -11,12 +11,10 @@ async def litellm_completion():
     # Your existing code for litellm_completion goes here
     try:
         response = await litellm_client.chat.completions.create(
-            model="Azure OpenAI GPT-4 Canada-East (External)",
-            stream=True,
+            model="azure-gpt-3.5",
             messages=[{"role": "user", "content": f"This is a test: {uuid.uuid4()}"}],
         )
-        async for chunk in response:
-            print(chunk)
+        print(response)
         return response
 
     except Exception as e:
@@ -27,9 +25,9 @@ async def litellm_completion():
 
 
 async def main():
-    for i in range(1000000):
+    for i in range(150):
         start = time.time()
-        n = 1000 # Number of concurrent tasks
+        n = 150 # Number of concurrent tasks
         tasks = [litellm_completion() for _ in range(n)]
 
         chat_completions = await asyncio.gather(*tasks)