(ci/cd) comment beta test

This commit is contained in:
ishaan-jaff 2023-12-08 14:27:53 -08:00
parent 73984504e9
commit 76d0c50523

View file

@ -1,82 +1,82 @@
# Load-test script: replays a canned parallel-tool-call conversation against
# an OpenAI-compatible endpoint and times the responses.
import openai, json, time, asyncio

# Async client pointed at a local server (presumably a LiteLLM proxy — the
# key is a placeholder, not a real OpenAI key; confirm against deployment).
client = openai.AsyncOpenAI(
    api_key="sk-1234",
    base_url="http://0.0.0.0:8000"
)
# Canned chat history simulating a completed parallel-tool-call exchange:
# one user question, an assistant turn carrying three get_current_weather
# tool calls, and the three matching role="tool" result messages.  The
# time.time() suffix makes each run's user content unique (defeats any
# response caching between runs).
super_fake_messages = [
    {
        "role": "user",
        "content": f"What's the weather like in San Francisco, Tokyo, and Paris? {time.time()}"
    },
    {
        "content": None,
        "role": "assistant",
        "tool_calls": [
            {
                "id": "1",
                "function": {
                    "arguments": "{\"location\": \"San Francisco\", \"unit\": \"celsius\"}",
                    "name": "get_current_weather"
                },
                "type": "function"
            },
            {
                "id": "2",
                "function": {
                    "arguments": "{\"location\": \"Tokyo\", \"unit\": \"celsius\"}",
                    "name": "get_current_weather"
                },
                "type": "function"
            },
            {
                "id": "3",
                "function": {
                    "arguments": "{\"location\": \"Paris\", \"unit\": \"celsius\"}",
                    "name": "get_current_weather"
                },
                "type": "function"
            }
        ]
    },
    # Fake tool outputs, matched to the tool_calls above by tool_call_id.
    {
        "tool_call_id": "1",
        "role": "tool",
        "name": "get_current_weather",
        "content": "{\"location\": \"San Francisco\", \"temperature\": \"90\", \"unit\": \"celsius\"}"
    },
    {
        "tool_call_id": "2",
        "role": "tool",
        "name": "get_current_weather",
        "content": "{\"location\": \"Tokyo\", \"temperature\": \"30\", \"unit\": \"celsius\"}"
    },
    {
        "tool_call_id": "3",
        "role": "tool",
        "name": "get_current_weather",
        "content": "{\"location\": \"Paris\", \"temperature\": \"50\", \"unit\": \"celsius\"}"
    }
]
async def chat_completions():
    """Send the canned tool-call conversation once and return the response.

    The fake tool results are already in the history, so the model produces
    the follow-up completion that "sees" the function responses.
    """
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=super_fake_messages,
        seed=1337,
        stream=False,
    )
    # Small fixed pause before handing the response back to the caller.
    await asyncio.sleep(1)
    return response
async def loadtest_fn(n=1):
    """Fire `n` concurrent chat_completions() calls and print timing stats.

    Prints: n, elapsed wall-clock seconds, and the count of non-None
    responses (treated as successes).

    Fixes vs. the original:
    - The original declared ``global ... chat_completions`` and then bound
      the gather result to that name, clobbering the coroutine *function*
      after the first call — a second invocation would crash.  The result
      now lands in a local variable.
    - Dropped the ``global num_task_cancelled_errors, exception_counts``
      declaration: those names are never defined or used here.
    """
    start = time.time()
    tasks = [chat_completions() for _ in range(n)]
    # NOTE(review): exceptions from any task still propagate and abort the
    # whole batch, matching the original behavior; pass
    # return_exceptions=True here if per-task failure counting is wanted.
    responses = await asyncio.gather(*tasks)
    successful_completions = [r for r in responses if r is not None]
    print(n, time.time() - start, len(successful_completions))
# Guard the entry point so importing this module doesn't immediately fire
# network requests at the proxy; behavior when run as a script is unchanged.
if __name__ == "__main__":
    asyncio.run(loadtest_fn())