Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-15 14:08:00 +00:00
chore: setup for performance benchmarking (#3096)
# What does this PR do?

1. Added a simple mock OpenAI-compatible server that serves chat completions
2. Added a benchmark server in EKS that includes the mock inference server
3. Added a locust (https://locust.io/) file for load testing

## Test Plan

```bash
bash apply.sh
kubectl port-forward service/locust-web-ui 8089:8089
```

Go to localhost:8089 to start a load test.

<img width="1392" height="334" alt="image" src="https://github.com/user-attachments/assets/d6aa3deb-583a-42ed-889b-751262b8e91c" />
<img width="1362" height="881" alt="image" src="https://github.com/user-attachments/assets/6a28b9b4-05e6-44e2-b504-07e60c12d35e" />
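The locustfile and mock server added by this PR are not reproduced in this view. As a rough sketch only (not the PR's actual code), a locust load test against an OpenAI-compatible chat completions endpoint could look like the following; the endpoint path, model name, and payload shape are illustrative assumptions.

```python
# Sketch of a locust load test for an OpenAI-compatible chat completions endpoint.
# The endpoint path, model name, and payload below are illustrative assumptions,
# not the actual contents of the locustfile added in this PR.
from locust import HttpUser, task, between


class ChatCompletionUser(HttpUser):
    # Each simulated user waits 1-2 seconds between requests.
    wait_time = between(1, 2)

    @task
    def chat_completion(self):
        # POST a minimal chat completion request to the target host.
        self.client.post(
            "/v1/chat/completions",
            json={
                "model": "mock-model",
                "messages": [{"role": "user", "content": "Hello"}],
                "max_tokens": 32,
            },
        )
```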
parent 2f51273215
commit d6ae54723d
11 changed files with 1234 additions and 3 deletions
pyproject.toml:

```diff
@@ -140,6 +140,9 @@ docs = [
     "requests",
 ]
 codegen = ["rich", "pydantic", "jinja2>=3.1.6"]
+benchmark = [
+    "locust>=2.37.14",
+]
 
 [project.urls]
 Homepage = "https://github.com/meta-llama/llama-stack"
```
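As a usage note (not part of this PR's diff): once locust is available via the new `benchmark` dependency group, a load test can also be driven headless from the CLI rather than through the web UI. The locustfile path, target host, and load parameters below are placeholders for illustration only.

```bash
# Sketch only: locustfile path, host, and load parameters are placeholders,
# not values taken from this PR.
locust -f locustfile.py \
  --host http://localhost:8321 \
  --headless -u 10 -r 2 -t 1m
```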