mirror of https://github.com/meta-llama/llama-stack.git
eleuther eval fix
commit fb565dfb06
parent ad18dc94ac
3 changed files with 24 additions and 20 deletions
@@ -54,27 +54,29 @@ class EvaluationClient(Evals):

async def run_main(host: str, port: int):
    client = EvaluationClient(f"http://{host}:{port}")

    # CustomDataset
    # Custom Eval Task
    # response = await client.run_evals(
    #     model="Llama3.1-8B-Instruct",
    #     dataset="mmlu-simple-eval-en",
    #     task="mmlu",
    #     eval_task_config=EvaluateTaskConfig(
    #         n_samples=2,
    #     ),
    # )

    # Eleuther Eval Task
    response = await client.run_evals(
        model="Llama3.1-8B-Instruct",
        dataset="mmlu-simple-eval-en",
        # task="mmlu",
        # task="meta_mmlu_pro_instruct",
        task="meta_ifeval",
        eval_task_config=EvaluateTaskConfig(
            n_samples=2,
        ),
    )
    cprint(f"evaluate response={response}", "green")

    # Eleuther Eval Task
    # response = await client.run_evals(
    #     model="Llama3.1-8B-Instruct",
    #     task="meta_mmlu_pro_instruct",
    #     # task="meta_ifeval",
    #     eval_task_config=EvaluateTaskConfig(
    #         n_samples=2,
    #     )
    # )
    # cprint(response.metrics["metrics_table"], "red")
    if response.formatted_report:
        cprint(response.formatted_report, "green")
    else:
        cprint(f"evaluate response={response}", "green")


def main(host: str, port: int):
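The hunk ends at the def main stub, so the synchronous entry point itself is not visible in this diff. Below is a minimal sketch of how a client script like this is usually driven, assuming an asyncio event loop plus a fire-based CLI wrapper; both of those details are assumptions for illustration, not taken from the commit.

import asyncio

import fire


def main(host: str, port: int):
    # Bridge the CLI into the async client; run_main is the coroutine
    # shown in the hunk above. (Wiring assumed, not part of this diff.)
    asyncio.run(run_main(host, port))


if __name__ == "__main__":
    # Example invocation: python client.py --host localhost --port 5000
    fire.Fire(main)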