Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-10 11:39:47 +00:00)

Commit 31c046dcdf: evals new rebase
Parent: 89d24a07f0

28 changed files with 1141 additions and 87 deletions
llama_stack/providers/impls/third_party/evals/__init__.py (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
llama_stack/providers/impls/third_party/evals/eleuther/__init__.py (vendored, new file, 19 lines)

@@ -0,0 +1,19 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from .config import EleutherEvalsImplConfig  # noqa
from llama_stack.apis.inference import *  # noqa: F403
from llama_stack.distribution.datatypes import Api, ProviderSpec


async def get_provider_impl(
    config: EleutherEvalsImplConfig, deps: Dict[Api, ProviderSpec]
):
    from .eleuther import EleutherEvalsAdapter

    impl = EleutherEvalsAdapter(config, deps[Api.inference])
    await impl.initialize()
    return impl
llama_stack/providers/impls/third_party/evals/eleuther/config.py (vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pydantic import BaseModel


class EleutherEvalsImplConfig(BaseModel): ...
llama_stack/providers/impls/third_party/evals/eleuther/eleuther.py (vendored, new file, 168 lines)

@@ -0,0 +1,168 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import asyncio
from llama_stack.apis.inference import *  # noqa: F403
from llama_stack.apis.evals import *  # noqa: F403
import os
import random
import threading
from pathlib import Path

import lm_eval
import tqdm
from lm_eval.api.model import LM
from lm_eval.evaluator import evaluate, get_task_list
from lm_eval.tasks import get_task_dict, TaskManager
from termcolor import cprint

from .config import EleutherEvalsImplConfig


# https://stackoverflow.com/questions/74703727/how-to-call-async-function-from-sync-funcion-and-get-result-while-a-loop-is-alr
# We will use another thread with its own event loop to run the async API within a sync function
_loop = asyncio.new_event_loop()
_thr = threading.Thread(target=_loop.run_forever, name="Async Runner", daemon=True)


class EleutherEvalsWrapper(LM):
    def __init__(
        self,
        inference_api: Inference,
        model: str,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.inference_api = inference_api
        self.model = model
        self.tokenizer = None
        self.tokenized_requests = False
        self.kwargs = kwargs

    @property
    def eot_token_id(self):
        raise NotImplementedError("Not implemented")

    @property
    def max_length(self) -> int:
        raise NotImplementedError("Not implemented")

    @property
    def max_gen_toks(self) -> int:
        raise NotImplementedError("Not implemented")

    @property
    def batch_size(self):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError("No support for logits.")

    @property
    def device(self):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError("No support for logits.")

    @property
    def world_size(self):
        return 1

    def tok_encode(self, string: str) -> List[int]:
        raise NotImplementedError("Not implemented")

    def tok_decode(self, tokens: List[int]) -> str:
        raise NotImplementedError("Not implemented")

    def _loglikelihood_tokens(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError("No support for logits.")

    def _model_call(self, inps):
        # Isn't used because we override _loglikelihood_tokens
        raise NotImplementedError()

    def _model_generate(self, context, max_length, eos_token_id):
        # Isn't used because we override generate_until
        raise NotImplementedError()

    def loglikelihood(self, requests, disable_tqdm: bool = False):
        # TODO: implement inference completion with loglikelihood
        res = []
        for req in requests:
            res.append((-random.random(), False))

        return res

    def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
        raise NotImplementedError("No support for logits.")

    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        res = []
        if not _thr.is_alive():
            _thr.start()
        for req in tqdm.tqdm(requests):
            chat_completion_coro_fn = self.inference_api.chat_completion(
                model=self.model,
                messages=[
                    {
                        "role": "user",
                        "content": req.args[0],
                    }
                ],
                stream=False,
            )
            future = asyncio.run_coroutine_threadsafe(chat_completion_coro_fn, _loop)
            response = future.result()
            res.append(response.completion_message.content)

        return res


class EleutherEvalsAdapter(Evals):
    def __init__(self, config: EleutherEvalsImplConfig, inference_api: Inference):
        self.inference_api = inference_api

    async def initialize(self) -> None:
        pass

    async def shutdown(self) -> None:
        pass

    async def run_evals(
        self,
        model: str,
        task: str,
        dataset: Optional[str] = None,
        eval_task_config: Optional[EvaluateTaskConfig] = None,
    ) -> EvaluateResponse:
        cprint(f"Eleuther Evals: {model} {dataset} {task}", "red")

        eluther_wrapper = EleutherEvalsWrapper(self.inference_api, model)
        current_dir = Path(os.path.dirname(os.path.abspath(__file__)))

        # custom registry of harness tasks
        task_manager = TaskManager(
            include_path=str(current_dir / "tasks"),
        )

        task_dict = get_task_dict(task, task_manager)
        cprint(task_dict, "blue")

        task_types = set([t.task.OUTPUT_TYPE for t in get_task_list(task_dict)])
        cprint(task_types, "cyan")

        output = evaluate(
            eluther_wrapper,
            task_dict,
            limit=eval_task_config.n_samples,
        )

        formatted_output = lm_eval.utils.make_table(output)

        cprint(formatted_output, "green")

        return EvaluateResponse(
            metrics={
                "metrics_table": formatted_output,
            },
        )
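The wrapper above has to call the async chat_completion API from lm-eval's synchronous callbacks, which is why it parks a second event loop on a daemon thread and submits coroutines to it. A minimal, self-contained sketch of that bridging pattern (the coroutine and function names here are illustrative stand-ins, not from the diff):

import asyncio
import threading

# background event loop on a daemon thread, as in eleuther.py above
_loop = asyncio.new_event_loop()
_thr = threading.Thread(target=_loop.run_forever, name="Async Runner", daemon=True)


async def fake_chat_completion(prompt: str) -> str:
    # stand-in for the async inference_api.chat_completion(...) call
    await asyncio.sleep(0.01)
    return f"echo: {prompt}"


def generate_until_sync(prompts):
    # synchronous caller (as lm-eval would be) submits coroutines to the background loop
    if not _thr.is_alive():
        _thr.start()
    results = []
    for p in prompts:
        future = asyncio.run_coroutine_threadsafe(fake_chat_completion(p), _loop)
        results.append(future.result())  # block until the coroutine completes
    return results


if __name__ == "__main__":
    print(generate_until_sync(["hello", "world"]))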
llama_stack/providers/impls/third_party/evals/eleuther/tasks/meta_ifeval/ifeval.yaml (vendored, new file, 32 lines)

@@ -0,0 +1,32 @@
task: meta_ifeval
dataset_path: meta-llama/Llama-3.1-8B-Instruct-evals
dataset_name: Llama-3.1-8B-Instruct-evals__ifeval__strict__details
output_type: generate_until
test_split: latest
process_docs: !function utils.process_docs
num_fewshot: 0
doc_to_text: prompt
doc_to_target: 0
generation_kwargs:
  until: []
  do_sample: false
  temperature: 0.0
  max_gen_toks: 1280
process_results: !function utils.process_results
metric_list:
  - metric: prompt_level_strict_acc
    aggregation: mean
    higher_is_better: true
  - metric: inst_level_strict_acc
    aggregation: !function utils.agg_inst_level_acc
    higher_is_better: true
  - metric: prompt_level_loose_acc
    aggregation: mean
    higher_is_better: true
  - metric: inst_level_loose_acc
    aggregation: !function utils.agg_inst_level_acc
    higher_is_better: true
metadata:
  version: 2.0
fewshot_config:
  sampler: first_n
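This YAML registers meta_ifeval as a custom lm-eval-harness task; run_evals above picks it up by pointing a TaskManager at the tasks/ directory. A small sketch of that lookup, assuming lm_eval is installed and this tasks/ directory sits next to the script:

from pathlib import Path

from lm_eval.tasks import TaskManager, get_task_dict

# assumed layout: ./tasks/meta_ifeval/ifeval.yaml relative to this script
tasks_dir = Path(__file__).parent / "tasks"
task_manager = TaskManager(include_path=str(tasks_dir))

# resolves the custom task name to a configured task object
task_dict = get_task_dict("meta_ifeval", task_manager)
print(list(task_dict.keys()))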
llama_stack/providers/impls/third_party/evals/eleuther/tasks/meta_ifeval/utils.py (vendored, new file, 191 lines)

@@ -0,0 +1,191 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import dataclasses
from typing import Dict, Optional, Union

import datasets

from lm_eval.tasks.ifeval import instructions_registry


@dataclasses.dataclass
class InputExample:
    key: int
    instruction_id_list: list[str]
    prompt: str
    kwargs: list[Dict[str, Optional[Union[str, int]]]]


@dataclasses.dataclass
class OutputExample:
    instruction_id_list: list[str]
    prompt: str
    response: str
    follow_all_instructions: bool
    follow_instruction_list: list[bool]


def test_instruction_following_strict(
    inp,
    response,
):
    """Tests response to see if instructions are followed."""
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)

        # Remove None values from kwargs to avoid unexpected keyword argument errors in build_description method.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        if response.strip() and instruction.check_following(response):
            is_following_list.append(True)
        else:
            is_following_list.append(False)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )


def test_instruction_following_loose(
    inp,
    response,
):
    """Tests response for an upper bound for following instructions."""
    r = response.split("\n")
    response_remove_first = "\n".join(r[1:]).strip()
    response_remove_last = "\n".join(r[:-1]).strip()
    response_remove_both = "\n".join(r[1:-1]).strip()
    revised_response = response.replace("*", "")
    revised_response_remove_first = response_remove_first.replace("*", "")
    revised_response_remove_last = response_remove_last.replace("*", "")
    revised_response_remove_both = response_remove_both.replace("*", "")
    all_responses = [
        response,
        revised_response,
        response_remove_first,
        response_remove_last,
        response_remove_both,
        revised_response_remove_first,
        revised_response_remove_last,
        revised_response_remove_both,
    ]
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)

        # Remove None values from kwargs to avoid unexpected keyword argument errors in build_description method.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        is_following = False
        for r in all_responses:
            if r.strip() and instruction.check_following(r):
                is_following = True
                break

        is_following_list.append(is_following)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )


def process_results(doc, results):
    new_kwargs = []
    for item in doc["kwargs"]:
        if item["nth_paragraph"]:
            item["nth_paragraph"] = int(item["nth_paragraph"])
        new_kwargs.append(item)
    inp = InputExample(
        key=doc["key"],
        instruction_id_list=doc["instruction_id_list"],
        prompt=doc["prompt"],
        kwargs=new_kwargs,
    )
    response = results[0]

    out_strict = test_instruction_following_strict(inp, response)
    out_loose = test_instruction_following_loose(inp, response)

    return {
        "prompt_level_strict_acc": out_strict.follow_all_instructions,
        "inst_level_strict_acc": out_strict.follow_instruction_list,
        "prompt_level_loose_acc": out_loose.follow_all_instructions,
        "inst_level_loose_acc": out_loose.follow_instruction_list,
    }


def agg_inst_level_acc(items):
    flat_items = [item for sublist in items for item in sublist]
    inst_level_acc = sum(flat_items) / len(flat_items)
    return inst_level_acc


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _get_question(example: dict) -> dict:
        # get the question from the ifeval dataset
        example["input_question"] = (
            eval(
                example["input_question"]
                .replace("null", "None")
                .replace("true", "True")
                .replace("false", "False")
            )["dialog"][0]["body"]
            .replace("Is it True that the first song", "Is it true that the first song")
            .replace("Is the following True", "Is the following true")
        )
        example["input_final_prompts"] = example["input_final_prompts"][0]
        return example

    original_dataset_name = "wis-k/instruction-following-eval"
    ifeval_data = datasets.load_dataset(original_dataset_name, split="train")
    ifeval_df = ifeval_data.to_pandas()
    ifeval_df = ifeval_df.rename(columns={"prompt": "input_question"})

    meta_dataset = dataset.map(_get_question)
    meta_df = meta_dataset.to_pandas()

    # join the two datasets on the input_question column
    joined = meta_df.join(ifeval_df.set_index("input_question"), on="input_question")
    joined = joined.rename(columns={"input_final_prompts": "prompt"})
    joined = joined.rename(columns={"is_correct": "previous_is_correct"})
    joined = datasets.Dataset.from_pandas(joined)
    joined = joined.select_columns(
        [
            "input_question",
            "prompt",
            "previous_is_correct",
            "instruction_id_list",
            "kwargs",
            "output_prediction_text",
            "key",
        ]
    )
    # rename_column returns a new dataset, so reassign it to keep the renamed column
    joined = joined.rename_column("output_prediction_text", "previous_output_prediction_text")
    return joined
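process_results emits one boolean per prompt (the prompt_level_* metrics) and one list of booleans per prompt (the inst_level_* metrics); agg_inst_level_acc then flattens those lists across prompts and averages them. A toy run with fabricated results:

# two prompts: the first followed 2 of 2 instructions, the second 1 of 2
inst_level_results = [[True, True], [True, False]]

flat = [item for sublist in inst_level_results for item in sublist]
print(sum(flat) / len(flat))  # 0.75, the instruction-level accuracy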
@@ -0,0 +1,29 @@
task: meta_mmlu_pro_instruct
dataset_path: meta-llama/Llama-3.1-8B-Instruct-evals
dataset_name: Llama-3.1-8B-Instruct-evals__mmlu_pro__details
test_split: latest
output_type: generate_until
process_docs: !function utils.process_docs
doc_to_text: !function utils.doc_to_text
doc_to_target: gold
filter_list:
  - name: "strict-match"
    filter:
      - function: "regex"
        group_select: -1
        regex_pattern: 'best answer is ([A-Z])'
      - function: "take_first"
generation_kwargs:
  until: []
  do_sample: false
  temperature: 0
  max_gen_toks: 1024
num_fewshot: 0
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
metadata:
  version: 1.0
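The strict-match filter extracts the predicted letter by taking the last occurrence of "best answer is ([A-Z])" in the generation (group_select: -1), and take_first then keeps a single candidate per document. Roughly equivalent Python, on a made-up model output:

import re

output = "Let me think. The best answer is C. Actually, the best answer is B."
matches = re.findall(r"best answer is ([A-Z])", output)
prediction = matches[-1] if matches else ""  # group_select: -1 keeps the last match
print(prediction)  # B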
llama_stack/providers/impls/third_party/evals/eleuther/tasks/meta_mmlu_pro/utils.py (vendored, new file, 35 lines)

@@ -0,0 +1,35 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import datasets


def doc_to_text(doc: dict) -> str:
    return doc["input_final_prompts"][0]


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc: dict) -> dict:
        out_doc = {
            "problem": doc["input_question"],
            "gold": doc["input_correct_responses"][0],
        }
        return out_doc

    dataset = dataset.select_columns(
        [
            "input_question",
            "input_correct_responses",
            "input_final_prompts",
            "is_correct",
            "input_question_hash",
            "input_choice_list",
            "output_prediction_text",
        ],
    )
    dataset = dataset.rename_column("is_correct", "previously_is_correct")
    dataset = dataset.map(_process_doc)
    return dataset
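doc_to_text sends the model the first pre-rendered prompt, and _process_doc adds the problem/gold fields that the task's doc_to_target: gold points at. A toy run on a fabricated record (field values invented for illustration):

doc = {
    "input_question": "What is 2 + 2?",
    "input_correct_responses": ["B"],
    "input_final_prompts": ["Question: What is 2 + 2?\n... The best answer is"],
}

# prompt handed to the model (doc_to_text)
print(doc["input_final_prompts"][0])

# fields added by _process_doc; "gold" is what exact_match compares against
print({"problem": doc["input_question"], "gold": doc["input_correct_responses"][0]})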