Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-28 15:02:37 +00:00

rename tasks

This commit is contained in:
parent 9c38d9ae13
commit ae43044a57

7 changed files with 5 additions and 6 deletions
@@ -44,7 +44,7 @@ async def run_main(host: str, port: int):

     # CustomDataset
     response = await client.run_evals(
-        "Llama3.2-1B-Instruct",
+        "Llama3.1-8B-Instruct",
         "mmlu-simple-eval-en",
         "mmlu",
     )
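For context, a minimal sketch of how the run_main coroutine in this hunk might be driven from a synchronous entry point; the wrapper and the host/port values below are illustrative assumptions and do not appear in the commit:

import asyncio

# Hypothetical entry point. run_main(host, port) is the coroutine from the hunk
# above, which awaits the client.run_evals(...) call shown there.
if __name__ == "__main__":
    asyncio.run(run_main("localhost", 5000))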
@@ -90,7 +90,6 @@ class EleutherEvalsWrapper(LM):
     def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
         res = []
         for req in requests:
-            print("generation for msg: ", req.args[0])
             response = self.inference_api.chat_completion(
                 model=self.model,
                 messages=[
@@ -144,7 +143,7 @@ class EleutherEvalsAdapter(Evals):
         output = evaluate(
             eluther_wrapper,
             task_dict,
-            limit=1,
+            limit=10,
         )

         formatted_output = lm_eval.utils.make_table(output)
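The limit argument caps how many documents per task lm-eval will score, so this change moves the adapter from a one-example smoke test to a ten-example run. A hedged sketch of the pattern these lines follow, using only the names visible in the hunk; how eluther_wrapper and task_dict are built is not shown in this diff:

from lm_eval.evaluator import evaluate
import lm_eval.utils

def run_eleuther_eval(eluther_wrapper, task_dict):
    # Hypothetical helper mirroring the adapter code above: score the wrapped
    # model on the given lm-eval tasks and return a formatted results table.
    output = evaluate(
        eluther_wrapper,
        task_dict,
        limit=10,  # at most 10 documents per task (was 1 before this commit)
    )
    return lm_eval.utils.make_table(output)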
@@ -21,7 +21,7 @@ providers:
   - provider_id: meta-reference
     provider_type: meta-reference
     config:
-      model: Llama3.2-1B-Instruct
+      model: Llama3.1-8B-Instruct
       quantization: null
       torch_seed: null
       max_seq_len: 4096
@@ -54,8 +54,8 @@ providers:
     provider_type: meta-reference
     config: {}
 models:
-  - identifier: Llama3.2-1B-Instruct
-    llama_model: Llama3.2-1B-Instruct
+  - identifier: Llama3.1-8B-Instruct
+    llama_model: Llama3.1-8B-Instruct
     provider_id: meta-reference
 shields:
   - identifier: llama_guard
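A quick way to sanity-check the edited run configuration is to load it and list the registered model and shield identifiers. A hedged sketch: the file name run.yaml and the top-level placement of the models and shields keys are assumptions based on the hunk's context lines, since the diff view does not show file paths:

import yaml

# Hypothetical check of the YAML config touched by the two hunks above.
with open("run.yaml") as f:
    cfg = yaml.safe_load(f)

print("models: ", [m["identifier"] for m in cfg.get("models", [])])   # expect ['Llama3.1-8B-Instruct']
print("shields:", [s["identifier"] for s in cfg.get("shields", [])])  # expect ['llama_guard']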