From f227045b6bd81f563744c54549dea59ce5ee95b9 Mon Sep 17 00:00:00 2001
From: Botao Chen
Date: Tue, 25 Feb 2025 23:28:05 -0800
Subject: [PATCH] refine

---
 docs/notebooks/Alpha_Llama_Stack_Post_Training.ipynb | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/notebooks/Alpha_Llama_Stack_Post_Training.ipynb b/docs/notebooks/Alpha_Llama_Stack_Post_Training.ipynb
index 21155418e..c55c8da36 100644
--- a/docs/notebooks/Alpha_Llama_Stack_Post_Training.ipynb
+++ b/docs/notebooks/Alpha_Llama_Stack_Post_Training.ipynb
@@ -15,7 +15,7 @@
 "\n",
 "Resource requirement:\n",
 "- You can run this notebook with Llama 3.2 3B instruct model on Colab's **FREE** T4 GPU\n",
-"- You can run this notebook with Llama 3.1 8B instruct model on Colab's A100 GPU or any GPU types with more than 16GB memory\n",
-"- You need to spin up an ollama server on local host (will provider step by step instruction on this)\n",
+"- You can run this notebook with Llama 3.1 8B instruct model on Colab's A100 GPU or any GPU types with more than 22GB memory\n",
+"- You need to spin up an ollama server on localhost (we will provide step-by-step instructions on this)\n",
 "\n",
 "> **Note**: Llama Stack post training APIs are in alpha release stage and still under heavy development\n"
@@ -6360,7 +6360,7 @@
-"# We limit to 50 rows from the dataset to save time\n",
+"# We fetch all rows from the dataset (rows_in_page=-1 means no limit)\n",
 "eval_rows = client.datasetio.get_rows_paginated(\n",
 "    dataset_id=\"eval_dataset\",\n",
-"    rows_in_page=50,\n",
+"    rows_in_page=-1,\n",
 ")\n",
 "\n",
 "from tqdm import tqdm\n",