From 995a1a1d0058733de0464a3a138fa8402dc88723 Mon Sep 17 00:00:00 2001
From: Karthi Keyan <84800257+KarthiDreamr@users.noreply.github.com>
Date: Thu, 26 Sep 2024 23:07:15 +0530
Subject: [PATCH] Reordered pip install and llama model download (#112)

The `llama` CLI can only be used after the pip install step (as the
notebook itself states), so it makes sense to put the install step
before the model download.
---
 docs/getting_started.ipynb | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/getting_started.ipynb b/docs/getting_started.ipynb
index d5e5cdc96..6ef852aff 100644
--- a/docs/getting_started.ipynb
+++ b/docs/getting_started.ipynb
@@ -39,13 +39,7 @@
     "$ docker pull llamastack/llamastack-local-gpu\n",
     "```\n",
     "\n",
-    "2. Download model \n",
-    "```\n",
-    "$ llama download --help \n",
-    "$ llama download --source meta --model-id Llama3.2-11B-Vision-Instruct --meta-url \n",
-    "```\n",
-    "\n",
-    "3. pip install the llama stack client package \n",
+    "2. pip install the llama stack client package \n",
     "For this purpose, we will directly work with pre-built docker containers and use the python SDK\n",
     "```\n",
     "$ git clone https://github.com/meta-llama/llama-stack-apps.git\n",
@@ -57,7 +51,13 @@
     "$ pip install llama_stack llama_stack_client\n",
     "```\n",
     "This will install `llama_stack` and `llama_stack_client` packages. \n",
-    "This will also enable you to use the `llama` cli. \n",
+    "This will enable you to use the `llama` cli. \n",
+    "\n",
+    "3. Download model \n",
+    "```\n",
+    "$ llama download --help \n",
+    "$ llama download --source meta --model-id Llama3.2-11B-Vision-Instruct --meta-url \n",
+    "```\n",
     "\n",
     "4. Configure the Stack Server\n",
     "```\n",
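
For reference, the install-then-download sequence the notebook walks
through after this patch, as a rough sketch (it assumes the conda/pip
environment from the notebook's earlier steps; the --meta-url value is
elided in the notebook and stays elided here):

    $ docker pull llamastack/llamastack-local-gpu
    $ git clone https://github.com/meta-llama/llama-stack-apps.git
    $ pip install llama_stack llama_stack_client   # this step makes the `llama` CLI available
    $ llama download --help
    $ llama download --source meta --model-id Llama3.2-11B-Vision-Instruct --meta-url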