From feb9eb8b0daf3af28021e512f798b16f6a9cb345 Mon Sep 17 00:00:00 2001 From: Christian Zaccaria <73656840+ChristianZaccaria@users.noreply.github.com> Date: Tue, 6 May 2025 17:51:20 +0100 Subject: [PATCH] docs: Remove datasets.rst and fix llama-stack build commands (#2061) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Issue Closes #2073 # What does this PR do? - Removes `datasets.rst` from the list of document urls as it no longer exists in torchtune. Referenced PR: https://github.com/pytorch/torchtune/pull/1781 - Replaces `uv venv` with `uv sync` in the environment setup step, so that the `llama` CLI is installed into the virtual environment. Previously, I would get the following error: ``` ➜ llama-stack git:(remove-deprecated-rst) uv venv --python 3.10 source .venv/bin/activate Using CPython 3.10.13 interpreter at: /usr/bin/python3.10 Creating virtual environment at: .venv Activate with: source .venv/bin/activate (llama-stack) ➜ llama-stack git:(remove-deprecated-rst) INFERENCE_MODEL=llama3.2:3b llama stack build --template ollama --image-type venv --run zsh: llama: command not found... ``` [//]: # (If resolving an issue, uncomment and update the line below) [//]: # (Closes #[issue-number]) ## Test Plan To test: Run through the `rag_agent` example in the `detailed_tutorial.md` file. 
[//]: # (## Documentation) --- docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb | 2 -- docs/source/getting_started/detailed_tutorial.md | 4 +--- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb b/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb index 399a3bff1..e70cc3bbe 100644 --- a/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb +++ b/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb @@ -840,7 +840,6 @@ " \"memory_optimizations.rst\",\n", " \"chat.rst\",\n", " \"llama3.rst\",\n", - " \"datasets.rst\",\n", " \"qat_finetune.rst\",\n", " \"lora_finetune.rst\",\n", "]\n", @@ -1586,7 +1585,6 @@ " \"memory_optimizations.rst\",\n", " \"chat.rst\",\n", " \"llama3.rst\",\n", - " \"datasets.rst\",\n", " \"qat_finetune.rst\",\n", " \"lora_finetune.rst\",\n", "]\n", diff --git a/docs/source/getting_started/detailed_tutorial.md b/docs/source/getting_started/detailed_tutorial.md index a1504f249..e40a4903a 100644 --- a/docs/source/getting_started/detailed_tutorial.md +++ b/docs/source/getting_started/detailed_tutorial.md @@ -42,7 +42,7 @@ powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | ie Setup your virtual environment. ```bash -uv venv --python 3.10 +uv sync --python 3.10 source .venv/bin/activate ``` ## Step 2: Run Llama Stack @@ -445,7 +445,6 @@ from llama_stack_client import LlamaStackClient from llama_stack_client import Agent, AgentEventLogger from llama_stack_client.types import Document import uuid -from termcolor import cprint client = LlamaStackClient(base_url="http://localhost:8321") @@ -463,7 +462,6 @@ urls = [ "memory_optimizations.rst", "chat.rst", "llama3.rst", - "datasets.rst", "qat_finetune.rst", "lora_finetune.rst", ]