mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-27 18:50:41 +00:00
docs, tests: replace datasets.rst with memory_optimizations.rst (#968)
datasets.rst was removed from the torchtune repo. Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com> # What does this PR do? Replace a reference to a missing (404) document with one that exists. (Removed datasets.rst from the list, since memory_optimizations.rst was already included.) ## Test Plan Please describe: - tests you ran to verify your changes with result summaries. - provide instructions so it can be reproduced. ## Sources Please link relevant resources if necessary. ## Before submitting - [x] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [x] Ran pre-commit to handle lint / formatting issues. - [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section? - [ ] Updated relevant documentation. - [ ] Wrote necessary unit or integration tests. Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
This commit is contained in:
parent
529708215c
commit
5c8e35a9e2
5 changed files with 3 additions and 5 deletions
|
@@ -1662,7 +1662,7 @@
|
||||||
"from termcolor import cprint\n",
|
"from termcolor import cprint\n",
|
||||||
"from llama_stack_client.types import Document\n",
|
"from llama_stack_client.types import Document\n",
|
||||||
"\n",
|
"\n",
|
||||||
"urls = [\"chat.rst\", \"llama3.rst\", \"datasets.rst\", \"lora_finetune.rst\"]\n",
|
"urls = [\"chat.rst\", \"llama3.rst\", \"memory_optimizations.rst\", \"lora_finetune.rst\"]\n",
|
||||||
"documents = [\n",
|
"documents = [\n",
|
||||||
" Document(\n",
|
" Document(\n",
|
||||||
" document_id=f\"num-{i}\",\n",
|
" document_id=f\"num-{i}\",\n",
|
||||||
|
|
|
@@ -185,7 +185,7 @@ client = (
|
||||||
) # or create_http_client() depending on the environment you picked
|
) # or create_http_client() depending on the environment you picked
|
||||||
|
|
||||||
# Documents to be used for RAG
|
# Documents to be used for RAG
|
||||||
urls = ["chat.rst", "llama3.rst", "datasets.rst", "lora_finetune.rst"]
|
urls = ["chat.rst", "llama3.rst", "memory_optimizations.rst", "lora_finetune.rst"]
|
||||||
documents = [
|
documents = [
|
||||||
Document(
|
Document(
|
||||||
document_id=f"num-{i}",
|
document_id=f"num-{i}",
|
||||||
|
|
|
@@ -152,7 +152,6 @@ class TestAgents:
|
||||||
"memory_optimizations.rst",
|
"memory_optimizations.rst",
|
||||||
"chat.rst",
|
"chat.rst",
|
||||||
"llama3.rst",
|
"llama3.rst",
|
||||||
"datasets.rst",
|
|
||||||
"qat_finetune.rst",
|
"qat_finetune.rst",
|
||||||
"lora_finetune.rst",
|
"lora_finetune.rst",
|
||||||
]
|
]
|
||||||
|
|
|
@@ -28,7 +28,6 @@ def sample_documents():
|
||||||
"memory_optimizations.rst",
|
"memory_optimizations.rst",
|
||||||
"chat.rst",
|
"chat.rst",
|
||||||
"llama3.rst",
|
"llama3.rst",
|
||||||
"datasets.rst",
|
|
||||||
"qat_finetune.rst",
|
"qat_finetune.rst",
|
||||||
"lora_finetune.rst",
|
"lora_finetune.rst",
|
||||||
]
|
]
|
||||||
|
|
|
@@ -264,7 +264,7 @@ def test_custom_tool(llama_stack_client, agent_config):
|
||||||
|
|
||||||
|
|
||||||
def test_rag_agent(llama_stack_client, agent_config):
|
def test_rag_agent(llama_stack_client, agent_config):
|
||||||
urls = ["chat.rst", "llama3.rst", "datasets.rst", "lora_finetune.rst"]
|
urls = ["chat.rst", "llama3.rst", "memory_optimizations.rst", "lora_finetune.rst"]
|
||||||
documents = [
|
documents = [
|
||||||
Document(
|
Document(
|
||||||
document_id=f"num-{i}",
|
document_id=f"num-{i}",
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue