docs: update documentation links (#3459)

# What does this PR do?
* Updates documentation links from llama-stack.readthedocs.io to llamastack.github.io (the URL mapping is sketched below)
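
Every `https://llama-stack.readthedocs.io/en/latest/...` link becomes the corresponding `https://llamastack.github.io/latest/...` link, as in the image URL change in the diff below. A minimal sketch of how such a sweep could be scripted (the `docs/` glob, file extensions, and in-place rewrite are illustrative assumptions, not part of this PR):

```python
import re
from pathlib import Path

# Old and new documentation hosts; the path layout is otherwise unchanged
# ("/en/latest/..." on readthedocs maps to "/latest/..." on GitHub Pages).
OLD_DOCS_URL = re.compile(r"https://llama-stack\.readthedocs\.io/en/latest/")
NEW_DOCS_URL = "https://llamastack.github.io/latest/"

# Illustrative sweep: rewrite links in notebooks and markdown under docs/.
for path in Path("docs").rglob("*"):
    if path.suffix not in {".ipynb", ".md"} or not path.is_file():
        continue
    text = path.read_text()
    updated = OLD_DOCS_URL.sub(NEW_DOCS_URL, text)
    if updated != text:
        path.write_text(updated)
        print(f"updated {path}")
```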

## Test Plan
* Manual testing
Alexey Rybak 2025-09-17 10:37:35 -07:00 committed by GitHub
parent 9acf49753e
commit 9fe8097ca4
21 changed files with 997 additions and 993 deletions

@@ -11,7 +11,7 @@
 "\n",
 "# Llama Stack - Building AI Applications\n",
 "\n",
-"<img src=\"https://llama-stack.readthedocs.io/en/latest/_images/llama-stack.png\" alt=\"drawing\" width=\"500\"/>\n",
+"<img src=\"https://llamastack.github.io/latest/_images/llama-stack.png\" alt=\"drawing\" width=\"500\"/>\n",
 "\n",
 "Get started with Llama Stack in minutes!\n",
 "\n",
@@ -138,7 +138,7 @@
 },
 "outputs": [],
 "source": [
-"import os \n",
+"import os\n",
 "import subprocess\n",
 "\n",
 "if \"UV_SYSTEM_PYTHON\" in os.environ:\n",
@@ -150,13 +150,13 @@
 "def run_llama_stack_server_background():\n",
 "    log_file = open(\"llama_stack_server.log\", \"w\")\n",
 "    process = subprocess.Popen(\n",
-"        f\"OLLAMA_URL=http://localhost:11434 uv run --with llama-stack llama stack run starter --image-type venv",
+"        f\"OLLAMA_URL=http://localhost:11434 uv run --with llama-stack llama stack run starter --image-type venv\",\n",
 "        shell=True,\n",
 "        stdout=log_file,\n",
 "        stderr=log_file,\n",
 "        text=True\n",
 "    )\n",
-"    \n",
+"\n",
 "    print(f\"Starting Llama Stack server with PID: {process.pid}\")\n",
 "    return process\n",
 "\n",
@@ -164,11 +164,11 @@
 "    import requests\n",
 "    from requests.exceptions import ConnectionError\n",
 "    import time\n",
-"    \n",
+"\n",
 "    url = \"http://0.0.0.0:8321/v1/health\"\n",
 "    max_retries = 30\n",
 "    retry_interval = 1\n",
-"    \n",
+"\n",
 "    print(\"Waiting for server to start\", end=\"\")\n",
 "    for _ in range(max_retries):\n",
 "        try:\n",
@@ -179,12 +179,12 @@
 "        except ConnectionError:\n",
 "            print(\".\", end=\"\", flush=True)\n",
 "            time.sleep(retry_interval)\n",
-"    \n",
+"\n",
 "    print(\"\\nServer failed to start after\", max_retries * retry_interval, \"seconds\")\n",
 "    return False\n",
 "\n",
 "\n",
-"# use this helper if needed to kill the server \n",
+"# use this helper if needed to kill the server\n",
 "def kill_llama_stack_server():\n",
 "    # Kill any existing llama stack server processes\n",
 "    os.system(\"ps aux | grep -v grep | grep llama_stack.core.server.server | awk '{print $2}' | xargs kill -9\")\n"