diff --git a/.github/workflows/README.md b/.github/workflows/README.md
index 059bb873f..7c9d2bffd 100644
--- a/.github/workflows/README.md
+++ b/.github/workflows/README.md
@@ -21,4 +21,3 @@ Llama Stack uses GitHub Actions for Continuous Integration (CI). Below is a tabl
 | Test External API and Providers | [test-external.yml](test-external.yml) | Test the External API and Provider mechanisms |
 | UI Tests | [ui-unit-tests.yml](ui-unit-tests.yml) | Run the UI test suite |
 | Unit Tests | [unit-tests.yml](unit-tests.yml) | Run the unit test suite |
-| Update ReadTheDocs | [update-readthedocs.yml](update-readthedocs.yml) | Update the Llama Stack ReadTheDocs site |
diff --git a/.github/workflows/update-readthedocs.yml b/.github/workflows/update-readthedocs.yml
deleted file mode 100644
index e12f0adf8..000000000
--- a/.github/workflows/update-readthedocs.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-name: Update ReadTheDocs
-
-run-name: Update the Llama Stack ReadTheDocs site
-
-on:
-  workflow_dispatch:
-    inputs:
-      branch:
-        description: 'RTD version to update'
-        required: false
-        default: 'latest'
-  push:
-    branches:
-      - main
-    paths:
-      - 'docs/**'
-      - 'pyproject.toml'
-      - '.github/workflows/update-readthedocs.yml'
-    tags:
-      - '*'
-  pull_request:
-    branches:
-      - main
-    paths:
-      - 'docs/**'
-      - 'pyproject.toml'
-      - '.github/workflows/update-readthedocs.yml'
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  update-readthedocs:
-    runs-on: ubuntu-latest
-    env:
-      TOKEN: ${{ secrets.READTHEDOCS_TOKEN }}
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-
-      - name: Install dependencies
-        uses: ./.github/actions/setup-runner
-
-      - name: Build HTML
-        run: |
-          cd docs
-          uv run make html
-
-      - name: Trigger ReadTheDocs build
-        if: github.event_name != 'pull_request'
-        run: |
-          if [ -z "$TOKEN" ]; then
-            echo "READTHEDOCS_TOKEN is not set"
-            exit 1
-          fi
-
-          response=$(curl -X POST \
-            -H "Content-Type: application/json" \
-            -d "{
-              \"token\": \"$TOKEN\",
-              \"version\": \"$GITHUB_REF_NAME\"
-            }" \
-            https://readthedocs.org/api/v2/webhook/llama-stack/289768/)
-
-          echo "Response: $response"
-          if [ $(echo $response | jq -r '.build_triggered') != 'true' ]; then
-            echo "Failed to trigger ReadTheDocs build"
-            exit 1
-          fi
diff --git a/.gitignore b/.gitignore
index b516d4dd9..ca210db9a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,7 +18,6 @@ Package.resolved
 .venv/
 .vscode
 _build
-docs/src
 # Sample tool-calling datasets generated by NVIDIA notebooks
 docs/notebooks/nvidia/tool_calling/sample_data/
 pyrightconfig.json
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
deleted file mode 100644
index 461977a6c..000000000
--- a/.readthedocs.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-# .readthedocs.yaml
-# Read the Docs configuration file
-# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
-
-# Required
-version: 2
-
-# Build documentation in the "docs/" directory with Sphinx
-sphinx:
-  configuration: docs/source/conf.py
-
-# Set the OS, Python version and other tools you might need
-build:
-  os: ubuntu-22.04
-  tools:
-    python: "3.12"
-  jobs:
-    pre_create_environment:
-      - asdf plugin add uv
-      - asdf install uv latest
-      - asdf global uv latest
-    create_environment:
-      - uv venv "${READTHEDOCS_VIRTUALENV_PATH}"
-    install:
-      - UV_PROJECT_ENVIRONMENT="${READTHEDOCS_VIRTUALENV_PATH}" uv sync --frozen --group docs
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 14690924d..da0ba5717 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -187,14 +187,16 @@ Note that the provider "description" field will be used to generate the provider
 
 ### Building the Documentation
 
-If you are making changes to the documentation at [https://llamastack.github.io/latest/](https://llamastack.github.io/latest/), you can use the following command to build the documentation and preview your changes. You will need [Sphinx](https://www.sphinx-doc.org/en/master/) and the readthedocs theme.
+If you are making changes to the documentation at [https://llamastack.github.io/](https://llamastack.github.io/), you can use the following commands to build the documentation and preview your changes.
 
 ```bash
-# This rebuilds the documentation pages.
-uv run --group docs make -C docs/ html
+# This rebuilds the documentation pages and the OpenAPI spec.
+npm install
+npm run gen-api-docs all
+npm run build
 
-# This will start a local server (usually at http://127.0.0.1:8000) that automatically rebuilds and refreshes when you make changes to the documentation.
-uv run --group docs sphinx-autobuild docs/source docs/build/html --write-all
+# This will start a local server (usually at http://127.0.0.1:3000).
+npm run serve
 ```
 
 ### Update API Documentation
@@ -205,4 +207,4 @@ If you modify or add new API endpoints, update the API documentation accordingly
 uv run ./docs/openapi_generator/run_openapi_generator.sh
 ```
 
-The generated API documentation will be available in `docs/_static/`. Make sure to review the changes before committing.
+The generated API schema will be available in `docs/static/`. Make sure to review the changes before committing.
diff --git a/docs/Makefile b/docs/Makefile
deleted file mode 100644
index 92dd33a1a..000000000
--- a/docs/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line, and also
-# from the environment for the first two.
-SPHINXOPTS    ?=
-SPHINXBUILD   ?= sphinx-build
-SOURCEDIR     = source
-BUILDDIR      = _build
-
-# Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/README.md b/docs/README.md
index 2e03dd80b..1847e49d8 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,14 +1,17 @@
 # Llama Stack Documentation
 
-Here's a collection of comprehensive guides, examples, and resources for building AI applications with Llama Stack. For the complete documentation, visit our [Github page](https://llamastack.github.io/latest/getting_started/index.html).
+Here's a collection of comprehensive guides, examples, and resources for building AI applications with Llama Stack. For the complete documentation, visit our [Github page](https://llamastack.github.io/getting_started/quickstart).
 
 ## Render locally
 
-From the llama-stack root directory, run the following command to render the docs locally:
+From the llama-stack `docs/` directory, run the following commands to render the docs locally:
 
 ```bash
-uv run --group docs sphinx-autobuild docs/source docs/build/html --write-all
+npm install
+npm run gen-api-docs all
+npm run build
+npm run serve
 ```
 
-You can open up the docs in your browser at http://localhost:8000
+You can open the docs in your browser at http://localhost:3000
 
 ## Content
diff --git a/docs/conftest.py b/docs/conftest.py
deleted file mode 100644
index ab4d7e998..000000000
--- a/docs/conftest.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import os
-import time
-
-
-def pytest_collection_modifyitems(items):
-    for item in items:
-        item.name = item.name.replace(' ', '_')
-
-
-def pytest_runtest_teardown(item):
-    interval_seconds = os.getenv("LLAMA_STACK_TEST_INTERVAL_SECONDS")
-    if interval_seconds:
-        time.sleep(float(interval_seconds))
-
-
-def pytest_configure(config):
-    config.option.tbstyle = "short"
-    config.option.disable_warnings = True
diff --git a/docs/contbuild.sh b/docs/contbuild.sh
deleted file mode 100644
index c3687a3c8..000000000
--- a/docs/contbuild.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-sphinx-autobuild --write-all source build/html --watch source/
diff --git a/docs/docs/contributing/index.mdx b/docs/docs/contributing/index.mdx
index 8b3f86b03..7f50a058e 100644
--- a/docs/docs/contributing/index.mdx
+++ b/docs/docs/contributing/index.mdx
@@ -187,14 +187,16 @@ Note that the provider "description" field will be used to generate the provider
 
 ### Building the Documentation
 
-If you are making changes to the documentation at [https://llamastack.github.io/latest/](https://llamastack.github.io/latest/), you can use the following command to build the documentation and preview your changes. You will need [Sphinx](https://www.sphinx-doc.org/en/master/) and the readthedocs theme.
+If you are making changes to the documentation at [https://llamastack.github.io/](https://llamastack.github.io/), you can use the following commands to build the documentation and preview your changes.
 
 ```bash
-# This rebuilds the documentation pages.
-uv run --group docs make -C docs/ html
+# This rebuilds the documentation pages and the OpenAPI spec.
+npm install
+npm run gen-api-docs all
+npm run build
 
-# This will start a local server (usually at http://127.0.0.1:8000) that automatically rebuilds and refreshes when you make changes to the documentation.
-uv run --group docs sphinx-autobuild docs/source docs/build/html --write-all
+# This will start a local server (usually at http://127.0.0.1:3000).
+npm run serve
 ```
 
 ### Update API Documentation
@@ -205,7 +207,7 @@ If you modify or add new API endpoints, update the API documentation accordingly
 uv run ./docs/openapi_generator/run_openapi_generator.sh
 ```
 
-The generated API documentation will be available in `docs/_static/`. Make sure to review the changes before committing.
+The generated API schema will be available in `docs/static/`. Make sure to review the changes before committing.
 
 ## Adding a New Provider
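For context on the new `npm run gen-api-docs all` step above: it is the standard entry point of `docusaurus-plugin-openapi-docs`, which turns an OpenAPI schema (here, the one the generator writes into `docs/static/`) into MDX pages. Below is a minimal sketch of the wiring such a command assumes — the plugin instance id, spec filename, and output directory are illustrative assumptions, not values taken from this PR; check the repo's actual Docusaurus config for the real ones.

```js
// docusaurus.config.js — minimal sketch, not the repo's actual configuration.
module.exports = {
  title: "Llama Stack",
  url: "https://llamastack.github.io",
  baseUrl: "/",
  plugins: [
    [
      "docusaurus-plugin-openapi-docs",
      {
        id: "api", // hypothetical plugin instance id
        docsPluginId: "classic",
        config: {
          llamastack: {
            // Assumed location of the schema emitted by run_openapi_generator.sh
            specPath: "static/llama-stack-spec.yaml",
            // Generated MDX pages land here and render like hand-written docs
            outputDir: "docs/api",
            sidebarOptions: { groupPathsBy: "tag" },
          },
        },
      },
    ],
  ],
};
```

With wiring like this, `npm run gen-api-docs all` invokes `docusaurus gen-api-docs all`, regenerating pages for every spec listed under `config`; `npm run build` then compiles them together with the rest of the site.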
diff --git a/docs/docs/index.mdx b/docs/docs/index.mdx
index 21e895d3f..bed931fe7 100644
--- a/docs/docs/index.mdx
+++ b/docs/docs/index.mdx
@@ -45,9 +45,9 @@ Llama Stack consists of a server (with multiple pluggable API providers) and Cli
 
 ## Quick Links
 
-- Ready to build? Check out the [Getting Started Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) to get started.
-- Want to contribute? See the [Contributing Guide](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md).
-- Explore [Example Applications](https://github.com/meta-llama/llama-stack-apps) built with Llama Stack.
+- Ready to build? Check out the [Getting Started Guide](https://llamastack.github.io/getting_started/quickstart) to get started.
+- Want to contribute? See the [Contributing Guide](https://github.com/llamastack/llama-stack/blob/main/CONTRIBUTING.md).
+- Explore [Example Applications](https://github.com/llamastack/llama-stack-apps) built with Llama Stack.
 
 ## Rich Ecosystem Support
 
@@ -59,13 +59,13 @@ Llama Stack provides adapters for popular providers across all API categories:
 - **Training & Evaluation**: HuggingFace, TorchTune, NVIDIA NEMO
 
 :::info Provider Details
-For complete provider compatibility and setup instructions, see our [Providers Documentation](https://llama-stack.readthedocs.io/en/latest/providers/index.html).
+For complete provider compatibility and setup instructions, see our [Providers Documentation](https://llamastack.github.io/providers/).
 :::
 
 ## Get Started Today
[The remaining hunks add the new Docusaurus site sources under `docs/src/` — the directory the `.gitignore` change above stops ignoring. Their JSX markup was lost in extraction; only the text content below is recoverable.]

From the `HomepageFeatures` component (the fragments match the stock Docusaurus template): a feature card ending "...move your docs into the `docs` directory.", a "Powered by React" card ("Extend or customize your website layout by reusing React. Docusaurus can be extended while reusing the same header and footer."), the `@site/static/img/undraw_docusaurus_react.svg` asset, and the template's `Feature({Svg, title, description})` helper.

From the landing page: the hero tagline "Unified APIs for Inference, RAG, Agents, Tools, Safety, and Telemetry", the intro "Get up and running with Llama Stack in just a few commands. Build your first RAG application locally.", and this quickstart snippet:

```
# Install uv and start Ollama
ollama run llama3.2:3b --keepalive 60m

# Run Llama Stack server
OLLAMA_URL=http://localhost:11434 \
  uv run --with llama-stack \
  llama stack build --distro starter \
  --image-type venv --run

# Try the Python SDK
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(
    base_url="http://localhost:8321"
)

response = client.inference.chat_completion(
    model="Llama3.2-3B-Instruct",
    messages=[{
        "role": "user",
        "content": "What is machine learning?"
    }]
)
```

The page closes with four feature cards — "One consistent interface for all your AI needs - inference, safety, agents, and more.", "Swap between providers without code changes. Start local, deploy anywhere.", "Built-in safety, monitoring, and evaluation tools for enterprise applications.", "SDKs for Python, Node.js, iOS, Android, and REST APIs for any language." — and a community section: "Connect with developers building the future of AI applications".
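Since the landing page advertises Node.js and plain REST alongside the Python SDK, here is a hedged sketch of the same quickstart call made from Node.js with `fetch`. The route follows the OpenAI-compatible endpoints recent Llama Stack servers expose — treat the exact path and the model id as assumptions and confirm them against the generated OpenAPI schema in `docs/static/` for your version.

```js
// quickstart.mjs — hypothetical REST variant of the Python quickstart above.
// Assumes the server from the quickstart is already running on port 8321.
const response = await fetch(
  "http://localhost:8321/v1/openai/v1/chat/completions", // assumed route; check the spec
  {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: "llama3.2:3b", // assumed model id as registered by the starter distro
      messages: [{ role: "user", content: "What is machine learning?" }],
    }),
  },
);

if (!response.ok) {
  throw new Error(`Request failed: ${response.status} ${await response.text()}`);
}

const completion = await response.json();
console.log(completion.choices[0].message.content);
```

Run it with `node quickstart.mjs` (Node 18+, which ships `fetch` and supports top-level `await` in `.mjs` files).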