From 0481fa954074583cf23709bf2e948fe14f5f9464 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Fri, 22 Nov 2024 20:42:17 -0800
Subject: [PATCH] Fix broken links with docs

---
 docs/contbuild.sh                             |  7 +++++
 docs/source/conf.py                           | 29 +++++++++++++++++++
 docs/source/contributing/new_api_provider.md  |  6 ++--
 .../distributions/ondevice_distro/ios_sdk.md  |  2 +-
 .../distributions/self_hosted_distro/index.md | 28 +++++++++++++-----
 .../references/llama_cli_reference/index.md   |  4 +--
 .../developer_cookbook.md                     |  0
 7 files changed, 63 insertions(+), 13 deletions(-)
 create mode 100644 docs/contbuild.sh
 rename docs/{source/getting_started => to_situate}/developer_cookbook.md (100%)

diff --git a/docs/contbuild.sh b/docs/contbuild.sh
new file mode 100644
index 000000000..c3687a3c8
--- /dev/null
+++ b/docs/contbuild.sh
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+sphinx-autobuild --write-all source build/html --watch source/
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 5d88ae3d6..b657cddff 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -12,6 +12,8 @@
 # -- Project information -----------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
 
+from docutils import nodes
+
 project = "llama-stack"
 copyright = "2024, Meta"
 author = "Meta"
@@ -59,6 +61,10 @@ myst_enable_extensions = [
     "tasklist",
 ]
 
+myst_substitutions = {
+    "docker_hub": "https://hub.docker.com/repository/docker/llamastack",
+}
+
 # Copy button settings
 copybutton_prompt_text = "$ " # for bash prompts
 copybutton_prompt_is_regexp = True
@@ -98,3 +104,26 @@ redoc = [
 ]
 
 redoc_uri = "https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js"
+
+
+def setup(app):
+    def dockerhub_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+        url = f"https://hub.docker.com/r/llamastack/{text}"
+        node = nodes.reference(rawtext, text, refuri=url, **options)
+        return [node], []
+
+    def repopath_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+        parts = text.split("::")
+        if len(parts) == 2:
+            link_text = parts[0]
+            url_path = parts[1]
+        else:
+            link_text = text
+            url_path = text
+
+        url = f"https://github.com/meta-llama/llama-stack/tree/main/{url_path}"
+        node = nodes.reference(rawtext, link_text, refuri=url, **options)
+        return [node], []
+
+    app.add_role("dockerhub", dockerhub_role)
+    app.add_role("repopath", repopath_role)
diff --git a/docs/source/contributing/new_api_provider.md b/docs/source/contributing/new_api_provider.md
index 80c74b568..9fea31d87 100644
--- a/docs/source/contributing/new_api_provider.md
+++ b/docs/source/contributing/new_api_provider.md
@@ -5,15 +5,15 @@
 This guide contains references to walk you through adding a new API provider.
 
 1. First, decide which API your provider falls into (e.g. Inference, Safety, Agents, Memory).
 2. Decide whether your provider is a remote provider or an inline implementation. A remote provider is one that makes requests to a remote service; an inline provider is one whose implementation executes locally. Check out the examples and follow the structure to add your own API provider.
 Please find the following code pointers:
-  - [Remote Adapters](https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/remote)
-  - [Inline Providers](https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/inline)
+  - {repopath}`Remote Providers::llama_stack/providers/remote`
+  - {repopath}`Inline Providers::llama_stack/providers/inline`
 3. [Build a Llama Stack distribution](https://llama-stack.readthedocs.io/en/latest/distribution_dev/building_distro.html) with your API provider.
 4. Test your code!
 
 ## Testing your newly added API providers
 
-1. Start with an _integration test_ for your provider. That means we will instantiate the real provider, pass it real configuration and if it is a remote service, we will actually hit the remote service. We **strongly** discourage mocking for these tests at the provider level. Llama Stack is first and foremost about integration so we need to make sure stuff works end-to-end. See [llama_stack/providers/tests/inference/test_inference.py](../llama_stack/providers/tests/inference/test_inference.py) for an example.
+1. Start with an _integration test_ for your provider. That means we will instantiate the real provider, pass it real configuration and if it is a remote service, we will actually hit the remote service. We **strongly** discourage mocking for these tests at the provider level. Llama Stack is first and foremost about integration so we need to make sure stuff works end-to-end. See {repopath}`llama_stack/providers/tests/inference/test_text_inference.py` for an example.
 
 2. In addition, if you want to unit test functionality within your provider, feel free to do so. You can find some tests in `tests/` but they aren't well supported so far.
diff --git a/docs/source/distributions/ondevice_distro/ios_sdk.md b/docs/source/distributions/ondevice_distro/ios_sdk.md
index ea65ecd82..9623cd18b 100644
--- a/docs/source/distributions/ondevice_distro/ios_sdk.md
+++ b/docs/source/distributions/ondevice_distro/ios_sdk.md
@@ -5,7 +5,7 @@ We offer both remote and on-device use of Llama Stack in Swift via two components:
 
 1. [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift/)
 2. [LocalInferenceImpl](https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/inline/ios/inference)
 
-```{image} ../../../../_static/remote_or_local.gif
+```{image} ../../../_static/remote_or_local.gif
 :alt: Seamlessly switching between local, on-device inference and remote hosted inference
 :width: 412px
 :align: center
diff --git a/docs/source/distributions/self_hosted_distro/index.md b/docs/source/distributions/self_hosted_distro/index.md
index be4d4d26f..d2d4e365d 100644
--- a/docs/source/distributions/self_hosted_distro/index.md
+++ b/docs/source/distributions/self_hosted_distro/index.md
@@ -1,13 +1,27 @@
 # Self-Hosted Distributions
+```{toctree}
+:maxdepth: 1
+:hidden:
+
+ollama
+tgi
+remote-vllm
+meta-reference-gpu
+meta-reference-quantized-gpu
+together
+fireworks
+bedrock
+```
 
 We offer deployable distributions where you can host your own Llama Stack server using local inference.
 
 | **Distribution** | **Llama Stack Docker** | Start This Distribution |
 |:----------------: |:------------------------------------------: |:-----------------------: |
-| Meta Reference | [llamastack/distribution-meta-reference-gpu](https://hub.docker.com/repository/docker/llamastack/distribution-meta-reference-gpu/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/distributions/self_hosted_distro/meta-reference-gpu.html) |
-| Meta Reference Quantized | [llamastack/distribution-meta-reference-quantized-gpu](https://hub.docker.com/repository/docker/llamastack/distribution-meta-reference-quantized-gpu/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/distributions/self_hosted_distro/meta-reference-quantized-gpu.html) |
-| Ollama | [llamastack/distribution-ollama](https://hub.docker.com/repository/docker/llamastack/distribution-ollama/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/distributions/self_hosted_distro/ollama.html) |
-| TGI | [llamastack/distribution-tgi](https://hub.docker.com/repository/docker/llamastack/distribution-tgi/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/distributions/self_hosted_distro/tgi.html) |
-| Together | [llamastack/distribution-together](https://hub.docker.com/repository/docker/llamastack/distribution-together/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/distributions/self_hosted_distro/together.html) |
-| Fireworks | [llamastack/distribution-fireworks](https://hub.docker.com/repository/docker/llamastack/distribution-fireworks/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/distributions/self_hosted_distro/fireworks.html) |
-| Bedrock | [llamastack/distribution-bedrock](https://hub.docker.com/repository/docker/llamastack/distribution-bedrock/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/distributions/self_hosted_distro/bedrock.html) |
+| Ollama | {dockerhub}`distribution-ollama` | [Guide](ollama) |
+| TGI | {dockerhub}`distribution-tgi` | [Guide](tgi) |
+| vLLM | {dockerhub}`distribution-remote-vllm` | [Guide](remote-vllm) |
+| Meta Reference | {dockerhub}`distribution-meta-reference-gpu` | [Guide](meta-reference-gpu) |
+| Meta Reference Quantized | {dockerhub}`distribution-meta-reference-quantized-gpu` | [Guide](meta-reference-quantized-gpu) |
+| Together | {dockerhub}`distribution-together` | [Guide](together) |
+| Fireworks | {dockerhub}`distribution-fireworks` | [Guide](fireworks) |
+| Bedrock | {dockerhub}`distribution-bedrock` | [Guide](bedrock) |
diff --git a/docs/source/references/llama_cli_reference/index.md b/docs/source/references/llama_cli_reference/index.md
index 28d96f1f7..a0314644a 100644
--- a/docs/source/references/llama_cli_reference/index.md
+++ b/docs/source/references/llama_cli_reference/index.md
@@ -29,7 +29,7 @@ You have two ways to install Llama Stack:
 ## `llama` subcommands
 1. `download`: The `llama` CLI supports downloading models from Meta or Hugging Face.
 2. `model`: Lists available models and their properties.
-3. `stack`: Allows you to build and run a Llama Stack server. You can read more about this [here](../distributions/building_distro).
+3. `stack`: Allows you to build and run a Llama Stack server. You can read more about this [here](../../distributions/building_distro).
 
 ### Sample Usage
@@ -228,7 +228,7 @@ You can even run `llama model prompt-format` to see all of the templates and their
 ```
 llama model prompt-format -m Llama3.2-3B-Instruct
 ```
-![alt text](../../resources/prompt-format.png)
+![alt text](../../../resources/prompt-format.png)
diff --git a/docs/source/getting_started/developer_cookbook.md b/docs/to_situate/developer_cookbook.md
similarity index 100%
rename from docs/source/getting_started/developer_cookbook.md
rename to docs/to_situate/developer_cookbook.md
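
For quick verification of the two custom roles this patch adds to `conf.py`, here is a minimal standalone sketch of their URL construction. The helper names (`repopath_url`, `dockerhub_url`) and the `__main__` examples are illustrative only and not part of the patch; the actual roles wrap these same URLs in docutils reference nodes rather than returning strings.

```python
# Minimal sketch of the URL logic in the new `repopath` and `dockerhub` roles.
# `repopath_url` / `dockerhub_url` are hypothetical helper names; conf.py's
# roles build identical URLs but return docutils reference nodes.

def repopath_url(text: str) -> tuple[str, str]:
    """Split `label::path` (or a bare path) into (link text, GitHub URL)."""
    parts = text.split("::")
    if len(parts) == 2:
        link_text, url_path = parts
    else:
        link_text = url_path = text
    return link_text, f"https://github.com/meta-llama/llama-stack/tree/main/{url_path}"


def dockerhub_url(text: str) -> tuple[str, str]:
    """Map an image name to its Docker Hub URL, mirroring the dockerhub role."""
    return text, f"https://hub.docker.com/r/llamastack/{text}"


if __name__ == "__main__":
    # Mirrors {repopath}`Remote Providers::llama_stack/providers/remote`
    print(repopath_url("Remote Providers::llama_stack/providers/remote"))
    # Mirrors {repopath}`llama_stack/providers/tests/inference/test_text_inference.py`
    print(repopath_url("llama_stack/providers/tests/inference/test_text_inference.py"))
    # Mirrors {dockerhub}`distribution-ollama` from the distributions table
    print(dockerhub_url("distribution-ollama"))
```

Centralizing the GitHub and Docker Hub URL prefixes in these roles means a future repository or registry move only touches `conf.py`, rather than every docs page that links to source paths or images.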