Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-12 04:50:39 +00:00)

chore(rename): move llama_stack.distribution to llama_stack.core (#2975)

We would like to rename the term `template` to `distribution`. This commit is a precursor to prepare for that. cc @leseb

parent f3d5459647
commit 2665f00102

211 changed files with 351 additions and 348 deletions
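The change itself is mechanical: every `llama_stack.distribution.*` module path becomes `llama_stack.core.*`, as the hunks below show file by file. A minimal before/after sketch, using an import that recurs throughout this diff:

```python
# Before this commit, the library client was imported as:
#   from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
# After this commit, the same class lives under llama_stack.core:
from llama_stack.core.library_client import LlamaStackAsLibraryClient
```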
.github/workflows/providers-build.yml (vendored) · 12 changes
@@ -9,8 +9,8 @@ on:
     paths:
       - 'llama_stack/cli/stack/build.py'
       - 'llama_stack/cli/stack/_build.py'
-      - 'llama_stack/distribution/build.*'
-      - 'llama_stack/distribution/*.sh'
+      - 'llama_stack/core/build.*'
+      - 'llama_stack/core/*.sh'
       - '.github/workflows/providers-build.yml'
       - 'llama_stack/templates/**'
       - 'pyproject.toml'
@@ -19,8 +19,8 @@ on:
     paths:
       - 'llama_stack/cli/stack/build.py'
       - 'llama_stack/cli/stack/_build.py'
-      - 'llama_stack/distribution/build.*'
-      - 'llama_stack/distribution/*.sh'
+      - 'llama_stack/core/build.*'
+      - 'llama_stack/core/*.sh'
       - '.github/workflows/providers-build.yml'
       - 'llama_stack/templates/**'
       - 'pyproject.toml'
@@ -108,7 +108,7 @@ jobs:
           IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1)
           entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
           echo "Entrypoint: $entrypoint"
-          if [ "$entrypoint" != "[python -m llama_stack.distribution.server.server --config /app/run.yaml]" ]; then
+          if [ "$entrypoint" != "[python -m llama_stack.core.server.server --config /app/run.yaml]" ]; then
             echo "Entrypoint is not correct"
             exit 1
           fi
@@ -142,7 +142,7 @@ jobs:
           IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1)
           entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
           echo "Entrypoint: $entrypoint"
-          if [ "$entrypoint" != "[python -m llama_stack.distribution.server.server --config /app/run.yaml]" ]; then
+          if [ "$entrypoint" != "[python -m llama_stack.core.server.server --config /app/run.yaml]" ]; then
             echo "Entrypoint is not correct"
             exit 1
           fi
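The two hunks above adjust the workflow's sanity check, which compares the built image's entrypoint string against the expected server module path. The workflow performs the check in shell; a Python rendering of the same logic (a sketch, assuming a freshly built image is present locally):

```python
import subprocess

# Grab the most recently listed image, mirroring `docker images ... | head -n 1`.
images = subprocess.run(
    ["docker", "images", "--format", "{{.Repository}}:{{.Tag}}"],
    capture_output=True, text=True, check=True,
).stdout.splitlines()
image_id = images[0]

# Inspect the entrypoint and compare it to the renamed module path.
entrypoint = subprocess.run(
    ["docker", "inspect", "--format", "{{ .Config.Entrypoint }}", image_id],
    capture_output=True, text=True, check=True,
).stdout.strip()

expected = "[python -m llama_stack.core.server.server --config /app/run.yaml]"
if entrypoint != expected:
    raise SystemExit("Entrypoint is not correct")
```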
.github/workflows/test-external-provider-module.yml (vendored)

@@ -16,6 +16,9 @@ on:
       - 'tests/external/*'
       - '.github/workflows/test-external-provider-module.yml' # This workflow
 
+# This workflow is disabled. See https://github.com/meta-llama/llama-stack/pull/2975#issuecomment-3138702984 for details
+if: false
+
 jobs:
   test-external-providers-from-module:
     runs-on: ubuntu-latest
@@ -47,7 +50,7 @@ jobs:
 
       - name: Build distro from config file
         run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/ramalama-stack/build.yaml
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/ramalama-stack/build.yaml
 
       - name: Start Llama Stack server in background
         if: ${{ matrix.image-type }} == 'venv'
.github/workflows/test-external.yml (vendored) · 4 changes
@@ -43,11 +43,11 @@ jobs:
 
       - name: Print distro dependencies
        run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/build.yaml --print-deps-only
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/build.yaml --print-deps-only
 
       - name: Build distro from config file
         run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/build.yaml
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/build.yaml
 
       - name: Start Llama Stack server in background
         if: ${{ matrix.image-type }} == 'venv'
MANIFEST.in

@@ -1,7 +1,7 @@
 include pyproject.toml
 include llama_stack/models/llama/llama3/tokenizer.model
 include llama_stack/models/llama/llama4/tokenizer.model
-include llama_stack/distribution/*.sh
+include llama_stack.core/*.sh
 include llama_stack/cli/scripts/*.sh
 include llama_stack/templates/*/*.yaml
 include llama_stack/providers/tests/test_cases/inference/*.json
@@ -165,7 +165,7 @@
     "# use this helper if needed to kill the server \n",
     "def kill_llama_stack_server():\n",
     "    # Kill any existing llama stack server processes\n",
-    "    os.system(\"ps aux | grep -v grep | grep llama_stack.distribution.server.server | awk '{print $2}' | xargs kill -9\")\n"
+    "    os.system(\"ps aux | grep -v grep | grep llama_stack.core.server.server | awk '{print $2}' | xargs kill -9\")\n"
    ]
   },
   {
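The same helper recurs in several notebook hunks below; extracted from the JSON cell above, the post-rename version reads:

```python
import os

# use this helper if needed to kill the server
def kill_llama_stack_server():
    # Kill any existing llama stack server processes (note the renamed module path)
    os.system(
        "ps aux | grep -v grep | grep llama_stack.core.server.server"
        " | awk '{print $2}' | xargs kill -9"
    )
```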
@@ -275,7 +275,7 @@
     "# use this helper if needed to kill the server \n",
     "def kill_llama_stack_server():\n",
     "    # Kill any existing llama stack server processes\n",
-    "    os.system(\"ps aux | grep -v grep | grep llama_stack.distribution.server.server | awk '{print $2}' | xargs kill -9\")\n"
+    "    os.system(\"ps aux | grep -v grep | grep llama_stack.core.server.server | awk '{print $2}' | xargs kill -9\")\n"
    ]
   },
   {
@@ -265,7 +265,7 @@
     "# use this helper if needed to kill the server \n",
     "def kill_llama_stack_server():\n",
     "    # Kill any existing llama stack server processes\n",
-    "    os.system(\"ps aux | grep -v grep | grep llama_stack.distribution.server.server | awk '{print $2}' | xargs kill -9\")\n"
+    "    os.system(\"ps aux | grep -v grep | grep llama_stack.core.server.server | awk '{print $2}' | xargs kill -9\")\n"
    ]
   },
   {
@@ -3216,19 +3216,19 @@
     "INFO:datasets:Duckdb version 1.1.3 available.\n",
     "INFO:datasets:TensorFlow version 2.18.0 available.\n",
     "INFO:datasets:JAX version 0.4.33 available.\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: basic::equality served by basic\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: basic::subset_of served by basic\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: basic::regex_parser_multiple_choice_answer served by basic\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::factuality served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::answer-correctness served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::answer-relevancy served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::answer-similarity served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::faithfulness served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::context-entity-recall served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::context-precision served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::context-recall served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:Scoring_fns: braintrust::context-relevancy served by braintrust\n",
-    "INFO:llama_stack.distribution.stack:\n"
+    "INFO:llama_stack.core.stack:Scoring_fns: basic::equality served by basic\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: basic::subset_of served by basic\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: basic::regex_parser_multiple_choice_answer served by basic\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::factuality served by braintrust\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::answer-correctness served by braintrust\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::answer-relevancy served by braintrust\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::answer-similarity served by braintrust\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::faithfulness served by braintrust\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::context-entity-recall served by braintrust\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::context-precision served by braintrust\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::context-recall served by braintrust\n",
+    "INFO:llama_stack.core.stack:Scoring_fns: braintrust::context-relevancy served by braintrust\n",
+    "INFO:llama_stack.core.stack:\n"
    ]
   },
   {
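Only the logger name changes in these captured logs, but that is enough to break anything that filters on it. A sketch of the corresponding adjustment, assuming the standard `logging` module:

```python
import logging

# Level overrides or filters keyed on the old logger name must follow the rename:
# logging.getLogger("llama_stack.distribution.stack")  # old name
logging.getLogger("llama_stack.core.stack").setLevel(logging.INFO)
```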
@@ -3448,7 +3448,7 @@
     "\n",
     "os.environ['OPENAI_API_KEY'] = userdata.get('OPENAI_API_KEY')\n",
     "\n",
-    "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
+    "from llama_stack.core.library_client import LlamaStackAsLibraryClient\n",
     "client = LlamaStackAsLibraryClient(\"experimental-post-training\")\n",
     "_ = client.initialize()"
    ]
@@ -48,7 +48,7 @@
    "outputs": [],
    "source": [
     "from llama_stack_client import LlamaStackClient, Agent\n",
-    "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
+    "from llama_stack.core.library_client import LlamaStackAsLibraryClient\n",
     "from rich.pretty import pprint\n",
     "import json\n",
     "import uuid\n",
@@ -661,7 +661,7 @@
     "except ImportError:\n",
     "    print(\"Not in Google Colab environment\")\n",
     "\n",
-    "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
+    "from llama_stack.core.library_client import LlamaStackAsLibraryClient\n",
     "\n",
     "client = LlamaStackAsLibraryClient(\"together\")\n",
     "_ = client.initialize()"
@@ -35,7 +35,7 @@
    ],
    "source": [
     "from llama_stack_client import LlamaStackClient, Agent\n",
-    "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
+    "from llama_stack.core.library_client import LlamaStackAsLibraryClient\n",
     "from rich.pretty import pprint\n",
     "import json\n",
     "import uuid\n",
@@ -194,7 +194,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
+    "from llama_stack.core.library_client import LlamaStackAsLibraryClient\n",
     "\n",
     "client = LlamaStackAsLibraryClient(\"nvidia\")\n",
     "client.initialize()"
@@ -56,7 +56,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
+    "from llama_stack.core.library_client import LlamaStackAsLibraryClient\n",
     "\n",
     "client = LlamaStackAsLibraryClient(\"nvidia\")\n",
     "client.initialize()"
@@ -56,7 +56,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
+    "from llama_stack.core.library_client import LlamaStackAsLibraryClient\n",
     "\n",
     "client = LlamaStackAsLibraryClient(\"nvidia\")\n",
     "client.initialize()"
@@ -56,7 +56,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
+    "from llama_stack.core.library_client import LlamaStackAsLibraryClient\n",
     "\n",
     "client = LlamaStackAsLibraryClient(\"nvidia\")\n",
     "client.initialize()"
@@ -1 +1 @@
-The RFC Specification (OpenAPI format) is generated from the set of API endpoints located in `llama_stack/distribution/server/endpoints.py` using the `generate.py` utility.
+The RFC Specification (OpenAPI format) is generated from the set of API endpoints located in `llama_stack.core/server/endpoints.py` using the `generate.py` utility.
@@ -17,7 +17,7 @@ import fire
 import ruamel.yaml as yaml
 
 from llama_stack.apis.version import LLAMA_STACK_API_VERSION  # noqa: E402
-from llama_stack.distribution.stack import LlamaStack  # noqa: E402
+from llama_stack.core.stack import LlamaStack  # noqa: E402
 
 from .pyopenapi.options import Options  # noqa: E402
 from .pyopenapi.specification import Info, Server  # noqa: E402
@@ -12,7 +12,7 @@ from typing import TextIO
 from typing import Any, List, Optional, Union, get_type_hints, get_origin, get_args
 
 from llama_stack.strong_typing.schema import object_to_json, StrictJsonType
-from llama_stack.distribution.resolver import api_protocol_map
+from llama_stack.core.resolver import api_protocol_map
 
 from .generator import Generator
 from .options import Options
@@ -73,7 +73,7 @@ The API is defined in the [YAML](_static/llama-stack-spec.yaml) and [HTML](_stat
 
 To prove out the API, we implemented a handful of use cases to make things more concrete. The [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps) repository contains [6 different examples](https://github.com/meta-llama/llama-stack-apps/tree/main/examples) ranging from very basic to a multi turn agent.
 
-There is also a sample inference endpoint implementation in the [llama-stack](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/distribution/server/server.py) repository.
+There is also a sample inference endpoint implementation in the [llama-stack](https://github.com/meta-llama/llama-stack/blob/main/llama_stack.core/server/server.py) repository.
 
 ## Limitations
@@ -187,7 +187,7 @@
     "# use this helper if needed to kill the server \n",
     "def kill_llama_stack_server():\n",
     "    # Kill any existing llama stack server processes\n",
-    "    os.system(\"ps aux | grep -v grep | grep llama_stack.distribution.server.server | awk '{print $2}' | xargs kill -9\")\n"
+    "    os.system(\"ps aux | grep -v grep | grep llama_stack.core.server.server | awk '{print $2}' | xargs kill -9\")\n"
    ]
   },
   {
@@ -355,7 +355,7 @@ server:
 8. Run the server:
 
 ```bash
-python -m llama_stack.distribution.server.server --yaml-config ~/.llama/run-byoa.yaml
+python -m llama_stack.core.server.server --yaml-config ~/.llama/run-byoa.yaml
 ```
 
 9. Test the API:
@@ -103,5 +103,5 @@ llama stack run together
 
 2. Start Streamlit UI
 ```bash
-uv run --with ".[ui]" streamlit run llama_stack/distribution/ui/app.py
+uv run --with ".[ui]" streamlit run llama_stack.core/ui/app.py
 ```
@@ -174,7 +174,7 @@ spec:
       - name: llama-stack
         image: localhost/llama-stack-run-k8s:latest
         imagePullPolicy: IfNotPresent
-        command: ["python", "-m", "llama_stack.distribution.server.server", "--config", "/app/config.yaml"]
+        command: ["python", "-m", "llama_stack.core.server.server", "--config", "/app/config.yaml"]
         ports:
           - containerPort: 5000
         volumeMounts:
@@ -59,7 +59,7 @@ Build a Llama stack container
 
 options:
   -h, --help            show this help message and exit
-  --config CONFIG       Path to a config file to use for the build. You can find example configs in llama_stack/distributions/**/build.yaml. If this argument is not provided, you will
+  --config CONFIG       Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will
                         be prompted to enter information interactively (default: None)
   --template TEMPLATE   Name of the example template config to use for build. You may use `llama stack build --list-templates` to check out the available templates (default: None)
   --list-templates      Show the available templates for building a Llama Stack distribution (default: False)
@@ -10,7 +10,7 @@ llama stack build --template starter --image-type venv
 ```
 
 ```python
-from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
+from llama_stack.core.library_client import LlamaStackAsLibraryClient
 
 client = LlamaStackAsLibraryClient(
     "starter",
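The documentation snippet above cuts off mid-call; completed under the same assumptions (the "starter" distribution is built locally), the usage pattern matches the notebook cells elsewhere in this diff:

```python
from llama_stack.core.library_client import LlamaStackAsLibraryClient

# Run the stack in-process instead of talking to a separate server.
client = LlamaStackAsLibraryClient("starter")
_ = client.initialize()
```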
@@ -52,7 +52,7 @@ spec:
           value: "${SAFETY_MODEL}"
         - name: TAVILY_SEARCH_API_KEY
           value: "${TAVILY_SEARCH_API_KEY}"
-        command: ["python", "-m", "llama_stack.distribution.server.server", "--config", "/etc/config/stack_run_config.yaml", "--port", "8321"]
+        command: ["python", "-m", "llama_stack.core.server.server", "--config", "/etc/config/stack_run_config.yaml", "--port", "8321"]
         ports:
           - containerPort: 8321
         volumeMounts:
@@ -66,7 +66,7 @@
     "from pydantic import BaseModel\n",
     "from termcolor import cprint\n",
     "\n",
-    "from llama_stack.distribution.datatypes import RemoteProviderConfig\n",
+    "from llama_stack.core.datatypes import RemoteProviderConfig\n",
     "from llama_stack.apis.safety import Safety\n",
     "from llama_stack_client import LlamaStackClient\n",
     "\n",
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack.distribution.library_client import (  # noqa: F401
+from llama_stack.core.library_client import (  # noqa: F401
     AsyncLlamaStackAsLibraryClient,
     LlamaStackAsLibraryClient,
 )
@@ -323,7 +323,7 @@ def _hf_download(
     from huggingface_hub import snapshot_download
     from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
 
-    from llama_stack.distribution.utils.model_utils import model_local_dir
+    from llama_stack.core.utils.model_utils import model_local_dir
 
     repo_id = model.huggingface_repo
     if repo_id is None:
@@ -361,7 +361,7 @@ def _meta_download(
     info: "LlamaDownloadInfo",
     max_concurrent_downloads: int,
 ):
-    from llama_stack.distribution.utils.model_utils import model_local_dir
+    from llama_stack.core.utils.model_utils import model_local_dir
 
     output_dir = Path(model_local_dir(model.descriptor()))
     os.makedirs(output_dir, exist_ok=True)
@@ -403,7 +403,7 @@ class Manifest(BaseModel):
 
 
 def _download_from_manifest(manifest_file: str, max_concurrent_downloads: int):
-    from llama_stack.distribution.utils.model_utils import model_local_dir
+    from llama_stack.core.utils.model_utils import model_local_dir
 
     with open(manifest_file) as f:
         d = json.load(f)
@@ -11,7 +11,7 @@ from pathlib import Path
 
 from llama_stack.cli.subcommand import Subcommand
 from llama_stack.cli.table import print_table
-from llama_stack.distribution.utils.config_dirs import DEFAULT_CHECKPOINT_DIR
+from llama_stack.core.utils.config_dirs import DEFAULT_CHECKPOINT_DIR
 from llama_stack.models.llama.sku_list import all_registered_models
 
 
@@ -9,7 +9,7 @@ import os
 import shutil
 
 from llama_stack.cli.subcommand import Subcommand
-from llama_stack.distribution.utils.config_dirs import DEFAULT_CHECKPOINT_DIR
+from llama_stack.core.utils.config_dirs import DEFAULT_CHECKPOINT_DIR
 from llama_stack.models.llama.sku_list import resolve_model
 
 
@@ -23,27 +23,27 @@ from termcolor import colored, cprint
 
 from llama_stack.cli.stack.utils import ImageType
 from llama_stack.cli.table import print_table
-from llama_stack.distribution.build import (
+from llama_stack.core.build import (
     SERVER_DEPENDENCIES,
     build_image,
     get_provider_dependencies,
 )
-from llama_stack.distribution.configure import parse_and_maybe_upgrade_config
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.configure import parse_and_maybe_upgrade_config
+from llama_stack.core.datatypes import (
     BuildConfig,
     BuildProvider,
     DistributionSpec,
     Provider,
     StackRunConfig,
 )
-from llama_stack.distribution.distribution import get_provider_registry
-from llama_stack.distribution.external import load_external_apis
-from llama_stack.distribution.resolver import InvalidProviderError
-from llama_stack.distribution.stack import replace_env_vars
-from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR, EXTERNAL_PROVIDERS_DIR
-from llama_stack.distribution.utils.dynamic import instantiate_class_type
-from llama_stack.distribution.utils.exec import formulate_run_args, run_command
-from llama_stack.distribution.utils.image_types import LlamaStackImageType
+from llama_stack.core.distribution import get_provider_registry
+from llama_stack.core.external import load_external_apis
+from llama_stack.core.resolver import InvalidProviderError
+from llama_stack.core.stack import replace_env_vars
+from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR, EXTERNAL_PROVIDERS_DIR
+from llama_stack.core.utils.dynamic import instantiate_class_type
+from llama_stack.core.utils.exec import formulate_run_args, run_command
+from llama_stack.core.utils.image_types import LlamaStackImageType
 from llama_stack.providers.datatypes import Api
 
 TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates"
@@ -27,7 +27,7 @@ class StackBuild(Subcommand):
             "--config",
             type=str,
             default=None,
-            help="Path to a config file to use for the build. You can find example configs in llama_stack/distributions/**/build.yaml. If this argument is not provided, you will be prompted to enter information interactively",
+            help="Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will be prompted to enter information interactively",
         )
 
         self.parser.add_argument(
@@ -26,7 +26,7 @@ class StackListApis(Subcommand):
 
     def _run_apis_list_cmd(self, args: argparse.Namespace) -> None:
         from llama_stack.cli.table import print_table
-        from llama_stack.distribution.distribution import stack_apis
+        from llama_stack.core.distribution import stack_apis
 
         # eventually, this should query a registry at llama.meta.com/llamastack/distributions
         headers = [
@@ -23,7 +23,7 @@ class StackListProviders(Subcommand):
 
     @property
     def providable_apis(self):
-        from llama_stack.distribution.distribution import providable_apis
+        from llama_stack.core.distribution import providable_apis
 
         return [api.value for api in providable_apis()]
 
@@ -38,7 +38,7 @@ class StackListProviders(Subcommand):
 
     def _run_providers_list_cmd(self, args: argparse.Namespace) -> None:
         from llama_stack.cli.table import print_table
-        from llama_stack.distribution.distribution import Api, get_provider_registry
+        from llama_stack.core.distribution import Api, get_provider_registry
 
         all_providers = get_provider_registry()
         if args.api:
@@ -85,8 +85,8 @@ class StackRun(Subcommand):
     def _run_stack_run_cmd(self, args: argparse.Namespace) -> None:
         import yaml
 
-        from llama_stack.distribution.configure import parse_and_maybe_upgrade_config
-        from llama_stack.distribution.utils.exec import formulate_run_args, run_command
+        from llama_stack.core.configure import parse_and_maybe_upgrade_config
+        from llama_stack.core.utils.exec import formulate_run_args, run_command
 
         if args.enable_ui:
             self._start_ui_development_server(args.port)
@@ -94,7 +94,7 @@ class StackRun(Subcommand):
 
         if args.config:
             try:
-                from llama_stack.distribution.utils.config_resolution import Mode, resolve_config_or_template
+                from llama_stack.core.utils.config_resolution import Mode, resolve_config_or_template
 
                 config_file = resolve_config_or_template(args.config, Mode.RUN)
             except ValueError as e:
@@ -127,7 +127,7 @@ class StackRun(Subcommand):
         # using the current environment packages.
         if not image_type and not image_name:
             logger.info("No image type or image name provided. Assuming environment packages.")
-            from llama_stack.distribution.server.server import main as server_main
+            from llama_stack.core.server.server import main as server_main
 
             # Build the server args from the current args passed to the CLI
             server_args = argparse.Namespace()
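This hunk shows the interesting fall-through: when no image type or name is given, `llama stack run` imports the server entry point directly instead of shelling out. A sketch of that path; the `Namespace` attributes and the `main()` signature are assumptions, since the diff only shows the import and the empty namespace:

```python
import argparse

from llama_stack.core.server.server import main as server_main

# Build the server args from the current args passed to the CLI (sketch).
server_args = argparse.Namespace()
server_args.config = "run.yaml"  # hypothetical attribute

server_main(server_args)  # assumed signature: main(args)
```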
@@ -107,7 +107,7 @@ def verify_files(model_dir: Path, checksums: dict[str, str], console: Console) -
 
 
 def run_verify_cmd(args: argparse.Namespace, parser: argparse.ArgumentParser):
-    from llama_stack.distribution.utils.model_utils import model_local_dir
+    from llama_stack.core.utils.model_utils import model_local_dir
 
     console = Console()
     model_dir = Path(model_local_dir(args.model_id))
@@ -6,7 +6,7 @@
 
 from typing import Any
 
-from llama_stack.distribution.datatypes import User
+from llama_stack.core.datatypes import User
 
 from .conditions import (
     Condition,
@@ -12,11 +12,11 @@ from pathlib import Path
 from pydantic import BaseModel
 from termcolor import cprint
 
-from llama_stack.distribution.datatypes import BuildConfig
-from llama_stack.distribution.distribution import get_provider_registry
-from llama_stack.distribution.external import load_external_apis
-from llama_stack.distribution.utils.exec import run_command
-from llama_stack.distribution.utils.image_types import LlamaStackImageType
+from llama_stack.core.datatypes import BuildConfig
+from llama_stack.core.distribution import get_provider_registry
+from llama_stack.core.external import load_external_apis
+from llama_stack.core.utils.exec import run_command
+from llama_stack.core.utils.image_types import LlamaStackImageType
 from llama_stack.providers.datatypes import Api
 from llama_stack.templates.template import DistributionTemplate
 
@@ -122,7 +122,7 @@ def build_image(
         normal_deps.extend(api_spec.pip_packages)
 
     if build_config.image_type == LlamaStackImageType.CONTAINER.value:
-        script = str(importlib.resources.files("llama_stack") / "distribution/build_container.sh")
+        script = str(importlib.resources.files("llama_stack") / "core/build_container.sh")
         args = [
             script,
             "--template-or-config",
@@ -139,7 +139,7 @@ def build_image(
         if run_config is not None:
             args.extend(["--run-config", run_config])
     elif build_config.image_type == LlamaStackImageType.CONDA.value:
-        script = str(importlib.resources.files("llama_stack") / "distribution/build_conda_env.sh")
+        script = str(importlib.resources.files("llama_stack") / "core/build_conda_env.sh")
         args = [
             script,
             "--env-name",
|
@ -150,7 +150,7 @@ def build_image(
|
||||||
" ".join(normal_deps),
|
" ".join(normal_deps),
|
||||||
]
|
]
|
||||||
elif build_config.image_type == LlamaStackImageType.VENV.value:
|
elif build_config.image_type == LlamaStackImageType.VENV.value:
|
||||||
script = str(importlib.resources.files("llama_stack") / "distribution/build_venv.sh")
|
script = str(importlib.resources.files("llama_stack") / "core/build_venv.sh")
|
||||||
args = [
|
args = [
|
||||||
script,
|
script,
|
||||||
"--env-name",
|
"--env-name",
|
|
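Taken together, the three hunks above are a dispatch on image type, each arm pointing at a shell script that now lives under `core/`. A condensed sketch of that selection logic:

```python
import importlib.resources

def select_build_script(image_type: str) -> str:
    # Map each image flavor to its build script under the renamed package dir.
    scripts = {
        "container": "core/build_container.sh",
        "conda": "core/build_conda_env.sh",
        "venv": "core/build_venv.sh",
    }
    return str(importlib.resources.files("llama_stack") / scripts[image_type])
```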
@@ -327,12 +327,12 @@ EOF
   # If a run config is provided, we use the --config flag
   if [[ -n "$run_config" ]]; then
     add_to_container << EOF
-ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server", "--config", "$RUN_CONFIG_PATH"]
+ENTRYPOINT ["python", "-m", "llama_stack.core.server.server", "--config", "$RUN_CONFIG_PATH"]
 EOF
   # If a template is provided (not a yaml file), we use the --template flag
   elif [[ "$template_or_config" != *.yaml ]]; then
     add_to_container << EOF
-ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server", "--template", "$template_or_config"]
+ENTRYPOINT ["python", "-m", "llama_stack.core.server.server", "--template", "$template_or_config"]
 EOF
   fi
 
@@ -7,20 +7,20 @@ import logging
 import textwrap
 from typing import Any
 
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.datatypes import (
     LLAMA_STACK_RUN_CONFIG_VERSION,
     DistributionSpec,
     Provider,
     StackRunConfig,
 )
-from llama_stack.distribution.distribution import (
+from llama_stack.core.distribution import (
     builtin_automatically_routed_apis,
     get_provider_registry,
 )
-from llama_stack.distribution.stack import cast_image_name_to_string, replace_env_vars
-from llama_stack.distribution.utils.config_dirs import EXTERNAL_PROVIDERS_DIR
-from llama_stack.distribution.utils.dynamic import instantiate_class_type
-from llama_stack.distribution.utils.prompt_for_config import prompt_for_config
+from llama_stack.core.stack import cast_image_name_to_string, replace_env_vars
+from llama_stack.core.utils.config_dirs import EXTERNAL_PROVIDERS_DIR
+from llama_stack.core.utils.dynamic import instantiate_class_type
+from llama_stack.core.utils.prompt_for_config import prompt_for_config
 from llama_stack.providers.datatypes import Api, ProviderSpec
 
 logger = logging.getLogger(__name__)
@@ -24,7 +24,7 @@ from llama_stack.apis.shields import Shield, ShieldInput
 from llama_stack.apis.tools import Tool, ToolGroup, ToolGroupInput, ToolRuntime
 from llama_stack.apis.vector_dbs import VectorDB, VectorDBInput
 from llama_stack.apis.vector_io import VectorIO
-from llama_stack.distribution.access_control.datatypes import AccessRule
+from llama_stack.core.access_control.datatypes import AccessRule
 from llama_stack.providers.datatypes import Api, ProviderSpec
 from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig
 from llama_stack.providers.utils.sqlstore.sqlstore import SqlStoreConfig
@@ -12,8 +12,8 @@ from typing import Any
 import yaml
 from pydantic import BaseModel
 
-from llama_stack.distribution.datatypes import BuildConfig, DistributionSpec
-from llama_stack.distribution.external import load_external_apis
+from llama_stack.core.datatypes import BuildConfig, DistributionSpec
+from llama_stack.core.external import load_external_apis
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import (
     AdapterSpec,
@@ -8,7 +8,7 @@
 import yaml
 
 from llama_stack.apis.datatypes import Api, ExternalApiSpec
-from llama_stack.distribution.datatypes import BuildConfig, StackRunConfig
+from llama_stack.core.datatypes import BuildConfig, StackRunConfig
 from llama_stack.log import get_logger
 
 logger = get_logger(name=__name__, category="core")
@@ -15,9 +15,9 @@ from llama_stack.apis.inspect import (
     RouteInfo,
     VersionInfo,
 )
-from llama_stack.distribution.datatypes import StackRunConfig
-from llama_stack.distribution.external import load_external_apis
-from llama_stack.distribution.server.routes import get_all_api_routes
+from llama_stack.core.datatypes import StackRunConfig
+from llama_stack.core.external import load_external_apis
+from llama_stack.core.server.routes import get_all_api_routes
 from llama_stack.providers.datatypes import HealthStatus
 
 
@@ -31,23 +31,23 @@ from pydantic import BaseModel, TypeAdapter
 from rich.console import Console
 from termcolor import cprint
 
-from llama_stack.distribution.build import print_pip_install_help
-from llama_stack.distribution.configure import parse_and_maybe_upgrade_config
-from llama_stack.distribution.datatypes import Api, BuildConfig, BuildProvider, DistributionSpec
-from llama_stack.distribution.request_headers import (
+from llama_stack.core.build import print_pip_install_help
+from llama_stack.core.configure import parse_and_maybe_upgrade_config
+from llama_stack.core.datatypes import Api, BuildConfig, BuildProvider, DistributionSpec
+from llama_stack.core.request_headers import (
     PROVIDER_DATA_VAR,
     request_provider_data_context,
 )
-from llama_stack.distribution.resolver import ProviderRegistry
-from llama_stack.distribution.server.routes import RouteImpls, find_matching_route, initialize_route_impls
-from llama_stack.distribution.stack import (
+from llama_stack.core.resolver import ProviderRegistry
+from llama_stack.core.server.routes import RouteImpls, find_matching_route, initialize_route_impls
+from llama_stack.core.stack import (
     construct_stack,
     get_stack_run_config_from_template,
     replace_env_vars,
 )
-from llama_stack.distribution.utils.config import redact_sensitive_fields
-from llama_stack.distribution.utils.context import preserve_contexts_async_generator
-from llama_stack.distribution.utils.exec import in_notebook
+from llama_stack.core.utils.config import redact_sensitive_fields
+from llama_stack.core.utils.context import preserve_contexts_async_generator
+from llama_stack.core.utils.exec import in_notebook
 from llama_stack.providers.utils.telemetry.tracing import (
     CURRENT_TRACE_CONTEXT,
     end_trace,
@@ -10,7 +10,7 @@ import logging
 from contextlib import AbstractContextManager
 from typing import Any
 
-from llama_stack.distribution.datatypes import User
+from llama_stack.core.datatypes import User
 
 from .utils.dynamic import instantiate_class_type
 
@@ -27,18 +27,18 @@ from llama_stack.apis.telemetry import Telemetry
 from llama_stack.apis.tools import ToolGroups, ToolRuntime
 from llama_stack.apis.vector_dbs import VectorDBs
 from llama_stack.apis.vector_io import VectorIO
-from llama_stack.distribution.client import get_client_impl
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.client import get_client_impl
+from llama_stack.core.datatypes import (
     AccessRule,
     AutoRoutedProviderSpec,
     Provider,
     RoutingTableProviderSpec,
     StackRunConfig,
 )
-from llama_stack.distribution.distribution import builtin_automatically_routed_apis
-from llama_stack.distribution.external import load_external_apis
-from llama_stack.distribution.store import DistributionRegistry
-from llama_stack.distribution.utils.dynamic import instantiate_class_type
+from llama_stack.core.distribution import builtin_automatically_routed_apis
+from llama_stack.core.external import load_external_apis
+from llama_stack.core.store import DistributionRegistry
+from llama_stack.core.utils.dynamic import instantiate_class_type
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import (
     Api,
@@ -183,7 +183,7 @@ def specs_for_autorouted_apis(apis_to_serve: list[str] | set[str]) -> dict[str,
             spec=RoutingTableProviderSpec(
                 api=info.routing_table_api,
                 router_api=info.router_api,
-                module="llama_stack.distribution.routers",
+                module="llama_stack.core.routers",
                 api_dependencies=[],
                 deps__=[f"inner-{info.router_api.value}"],
             ),
@@ -197,7 +197,7 @@ def specs_for_autorouted_apis(apis_to_serve: list[str] | set[str]) -> dict[str,
             config={},
             spec=AutoRoutedProviderSpec(
                 api=info.router_api,
-                module="llama_stack.distribution.routers",
+                module="llama_stack.core.routers",
                 routing_table_api=info.routing_table_api,
                 api_dependencies=[info.routing_table_api],
                 # Add telemetry as an optional dependency to all auto-routed providers
@@ -6,9 +6,9 @@
 
 from typing import Any
 
-from llama_stack.distribution.datatypes import AccessRule, RoutedProtocol
-from llama_stack.distribution.stack import StackRunConfig
-from llama_stack.distribution.store import DistributionRegistry
+from llama_stack.core.datatypes import AccessRule, RoutedProtocol
+from llama_stack.core.stack import StackRunConfig
+from llama_stack.core.store import DistributionRegistry
 from llama_stack.providers.datatypes import Api, RoutingTable
 from llama_stack.providers.utils.inference.inference_store import InferenceStore
 
@@ -7,7 +7,7 @@
 from typing import Any
 
 from llama_stack.apis.benchmarks import Benchmark, Benchmarks, ListBenchmarksResponse
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.datatypes import (
     BenchmarkWithOwner,
 )
 from llama_stack.log import get_logger
@@ -10,16 +10,16 @@ from llama_stack.apis.common.errors import ModelNotFoundError
 from llama_stack.apis.models import Model
 from llama_stack.apis.resource import ResourceType
 from llama_stack.apis.scoring_functions import ScoringFn
-from llama_stack.distribution.access_control.access_control import AccessDeniedError, is_action_allowed
-from llama_stack.distribution.access_control.datatypes import Action
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
+from llama_stack.core.access_control.datatypes import Action
+from llama_stack.core.datatypes import (
     AccessRule,
     RoutableObject,
     RoutableObjectWithProvider,
     RoutedProtocol,
 )
-from llama_stack.distribution.request_headers import get_authenticated_user
-from llama_stack.distribution.store import DistributionRegistry
+from llama_stack.core.request_headers import get_authenticated_user
+from llama_stack.core.store import DistributionRegistry
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import Api, RoutingTable
 
@@ -19,7 +19,7 @@ from llama_stack.apis.datasets import (
     URIDataSource,
 )
 from llama_stack.apis.resource import ResourceType
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.datatypes import (
     DatasetWithOwner,
 )
 from llama_stack.log import get_logger
@@ -9,7 +9,7 @@ from typing import Any
 
 from llama_stack.apis.common.errors import ModelNotFoundError
 from llama_stack.apis.models import ListModelsResponse, Model, Models, ModelType, OpenAIListModelsResponse, OpenAIModel
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.datatypes import (
     ModelWithOwner,
     RegistryEntrySource,
 )
@@ -12,7 +12,7 @@ from llama_stack.apis.scoring_functions import (
     ScoringFnParams,
     ScoringFunctions,
 )
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.datatypes import (
     ScoringFnWithOwner,
 )
 from llama_stack.log import get_logger
@@ -8,7 +8,7 @@ from typing import Any
 
 from llama_stack.apis.resource import ResourceType
 from llama_stack.apis.shields import ListShieldsResponse, Shield, Shields
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.datatypes import (
     ShieldWithOwner,
 )
 from llama_stack.log import get_logger
@@ -8,7 +8,7 @@ from typing import Any
 
 from llama_stack.apis.common.content_types import URL
 from llama_stack.apis.tools import ListToolGroupsResponse, ListToolsResponse, Tool, ToolGroup, ToolGroups
-from llama_stack.distribution.datatypes import ToolGroupWithOwner
+from llama_stack.core.datatypes import ToolGroupWithOwner
 from llama_stack.log import get_logger
 
 from .common import CommonRoutingTableImpl
@@ -23,7 +23,7 @@ from llama_stack.apis.vector_io.vector_io import (
     VectorStoreObject,
     VectorStoreSearchResponsePage,
 )
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.datatypes import (
     VectorDBWithOwner,
 )
 from llama_stack.log import get_logger
@@ -9,10 +9,10 @@ import json
 import httpx
 from aiohttp import hdrs
 
-from llama_stack.distribution.datatypes import AuthenticationConfig, User
-from llama_stack.distribution.request_headers import user_from_scope
-from llama_stack.distribution.server.auth_providers import create_auth_provider
-from llama_stack.distribution.server.routes import find_matching_route, initialize_route_impls
+from llama_stack.core.datatypes import AuthenticationConfig, User
+from llama_stack.core.request_headers import user_from_scope
+from llama_stack.core.server.auth_providers import create_auth_provider
+from llama_stack.core.server.routes import find_matching_route, initialize_route_impls
 from llama_stack.log import get_logger
 
 logger = get_logger(name=__name__, category="auth")
@@ -14,7 +14,7 @@ import httpx
 from jose import jwt
 from pydantic import BaseModel, Field
 
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.datatypes import (
     AuthenticationConfig,
     CustomAuthConfig,
     GitHubTokenAuthConfig,
@@ -15,7 +15,7 @@ from starlette.routing import Route
 from llama_stack.apis.datatypes import Api, ExternalApiSpec
 from llama_stack.apis.tools import RAGToolRuntime, SpecialToolGroup
 from llama_stack.apis.version import LLAMA_STACK_API_VERSION
-from llama_stack.distribution.resolver import api_protocol_map
+from llama_stack.core.resolver import api_protocol_map
 from llama_stack.schema_utils import WebMethod
 
 EndpointFunc = Callable[..., Any]
@@ -33,35 +33,35 @@ from pydantic import BaseModel, ValidationError
 
 from llama_stack.apis.common.responses import PaginatedResponse
 from llama_stack.cli.utils import add_config_template_args, get_config_from_args
-from llama_stack.distribution.access_control.access_control import AccessDeniedError
-from llama_stack.distribution.datatypes import (
+from llama_stack.core.access_control.access_control import AccessDeniedError
+from llama_stack.core.datatypes import (
     AuthenticationRequiredError,
     LoggingConfig,
     StackRunConfig,
 )
-from llama_stack.distribution.distribution import builtin_automatically_routed_apis
-from llama_stack.distribution.external import ExternalApiSpec, load_external_apis
-from llama_stack.distribution.request_headers import (
+from llama_stack.core.distribution import builtin_automatically_routed_apis
+from llama_stack.core.external import ExternalApiSpec, load_external_apis
+from llama_stack.core.request_headers import (
     PROVIDER_DATA_VAR,
     request_provider_data_context,
     user_from_scope,
 )
-from llama_stack.distribution.resolver import InvalidProviderError
-from llama_stack.distribution.server.routes import (
+from llama_stack.core.resolver import InvalidProviderError
+from llama_stack.core.server.routes import (
     find_matching_route,
     get_all_api_routes,
     initialize_route_impls,
 )
-from llama_stack.distribution.stack import (
+from llama_stack.core.stack import (
     cast_image_name_to_string,
     construct_stack,
     replace_env_vars,
     shutdown_stack,
     validate_env_pair,
 )
-from llama_stack.distribution.utils.config import redact_sensitive_fields
-from llama_stack.distribution.utils.config_resolution import Mode, resolve_config_or_template
-from llama_stack.distribution.utils.context import preserve_contexts_async_generator
+from llama_stack.core.utils.config import redact_sensitive_fields
+from llama_stack.core.utils.config_resolution import Mode, resolve_config_or_template
+from llama_stack.core.utils.context import preserve_contexts_async_generator
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import Api
 from llama_stack.providers.inline.telemetry.meta_reference.config import TelemetryConfig
@@ -34,14 +34,14 @@ from llama_stack.apis.telemetry import Telemetry
 from llama_stack.apis.tools import RAGToolRuntime, ToolGroups, ToolRuntime
 from llama_stack.apis.vector_dbs import VectorDBs
 from llama_stack.apis.vector_io import VectorIO
-from llama_stack.distribution.datatypes import Provider, StackRunConfig
-from llama_stack.distribution.distribution import get_provider_registry
-from llama_stack.distribution.inspect import DistributionInspectConfig, DistributionInspectImpl
-from llama_stack.distribution.providers import ProviderImpl, ProviderImplConfig
-from llama_stack.distribution.resolver import ProviderRegistry, resolve_impls
-from llama_stack.distribution.routing_tables.common import CommonRoutingTableImpl
-from llama_stack.distribution.store.registry import create_dist_registry
-from llama_stack.distribution.utils.dynamic import instantiate_class_type
+from llama_stack.core.datatypes import Provider, StackRunConfig
+from llama_stack.core.distribution import get_provider_registry
+from llama_stack.core.inspect import DistributionInspectConfig, DistributionInspectImpl
+from llama_stack.core.providers import ProviderImpl, ProviderImplConfig
+from llama_stack.core.resolver import ProviderRegistry, resolve_impls
+from llama_stack.core.routing_tables.common import CommonRoutingTableImpl
+from llama_stack.core.store.registry import create_dist_registry
+from llama_stack.core.utils.dynamic import instantiate_class_type
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import Api
 
@@ -122,7 +122,7 @@ if [[ "$env_type" == "venv" || "$env_type" == "conda" ]]; then
         yaml_config_arg=""
     fi
 
-    $PYTHON_BINARY -m llama_stack.distribution.server.server \
+    $PYTHON_BINARY -m llama_stack.core.server.server \
         $yaml_config_arg \
         --port "$port" \
         $env_vars \
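For reference, the renamed entrypoint can be exercised outside this script as well. A minimal sketch, assuming a working local install; the config filename and port value are illustrative assumptions, not part of this diff:

```python
# Launch the renamed server module the same way the script above does,
# via `python -m`. Config name and port are assumed example values.
import subprocess
import sys

subprocess.run(
    [
        sys.executable,
        "-m",
        "llama_stack.core.server.server",  # was llama_stack.distribution.server.server
        "--config",
        "run.yaml",
        "--port",
        "8321",
    ],
    check=True,
)
```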
@@ -10,8 +10,8 @@ from typing import Protocol
 
 import pydantic
 
-from llama_stack.distribution.datatypes import RoutableObjectWithProvider
-from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
+from llama_stack.core.datatypes import RoutableObjectWithProvider
+from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
 from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig
@@ -36,7 +36,7 @@ llama-stack-client benchmarks register \
 3. Start Streamlit UI
 
 ```bash
-uv run --with ".[ui]" streamlit run llama_stack/distribution/ui/app.py
+uv run --with ".[ui]" streamlit run llama_stack/core/ui/app.py
 ```
 
 ## Environment Variables
@@ -6,7 +6,7 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.core.ui.modules.api import llama_stack_api
 
 
 def datasets():
@@ -6,7 +6,7 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.core.ui.modules.api import llama_stack_api
 
 
 def benchmarks():
@@ -6,7 +6,7 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.core.ui.modules.api import llama_stack_api
 
 
 def models():
@@ -6,7 +6,7 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.core.ui.modules.api import llama_stack_api
 
 
 def providers():
@@ -6,12 +6,12 @@
 
 from streamlit_option_menu import option_menu
 
-from llama_stack.distribution.ui.page.distribution.datasets import datasets
-from llama_stack.distribution.ui.page.distribution.eval_tasks import benchmarks
-from llama_stack.distribution.ui.page.distribution.models import models
-from llama_stack.distribution.ui.page.distribution.scoring_functions import scoring_functions
-from llama_stack.distribution.ui.page.distribution.shields import shields
-from llama_stack.distribution.ui.page.distribution.vector_dbs import vector_dbs
+from llama_stack.core.ui.page.distribution.datasets import datasets
+from llama_stack.core.ui.page.distribution.eval_tasks import benchmarks
+from llama_stack.core.ui.page.distribution.models import models
+from llama_stack.core.ui.page.distribution.scoring_functions import scoring_functions
+from llama_stack.core.ui.page.distribution.shields import shields
+from llama_stack.core.ui.page.distribution.vector_dbs import vector_dbs
 
 
 def resources_page():
@@ -6,7 +6,7 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.core.ui.modules.api import llama_stack_api
 
 
 def scoring_functions():
@@ -6,7 +6,7 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.core.ui.modules.api import llama_stack_api
 
 
 def shields():
@@ -6,7 +6,7 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.core.ui.modules.api import llama_stack_api
 
 
 def vector_dbs():
Some files were not shown because too many files have changed in this diff.
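Downstream code that imports from the old module path will break once this lands. A minimal transitional sketch, assuming a consumer that must run both before and after the rename; the shim itself is hypothetical and not part of this commit:

```python
# Hypothetical compatibility shim: prefer the new llama_stack.core path,
# fall back to the pre-rename llama_stack.distribution path.
try:
    from llama_stack.core.datatypes import StackRunConfig
except ImportError:
    from llama_stack.distribution.datatypes import StackRunConfig

# Consumers can then use StackRunConfig regardless of the installed version.
```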