Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-04 02:03:44 +00:00
chore: Stack server no longer depends on llama-stack-client
This commit is contained in:
parent 9df073450f
commit 2221cc2cc4

12 changed files with 24 additions and 20 deletions
@@ -11,7 +11,7 @@ If you are planning to use an external service for Inference (even Ollama or TGI
 This avoids the overhead of setting up a server.
 ```bash
 # setup
-uv pip install llama-stack
+uv pip install llama-stack llama-stack-client
 llama stack list-deps starter | xargs -L1 uv pip install
 ```
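Since the server package no longer pulls in the client, the quickstart now installs both explicitly. A minimal sketch of the library-mode flow this doc section describes, assuming the starter distribution from the snippet above and that its provider dependencies are installed (the model-listing call is illustrative, not from this diff):

```python
# In-process ("library") mode: no server to set up, matching the
# "avoids the overhead of setting up a server" note above.
from llama_stack.core.library_client import LlamaStackAsLibraryClient

client = LlamaStackAsLibraryClient("starter")
client.initialize()  # builds the providers in-process

# Any client API then works as it would against a running server.
for model in client.models.list():
    print(model.identifier)
```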
@@ -37,7 +37,7 @@
 "outputs": [],
 "source": [
 "# NBVAL_SKIP\n",
-"!pip install -U llama-stack\n",
+"!pip install -U llama-stack llama-stack-client\n",
 "llama stack list-deps fireworks | xargs -L1 uv pip install\n"
 ]
 },
@@ -44,7 +44,7 @@
 "outputs": [],
 "source": [
 "# NBVAL_SKIP\n",
-"!pip install -U llama-stack"
+"!pip install -U llama-stack llama-stack-client\n",
 ]
 },
 {
@@ -74,6 +74,7 @@
 "source": [
 "```bash\n",
 "uv sync --extra dev\n",
+"uv pip install -U llama-stack-client\n",
 "uv pip install -e .\n",
 "source .venv/bin/activate\n",
 "```"
@@ -30,7 +30,6 @@ dependencies = [
 "httpx",
 "jinja2>=3.1.6",
 "jsonschema",
-"llama-stack-client>=0.3.0",
 "openai>=2.5.0",
 "prompt-toolkit",
 "python-dotenv",
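With llama-stack-client removed from the server's hard dependencies, environments that use the library client must install it themselves. A hypothetical pre-flight check a deployment script might run (the check and its message are illustrative, not from this diff):

```python
# Verify the now-optional client package is present before enabling
# library-client code paths.
import importlib.util

if importlib.util.find_spec("llama_stack_client") is None:
    raise SystemExit(
        "llama-stack-client is not installed: pip install llama-stack-client"
    )
```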
@@ -3,8 +3,3 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-
-from llama_stack.core.library_client import (  # noqa: F401
-    AsyncLlamaStackAsLibraryClient,
-    LlamaStackAsLibraryClient,
-)
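Removing this re-export means `from llama_stack import LlamaStackAsLibraryClient` no longer resolves. A sketch of the migration for downstream code that relied on the top-level name:

```python
# Before this commit, the top-level re-export made this work:
#     from llama_stack import LlamaStackAsLibraryClient
# After it, import from the defining module instead:
from llama_stack.core.library_client import (
    AsyncLlamaStackAsLibraryClient,
    LlamaStackAsLibraryClient,
)
```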
@@ -18,6 +18,8 @@ from typing import Any, TypeVar, Union, get_args, get_origin
 import httpx
 import yaml
 from fastapi import Response as FastAPIResponse
 
+try:
     from llama_stack_client import (
         NOT_GIVEN,
         APIResponse,
@@ -26,6 +28,11 @@ from llama_stack_client import (
         AsyncStream,
         LlamaStackClient,
     )
+except ImportError as e:
+    raise ImportError(
+        "llama-stack-client is not installed. Please install it with `pip install llama-stack-client`."
+    ) from e
+
 from pydantic import BaseModel, TypeAdapter
 from rich.console import Console
 from termcolor import cprint
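The new guard turns a missing optional dependency into an actionable error at import time. An illustrative look at the failure mode in an environment without llama-stack-client installed:

```python
# Without llama-stack-client installed, importing the library client
# now raises the guarded ImportError with an install hint, rather
# than a bare ModuleNotFoundError deep in the import chain.
try:
    from llama_stack.core.library_client import LlamaStackAsLibraryClient  # noqa: F401
except ImportError as err:
    print(err)
    # llama-stack-client is not installed. Please install it with
    # `pip install llama-stack-client`.
```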
@@ -20,6 +20,7 @@ This provider enables dataset management using NVIDIA's NeMo Customizer service.
 Build the NVIDIA environment:
 
 ```bash
+uv pip install llama-stack-client
 uv run llama stack list-deps nvidia | xargs -L1 uv pip install
 ```
@@ -18,6 +18,7 @@ This provider enables running inference using NVIDIA NIM.
 Build the NVIDIA environment:
 
 ```bash
+uv pip install llama-stack-client
 uv run llama stack list-deps nvidia | xargs -L1 uv pip install
 ```
@@ -22,6 +22,7 @@ This provider enables fine-tuning of LLMs using NVIDIA's NeMo Customizer service
 Build the NVIDIA environment:
 
 ```bash
+uv pip install llama-stack-client
 uv run llama stack list-deps nvidia | xargs -L1 uv pip install
 ```
@@ -19,6 +19,7 @@ This provider enables safety checks and guardrails for LLM interactions using NV
 Build the NVIDIA environment:
 
 ```bash
+uv pip install llama-stack-client
 uv run llama stack list-deps nvidia | xargs -L1 uv pip install
 ```
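All four NVIDIA provider guides above gain the same explicit client install, since installing llama-stack no longer brings the client in transitively. A minimal sketch of using that client against a running stack server, assuming the default port (8321 is llama-stack's usual default, not something this diff sets) and that the nvidia distribution is up:

```python
# Point the standalone client at a running stack server; the URL is
# an assumption for illustration.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")
print([m.identifier for m in client.models.list()])
```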
uv.lock (generated): 2 changes
@@ -1945,7 +1945,6 @@ dependencies = [
 { name = "httpx" },
 { name = "jinja2" },
 { name = "jsonschema" },
-{ name = "llama-stack-client" },
 { name = "openai" },
 { name = "opentelemetry-exporter-otlp-proto-http" },
 { name = "opentelemetry-sdk" },
@@ -2096,7 +2095,6 @@ requires-dist = [
 { name = "httpx" },
 { name = "jinja2", specifier = ">=3.1.6" },
 { name = "jsonschema" },
-{ name = "llama-stack-client", specifier = ">=0.3.0" },
 { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.3.0" },
 { name = "openai", specifier = ">=2.5.0" },
 { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },