Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-27 18:50:41 +00:00)
fix: multiple issues with getting_started notebook (#1795)
Fixes multiple issues:

1. `llama stack build` of dependencies was breaking with incompatible numpy / pandas versions when importing datasets.

   Moved the notebook to start a local server instead of using the library as a client. This way the setup is cleaner since it is all contained, and by using `uv run --with` we can also test the server setup process in CI and at release time.

2. The change in (1) surfaced some other issues:
   - running `llama stack run` was defaulting to the conda env name
   - provider data was not being managed properly
   - some notebook cells (telemetry for evals) were not updated with the latest changes

Fixed all the issues and updated the notebook.

### Test

1. Manually ran it all in a local env.
2. `pytest -v -s --nbval-lax docs/getting_started.ipynb`
Parent: bdfe7fee92
Commit: cb2a9784ab

4 changed files with 445 additions and 1842 deletions
File diff suppressed because one or more lines are too long
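The notebook diff is suppressed above, so as a rough illustration of the change described in the commit message: the notebook now launches a local Llama Stack server via `uv run --with` instead of using the library as a client. A cell doing that might look like the hedged sketch below; the "together" template, port 8321, the health-check path, and the exact `llama stack run` flags are assumptions for illustration, not taken from the notebook.

```python
# Hedged sketch of a notebook cell that starts a local Llama Stack server in the
# background via `uv run --with`, rather than using the library client directly.
# The "together" template, port 8321, and the /v1/health path are assumptions.
import os
import subprocess
import time

import requests


def start_llama_stack_server(template: str = "together", port: int = 8321) -> subprocess.Popen:
    # `uv run --with llama-stack` adds llama-stack as an extra dependency, so the
    # same cell also exercises the server setup path in CI and at release time.
    process = subprocess.Popen(
        ["uv", "run", "--with", "llama-stack", "llama", "stack", "run", template,
         "--image-type", "venv", "--port", str(port)],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        env={**os.environ},
    )
    # Poll until the server answers, or give up after ~30 seconds.
    for _ in range(30):
        try:
            if requests.get(f"http://localhost:{port}/v1/health", timeout=1).ok:
                return process
        except requests.exceptions.ConnectionError:
            pass
        time.sleep(1)
    process.terminate()
    raise RuntimeError("Llama Stack server did not become ready in time")
```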
@@ -43,7 +43,7 @@ class StackRun(Subcommand):
         self.parser.add_argument(
             "--image-name",
             type=str,
-            default=os.environ.get("CONDA_DEFAULT_ENV"),
+            default=None,
             help="Name of the image to run. Defaults to the current conda environment",
         )
         self.parser.add_argument(
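The hunk above changes the `--image-name` default from the conda environment captured at argument-parsing time to `None`, which is what stopped `llama stack run` from silently defaulting to the conda env name. A hedged sketch of the kind of run-time resolution this enables (illustrative only, not llama-stack's actual logic):

```python
# Hedged sketch: with default=None, the image name can be resolved when the
# command runs, instead of freezing CONDA_DEFAULT_ENV into the parser default.
# The fallback order below is illustrative, not the actual llama-stack code.
import os


def resolve_image_name(image_name: str | None, image_type: str) -> str | None:
    if image_name is not None:
        return image_name  # an explicit --image-name always wins
    if image_type == "venv":
        return os.environ.get("VIRTUAL_ENV") or None
    if image_type == "conda":
        # only consult the conda environment when a conda image was requested
        return os.environ.get("CONDA_DEFAULT_ENV") or None
    return None
```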
@@ -13,6 +13,7 @@ LLAMA_CHECKPOINT_DIR=${LLAMA_CHECKPOINT_DIR:-}
 LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
 PYPI_VERSION=${PYPI_VERSION:-}
+VIRTUAL_ENV=${VIRTUAL_ENV:-}
 
 set -euo pipefail
 
@@ -69,22 +70,25 @@ while [[ $# -gt 0 ]]; do
         ;;
     esac
 done
 
 PYTHON_BINARY="python"
 case "$env_type" in
   "venv")
-    # Activate virtual environment
-    if [ ! -d "$env_path_or_name" ]; then
-        echo -e "${RED}Error: Virtual environment not found at $env_path_or_name${NC}" >&2
-        exit 1
-    fi
+    if [ -n "$VIRTUAL_ENV" && "$VIRTUAL_ENV" == "$env_path_or_name" ]; then
+        echo -e "${GREEN}Virtual environment already activated${NC}" >&2
+    else
+        # Activate virtual environment
+        if [ ! -d "$env_path_or_name" ]; then
+            echo -e "${RED}Error: Virtual environment not found at $env_path_or_name${NC}" >&2
+            exit 1
+        fi
 
-    if [ ! -f "$env_path_or_name/bin/activate" ]; then
-        echo -e "${RED}Error: Virtual environment activate binary not found at $env_path_or_name/bin/activate" >&2
-        exit 1
-    fi
+        if [ ! -f "$env_path_or_name/bin/activate" ]; then
+            echo -e "${RED}Error: Virtual environment activate binary not found at $env_path_or_name/bin/activate" >&2
+            exit 1
+        fi
 
-    source "$env_path_or_name/bin/activate"
+        source "$env_path_or_name/bin/activate"
+    fi
     ;;
   "conda")
     if ! is_command_available conda; then
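The two shell hunks above thread `VIRTUAL_ENV` through the start script and skip re-activation when the requested venv is already active. The same check, written as a hedged Python sketch (the helper below is hypothetical, not part of the repo):

```python
# Hedged sketch of the "already activated" check the start script now performs:
# if VIRTUAL_ENV already points at the requested environment, reuse it;
# otherwise the environment must exist on disk before it can be activated.
import os


def venv_needs_activation(env_path: str) -> bool:
    active = os.environ.get("VIRTUAL_ENV", "")
    if active and os.path.realpath(active) == os.path.realpath(env_path):
        return False  # already inside the requested venv
    if not os.path.isdir(env_path):
        raise FileNotFoundError(f"Virtual environment not found at {env_path}")
    return True
```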
@@ -18,15 +18,19 @@ def preserve_contexts_async_generator(
     This is needed because we start a new asyncio event loop for each streaming request,
     and we need to preserve the context across the event loop boundary.
     """
+    # Capture initial context values
+    initial_context_values = {context_var.name: context_var.get() for context_var in context_vars}
 
     async def wrapper() -> AsyncGenerator[T, None]:
         while True:
             try:
-                item = await gen.__anext__()
-                context_values = {context_var.name: context_var.get() for context_var in context_vars}
-                yield item
+                # Restore context values before any await
                 for context_var in context_vars:
-                    _ = context_var.set(context_values[context_var.name])
+                    context_var.set(initial_context_values[context_var.name])
+
+                item = await gen.__anext__()
+                yield item
 
             except StopAsyncIteration:
                 break
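The last hunk changes `preserve_contexts_async_generator` to snapshot the context variables once, up front, and re-set them inside the wrapper before each `await`, so the values survive the hop onto the per-request event loop. A minimal, self-contained sketch of that capture-and-restore pattern (not the llama-stack implementation itself):

```python
# Minimal sketch of the capture-then-restore pattern from the hunk above:
# snapshot ContextVar values when the wrapper is created, and re-set them before
# awaiting, so they survive being iterated from a context that has lost them.
import asyncio
from contextvars import ContextVar
from typing import AsyncGenerator, TypeVar

T = TypeVar("T")
REQUEST_ID: ContextVar[str] = ContextVar("request_id", default="unset")


def preserve_contexts(gen: AsyncGenerator[T, None], context_vars: list[ContextVar]) -> AsyncGenerator[T, None]:
    # Capture initial context values at wrap time.
    initial_values = {var.name: var.get() for var in context_vars}

    async def wrapper() -> AsyncGenerator[T, None]:
        while True:
            try:
                # Restore context values before any await.
                for var in context_vars:
                    var.set(initial_values[var.name])
                item = await gen.__anext__()
                yield item
            except StopAsyncIteration:
                break

    return wrapper()


async def numbers() -> AsyncGenerator[str, None]:
    for i in range(2):
        yield f"{REQUEST_ID.get()}:{i}"


async def main() -> None:
    token = REQUEST_ID.set("req-42")
    wrapped = preserve_contexts(numbers(), [REQUEST_ID])
    REQUEST_ID.reset(token)  # simulate losing the context, e.g. a fresh event loop
    async for item in wrapped:
        print(item)  # prints req-42:0 then req-42:1


asyncio.run(main())
```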