# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import os
from contextvars import ContextVar

from llama_stack.core.request_headers import PROVIDER_DATA_VAR

# Tracks the id of the test currently being executed, so other components
# (e.g. test recording) can associate their work with that test.
TEST_CONTEXT: ContextVar[str | None] = ContextVar("llama_stack_test_context", default=None)


def get_test_context() -> str | None:
    return TEST_CONTEXT.get()


def set_test_context(value: str | None):
    # ContextVar.set() returns a Token that callers pass to reset_test_context().
    return TEST_CONTEXT.set(value)


def reset_test_context(token) -> None:
    TEST_CONTEXT.reset(token)


def sync_test_context_from_provider_data():
    """Sync test context from provider data when running in server test mode."""
    # Only applies when the test inference-mode machinery is active.
    if "LLAMA_STACK_TEST_INFERENCE_MODE" not in os.environ:
        return None

    # Syncing from provider data is only needed when the stack runs as a server;
    # a library client can set the test context in-process.
    stack_config_type = os.environ.get("LLAMA_STACK_TEST_STACK_CONFIG_TYPE", "library_client")
    if stack_config_type != "server":
        return None

    # Provider data is request-scoped and may not be set at all.
    try:
        provider_data = PROVIDER_DATA_VAR.get()
    except LookupError:
        provider_data = None

    # The test id travels in provider data under the reserved "__test_id" key.
    if provider_data and "__test_id" in provider_data:
        return TEST_CONTEXT.set(provider_data["__test_id"])

    return None
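
# --- Illustrative sketch (not part of the original file) --------------------
# How a test client might deliver "__test_id" so the sync above can pick it up.
# The header name and JSON encoding are assumptions about how provider data is
# transmitted (see llama_stack.core.request_headers); verify there before
# relying on this.
#
#   import json
#   import httpx
#
#   def list_models_with_test_id(base_url: str, test_id: str) -> httpx.Response:
#       provider_data = {"__test_id": test_id}
#       return httpx.get(
#           f"{base_url}/v1/models",
#           headers={"X-LlamaStack-Provider-Data": json.dumps(provider_data)},
#       )
# -----------------------------------------------------------------------------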


def is_debug_mode() -> bool:
    """Check if test recording debug mode is enabled via LLAMA_STACK_TEST_DEBUG env var."""
    return os.environ.get("LLAMA_STACK_TEST_DEBUG", "").lower() in ("1", "true", "yes")
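

# --- Illustrative usage (not part of the original file) ---------------------
# A minimal sketch of how a test harness might scope TEST_CONTEXT around one
# test using the token returned by set_test_context(). The context manager is
# hypothetical; it only demonstrates the set/reset pairing of the helpers above.
from collections.abc import Iterator
from contextlib import contextmanager


@contextmanager
def scoped_test_context(test_id: str) -> Iterator[None]:
    token = set_test_context(test_id)  # ContextVar.set() returns a reset token
    try:
        yield
    finally:
        reset_test_context(token)  # restore whatever value was active before


# Example:
#   with scoped_test_context("tests/integration/test_inference.py::test_chat"):
#       assert get_test_context() == "tests/integration/test_inference.py::test_chat"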