simplification

Ashwin Bharambe 2025-11-18 15:44:07 -08:00
parent 78a676e231
commit 91e178d1ef
3 changed files with 116 additions and 47 deletions


@@ -47,14 +47,65 @@ def get_setup_env_vars(setup_name, suite_name=None):
     return setup.env


+def get_setup_config(setup_name, suite_name=None):
+    """
+    Get full configuration (env vars + defaults) for a setup.
+
+    Args:
+        setup_name: Name of the setup (e.g., 'ollama', 'gpt')
+        suite_name: Optional suite name to get default setup if setup_name is None
+
+    Returns:
+        Dictionary with 'env' and 'defaults' keys
+    """
+    # If no setup specified, try to get default from suite
+    if not setup_name and suite_name:
+        suite = SUITE_DEFINITIONS.get(suite_name)
+        if suite and suite.default_setup:
+            setup_name = suite.default_setup
+
+    if not setup_name:
+        return {"env": {}, "defaults": {}}
+
+    setup = SETUP_DEFINITIONS.get(setup_name)
+    if not setup:
+        print(
+            f"Error: Unknown setup '{setup_name}'. Available: {', '.join(sorted(SETUP_DEFINITIONS.keys()))}",
+            file=sys.stderr,
+        )
+        sys.exit(1)
+
+    return {"env": setup.env, "defaults": setup.defaults}
+
+
 def main():
-    parser = argparse.ArgumentParser(description="Extract environment variables from a test setup")
+    parser = argparse.ArgumentParser(description="Extract environment variables and defaults from a test setup")
     parser.add_argument("--setup", help="Setup name (e.g., ollama, gpt)")
     parser.add_argument("--suite", help="Suite name to get default setup from if --setup not provided")
     parser.add_argument("--format", choices=["bash", "json"], default="bash", help="Output format (default: bash)")
+    parser.add_argument(
+        "--include-defaults",
+        action="store_true",
+        help="Include setup defaults (text_model, embedding_model, etc.) in addition to env vars",
+    )

     args = parser.parse_args()

-    env_vars = get_setup_env_vars(args.setup, args.suite)
-
-    if args.format == "bash":
+    if args.include_defaults:
+        config = get_setup_config(args.setup, args.suite)
+        if args.format == "bash":
+            # Output env vars as bash export statements
+            for key, value in config["env"].items():
+                print(f"export {key}='{value}'")
+            # Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix
+            for key, value in config["defaults"].items():
+                env_key = f"LLAMA_STACK_TEST_{key.upper()}"
+                print(f"export {env_key}='{value}'")
+        elif args.format == "json":
+            import json
+
+            print(json.dumps(config))
+    else:
+        env_vars = get_setup_env_vars(args.setup, args.suite)
+
+        if args.format == "bash":
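
For reference, a minimal sketch of consuming the new --include-defaults JSON output from Python. The 'ollama' setup name is illustrative (taken from the flag's help text), and the sketch assumes it runs from the repo root with PYTHONPATH=. so that tests.integration.suites resolves, mirroring how the TypeScript config below invokes the script:

import json
import os
import subprocess

# Illustrative setup name; any name registered in SETUP_DEFINITIONS works.
cmd = [
    "python",
    "scripts/get_setup_env.py",
    "--setup",
    "ollama",
    "--format",
    "json",
    "--include-defaults",
]
result = subprocess.run(
    cmd,
    capture_output=True,
    text=True,
    check=True,
    env={**os.environ, "PYTHONPATH": "."},  # assumes the repo root as the working directory
)
config = json.loads(result.stdout)
# get_setup_config() returns {"env": {...}, "defaults": {...}}
provider_env = config["env"]
text_model = config["defaults"].get("text_model")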


@@ -198,36 +198,12 @@ echo "$SETUP_ENV"
 eval "$SETUP_ENV"
 echo ""
 if [[ -n "$RESOLVED_TEST_SETUP" ]]; then
-  SETUP_DEFAULTS=$(PYTHONPATH=$THIS_DIR/.. python - "$RESOLVED_TEST_SETUP" <<'PY'
-import sys
-
-from tests.integration.suites import SETUP_DEFINITIONS
-
-setup_name = sys.argv[1]
-if not setup_name:
-    sys.exit(0)
-
-setup = SETUP_DEFINITIONS.get(setup_name)
-if not setup:
-    sys.exit(0)
-
-for key, value in setup.defaults.items():
-    print(f"{key}={value}")
-PY
-)
-  while IFS='=' read -r key value; do
-    case "$key" in
-      text_model)
-        export LLAMA_STACK_TEST_MODEL="$value"
-        ;;
-      embedding_model)
-        export LLAMA_STACK_TEST_EMBEDDING_MODEL="$value"
-        ;;
-      vision_model)
-        export LLAMA_STACK_TEST_VISION_MODEL="$value"
-        ;;
-    esac
-  done <<< "$SETUP_DEFAULTS"
+  # Export setup name - TypeScript tests will call get_setup_env.py themselves to get model defaults
+  export LLAMA_STACK_TEST_SETUP="$RESOLVED_TEST_SETUP"
+
+  # Export model env vars for Python tests using get_setup_env.py
+  SETUP_DEFAULTS_ENV=$(PYTHONPATH=$THIS_DIR/.. python $THIS_DIR/get_setup_env.py --setup "$RESOLVED_TEST_SETUP" --format bash --include-defaults)
+  eval "$SETUP_DEFAULTS_ENV"
 fi

 ROOT_DIR="$THIS_DIR/.."
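
For Python-side consumers, a minimal sketch of the environment established once the eval'd --include-defaults output runs, assuming a setup whose defaults include text_model and embedding_model (keys are uppercased and prefixed with LLAMA_STACK_TEST_ by get_setup_env.py):

import os

# Exported directly by the shell script change above for the resolved setup.
setup_name = os.environ.get("LLAMA_STACK_TEST_SETUP")

# Each defaults key becomes LLAMA_STACK_TEST_<KEY_UPPERCASED>, so defaults of
# {"text_model": ..., "embedding_model": ...} would yield:
text_model = os.environ.get("LLAMA_STACK_TEST_TEXT_MODEL")
embedding_model = os.environ.get("LLAMA_STACK_TEST_EMBEDDING_MODEL")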


@@ -9,26 +9,68 @@
  * This file mimics pytest's fixture system by providing shared test configuration.
  */

+import { execSync } from 'child_process';
+import * as path from 'path';
 import LlamaStackClient from 'llama-stack-client';

-// Read configuration from environment variables (set by scripts/integration-test.sh)
-export const TEST_CONFIG = {
-  baseURL: process.env['TEST_API_BASE_URL'],
-  textModel: process.env['LLAMA_STACK_TEST_MODEL'],
-  embeddingModel: process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'],
-} as const;
-
-// Validate required configuration
-beforeAll(() => {
-  if (!TEST_CONFIG.baseURL) {
+/**
+ * Load test configuration from the Python setup system.
+ * This reads setup definitions from tests/integration/suites.py via get_setup_env.py.
+ */
+function loadTestConfig() {
+  const baseURL = process.env['TEST_API_BASE_URL'];
+  const setupName = process.env['LLAMA_STACK_TEST_SETUP'];
+
+  if (!baseURL) {
     throw new Error(
       'TEST_API_BASE_URL is required for integration tests. ' +
         'Run tests using: ./scripts/integration-test.sh',
     );
   }

+  // If setup is specified, load config from Python
+  let textModel = process.env['LLAMA_STACK_TEST_MODEL'];
+  let embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
+
+  if (setupName && !textModel) {
+    try {
+      // Call Python script to get setup configuration
+      const rootDir = path.resolve(__dirname, '../../..');
+      const scriptPath = path.join(rootDir, 'scripts/get_setup_env.py');
+      const configJson = execSync(
+        `cd ${rootDir} && PYTHONPATH=. python ${scriptPath} --setup ${setupName} --format json --include-defaults`,
+        { encoding: 'utf-8' }
+      );
+      const config = JSON.parse(configJson);
+
+      // Map Python defaults to TypeScript env vars
+      if (config.defaults) {
+        textModel = config.defaults.text_model;
+        embeddingModel = config.defaults.embedding_model;
+      }
+    } catch (error) {
+      console.warn(`Warning: Failed to load config for setup "${setupName}":`, error);
+    }
+  }
+
+  return {
+    baseURL,
+    textModel,
+    embeddingModel,
+    setupName,
+  };
+}
+
+// Read configuration from environment variables (set by scripts/integration-test.sh)
+export const TEST_CONFIG = loadTestConfig();
+
+// Validate required configuration
+beforeAll(() => {
   console.log('\n=== Integration Test Configuration ===');
   console.log(`Base URL: ${TEST_CONFIG.baseURL}`);
+  console.log(`Setup: ${TEST_CONFIG.setupName || 'NOT SET'}`);
   console.log(
     `Text Model: ${TEST_CONFIG.textModel || 'NOT SET - tests requiring text model will be skipped'}`,
   );