diff --git a/scripts/get_setup_env.py b/scripts/get_setup_env.py
index b0a059ca0..755cfefea 100755
--- a/scripts/get_setup_env.py
+++ b/scripts/get_setup_env.py
@@ -16,37 +16,6 @@ import sys
 from tests.integration.suites import SETUP_DEFINITIONS, SUITE_DEFINITIONS
 
 
-def get_setup_env_vars(setup_name, suite_name=None):
-    """
-    Get environment variables for a setup, with optional suite default fallback.
-
-    Args:
-        setup_name: Name of the setup (e.g., 'ollama', 'gpt')
-        suite_name: Optional suite name to get default setup if setup_name is None
-
-    Returns:
-        Dictionary of environment variables
-    """
-    # If no setup specified, try to get default from suite
-    if not setup_name and suite_name:
-        suite = SUITE_DEFINITIONS.get(suite_name)
-        if suite and suite.default_setup:
-            setup_name = suite.default_setup
-
-    if not setup_name:
-        return {}
-
-    setup = SETUP_DEFINITIONS.get(setup_name)
-    if not setup:
-        print(
-            f"Error: Unknown setup '{setup_name}'. Available: {', '.join(sorted(SETUP_DEFINITIONS.keys()))}",
-            file=sys.stderr,
-        )
-        sys.exit(1)
-
-    return setup.env
-
-
 def get_setup_config(setup_name, suite_name=None):
     """
     Get full configuration (env vars + defaults) for a setup.
@@ -83,39 +52,23 @@ def main():
     parser.add_argument("--setup", help="Setup name (e.g., ollama, gpt)")
     parser.add_argument("--suite", help="Suite name to get default setup from if --setup not provided")
     parser.add_argument("--format", choices=["bash", "json"], default="bash", help="Output format (default: bash)")
-    parser.add_argument(
-        "--include-defaults",
-        action="store_true",
-        help="Include setup defaults (text_model, embedding_model, etc.) in addition to env vars",
-    )
     args = parser.parse_args()
 
-    if args.include_defaults:
-        config = get_setup_config(args.setup, args.suite)
-        if args.format == "bash":
-            # Output env vars as bash export statements
-            for key, value in config["env"].items():
-                print(f"export {key}='{value}'")
-            # Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix
-            for key, value in config["defaults"].items():
-                env_key = f"LLAMA_STACK_TEST_{key.upper()}"
-                print(f"export {env_key}='{value}'")
-        elif args.format == "json":
-            import json
+    config = get_setup_config(args.setup, args.suite)
 
-            print(json.dumps(config))
-    else:
-        env_vars = get_setup_env_vars(args.setup, args.suite)
+    if args.format == "bash":
+        # Output env vars as bash export statements
+        for key, value in config["env"].items():
+            print(f"export {key}='{value}'")
+        # Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix
+        for key, value in config["defaults"].items():
+            env_key = f"LLAMA_STACK_TEST_{key.upper()}"
+            print(f"export {env_key}='{value}'")
+    elif args.format == "json":
+        import json
 
-        if args.format == "bash":
-            # Output as bash export statements
-            for key, value in env_vars.items():
-                print(f"export {key}='{value}'")
-        elif args.format == "json":
-            import json
-
-            print(json.dumps(env_vars))
+        print(json.dumps(config))
 
 
 if __name__ == "__main__":
diff --git a/scripts/integration-tests.sh b/scripts/integration-tests.sh
index ae88da68a..20ecd0c4d 100755
--- a/scripts/integration-tests.sh
+++ b/scripts/integration-tests.sh
@@ -180,14 +180,10 @@
 echo "Setting up environment variables:"
 echo "$SETUP_ENV"
 eval "$SETUP_ENV"
 echo ""
-if [[ -n "$TEST_SETUP" ]]; then
-  # Export setup name - TypeScript tests will call get_setup_env.py themselves to get model defaults
-  export LLAMA_STACK_TEST_SETUP="$TEST_SETUP"
-  # Export model env vars for Python tests using get_setup_env.py
-  SETUP_DEFAULTS_ENV=$(PYTHONPATH=$THIS_DIR/.. python $THIS_DIR/get_setup_env.py --setup "$TEST_SETUP" --format bash --include-defaults)
-  eval "$SETUP_DEFAULTS_ENV"
-fi
+# Export suite and setup names for TypeScript tests
+export LLAMA_STACK_TEST_SUITE="$TEST_SUITE"
+export LLAMA_STACK_TEST_SETUP="$TEST_SETUP"
 
 ROOT_DIR="$THIS_DIR/.."
 cd $ROOT_DIR
@@ -255,18 +251,6 @@ run_client_ts_tests() {
     # Then install the client from local directory
     echo "Installing llama-stack-client from: $TS_CLIENT_PATH"
     npm install "$TS_CLIENT_PATH" --silent
-
-    # Verify installation
-    echo "Verifying llama-stack-client installation..."
-    if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
-      echo "✅ llama-stack-client successfully installed"
-      npm list llama-stack-client
-    else
-      echo "❌ llama-stack-client not found in node_modules"
-      echo "Installed packages:"
-      npm list --depth=0
-      return 1
-    fi
   else
     # It's an npm version specifier - install from npm
     echo "Installing llama-stack-client@${TS_CLIENT_PATH} from npm"
@@ -276,23 +260,20 @@ run_client_ts_tests() {
     else
       npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
     fi
-
-    # Verify installation
-    echo "Verifying llama-stack-client installation..."
-    if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
-      echo "✅ llama-stack-client successfully installed"
-      npm list llama-stack-client
-    else
-      echo "❌ llama-stack-client not found in node_modules"
-      echo "Installed packages:"
-      npm list --depth=0
-      return 1
-    fi
   fi
 
-  # Export env vars for the test runner to read suites.json
-  export LLAMA_STACK_TEST_SUITE="$TEST_SUITE"
-  # LLAMA_STACK_TEST_SETUP already exported earlier
+  # Verify installation
+  echo "Verifying llama-stack-client installation..."
+  if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
+    echo "✅ llama-stack-client successfully installed"
+    npm list llama-stack-client
+  else
+    echo "❌ llama-stack-client not found in node_modules"
+    echo "Installed packages:"
+    npm list --depth=0
+    popd >/dev/null
+    return 1
+  fi
 
   echo "Running TypeScript tests for suite $TEST_SUITE (setup $TEST_SETUP)"
   npm test
diff --git a/tests/integration/client-typescript/setup.ts b/tests/integration/client-typescript/setup.ts
index 82bc03087..75cabab74 100644
--- a/tests/integration/client-typescript/setup.ts
+++ b/tests/integration/client-typescript/setup.ts
@@ -9,8 +9,6 @@
  * This file mimics pytest's fixture system by providing shared test configuration.
  */
 
-import { execSync } from 'child_process';
-import * as path from 'path';
 import LlamaStackClient from 'llama-stack-client';
 
 /**
@@ -20,6 +18,8 @@ import LlamaStackClient from 'llama-stack-client';
 function loadTestConfig() {
   const baseURL = process.env['TEST_API_BASE_URL'];
   const setupName = process.env['LLAMA_STACK_TEST_SETUP'];
+  const textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
+  const embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
 
   if (!baseURL) {
     throw new Error(
@@ -28,33 +28,6 @@ function loadTestConfig() {
     );
   }
 
-  // If setup is specified, load config from Python
-  let textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
-  let embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
-
-  if (setupName && !textModel) {
-    try {
-      // Call Python script to get setup configuration
-      const rootDir = path.resolve(__dirname, '../../..');
-      const scriptPath = path.join(rootDir, 'scripts/get_setup_env.py');
-
-      const configJson = execSync(
-        `cd ${rootDir} && PYTHONPATH=. python ${scriptPath} --setup ${setupName} --format json --include-defaults`,
-        { encoding: 'utf-8' }
-      );
-
-      const config = JSON.parse(configJson);
-
-      // Map Python defaults to TypeScript env vars
-      if (config.defaults) {
-        textModel = config.defaults.text_model;
-        embeddingModel = config.defaults.embedding_model;
-      }
-    } catch (error) {
-      console.warn(`Warning: Failed to load config for setup "${setupName}":`, error);
-    }
-  }
-
   return {
     baseURL,
     textModel,
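
Note on the resulting flow (a minimal sketch; the "ollama" setup name comes from the argparse help text above, and the concrete values shown are hypothetical, not taken from this diff): with the --include-defaults flag removed, a single call to the simplified script emits both the provider env vars and the LLAMA_STACK_TEST_* defaults, and setup.ts reads them straight from the environment instead of shelling out to Python.

    # One call from the runner exports everything up front (hypothetical values below):
    eval "$(PYTHONPATH=. python scripts/get_setup_env.py --setup ollama --format bash)"
    #   export OLLAMA_URL='http://localhost:11434'
    #   export LLAMA_STACK_TEST_TEXT_MODEL='ollama/llama3.2:3b'
    #   export LLAMA_STACK_TEST_EMBEDDING_MODEL='all-MiniLM-L6-v2'
    # setup.ts then reads process.env['LLAMA_STACK_TEST_TEXT_MODEL'] and
    # process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'] directly.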