Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 01:48:05 +00:00)

Commit f15a6367e2 (parent a94093bcbd): "reduce crap written initially by Claude"

3 changed files with 29 additions and 122 deletions
scripts/get_setup_env.py:

@@ -16,37 +16,6 @@ import sys
 from tests.integration.suites import SETUP_DEFINITIONS, SUITE_DEFINITIONS
 
 
-def get_setup_env_vars(setup_name, suite_name=None):
-    """
-    Get environment variables for a setup, with optional suite default fallback.
-
-    Args:
-        setup_name: Name of the setup (e.g., 'ollama', 'gpt')
-        suite_name: Optional suite name to get default setup if setup_name is None
-
-    Returns:
-        Dictionary of environment variables
-    """
-    # If no setup specified, try to get default from suite
-    if not setup_name and suite_name:
-        suite = SUITE_DEFINITIONS.get(suite_name)
-        if suite and suite.default_setup:
-            setup_name = suite.default_setup
-
-    if not setup_name:
-        return {}
-
-    setup = SETUP_DEFINITIONS.get(setup_name)
-    if not setup:
-        print(
-            f"Error: Unknown setup '{setup_name}'. Available: {', '.join(sorted(SETUP_DEFINITIONS.keys()))}",
-            file=sys.stderr,
-        )
-        sys.exit(1)
-
-    return setup.env
-
-
 def get_setup_config(setup_name, suite_name=None):
     """
     Get full configuration (env vars + defaults) for a setup.
@@ -83,39 +52,23 @@ def main():
     parser.add_argument("--setup", help="Setup name (e.g., ollama, gpt)")
     parser.add_argument("--suite", help="Suite name to get default setup from if --setup not provided")
     parser.add_argument("--format", choices=["bash", "json"], default="bash", help="Output format (default: bash)")
-    parser.add_argument(
-        "--include-defaults",
-        action="store_true",
-        help="Include setup defaults (text_model, embedding_model, etc.) in addition to env vars",
-    )
 
     args = parser.parse_args()
 
-    if args.include_defaults:
-        config = get_setup_config(args.setup, args.suite)
-        if args.format == "bash":
-            # Output env vars as bash export statements
-            for key, value in config["env"].items():
-                print(f"export {key}='{value}'")
-            # Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix
-            for key, value in config["defaults"].items():
-                env_key = f"LLAMA_STACK_TEST_{key.upper()}"
-                print(f"export {env_key}='{value}'")
-        elif args.format == "json":
-            import json
-
-            print(json.dumps(config))
-    else:
-        env_vars = get_setup_env_vars(args.setup, args.suite)
-
-        if args.format == "bash":
-            # Output as bash export statements
-            for key, value in env_vars.items():
-                print(f"export {key}='{value}'")
-        elif args.format == "json":
-            import json
-
-            print(json.dumps(env_vars))
+    config = get_setup_config(args.setup, args.suite)
+
+    if args.format == "bash":
+        # Output env vars as bash export statements
+        for key, value in config["env"].items():
+            print(f"export {key}='{value}'")
+        # Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix
+        for key, value in config["defaults"].items():
+            env_key = f"LLAMA_STACK_TEST_{key.upper()}"
+            print(f"export {env_key}='{value}'")
+    elif args.format == "json":
+        import json
+
+        print(json.dumps(config))
 
 
 if __name__ == "__main__":
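Note: with --include-defaults removed, the script always emits both the setup's env vars and the LLAMA_STACK_TEST_* model defaults. A rough usage sketch in bash (the setup name and printed values below are illustrative, not taken from this diff):

# run from the repo root so tests.integration.suites is importable
PYTHONPATH=. python scripts/get_setup_env.py --setup ollama --format bash
#   export SOME_PROVIDER_VAR='...'              # from the setup's env (placeholder name)
#   export LLAMA_STACK_TEST_TEXT_MODEL='...'    # from the setup's defaults, upper-cased and prefixed

# --format json prints the whole config object, i.e. {"env": {...}, "defaults": {...}}
PYTHONPATH=. python scripts/get_setup_env.py --setup ollama --format json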
Integration test runner (shell script):

@@ -180,14 +180,10 @@ echo "Setting up environment variables:"
 echo "$SETUP_ENV"
 eval "$SETUP_ENV"
 echo ""
-if [[ -n "$TEST_SETUP" ]]; then
-  # Export setup name - TypeScript tests will call get_setup_env.py themselves to get model defaults
-  export LLAMA_STACK_TEST_SETUP="$TEST_SETUP"
-
-  # Export model env vars for Python tests using get_setup_env.py
-  SETUP_DEFAULTS_ENV=$(PYTHONPATH=$THIS_DIR/.. python $THIS_DIR/get_setup_env.py --setup "$TEST_SETUP" --format bash --include-defaults)
-  eval "$SETUP_DEFAULTS_ENV"
-fi
+
+# Export suite and setup names for TypeScript tests
+export LLAMA_STACK_TEST_SUITE="$TEST_SUITE"
+export LLAMA_STACK_TEST_SETUP="$TEST_SETUP"
 
 ROOT_DIR="$THIS_DIR/.."
 cd $ROOT_DIR
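After this hunk the runner only publishes the suite and setup names at this point; presumably the model defaults now arrive through the SETUP_ENV evaluated just above, since get_setup_env.py always includes them after this commit. A minimal sketch of the resulting environment, with made-up values:

export LLAMA_STACK_TEST_SUITE='base'      # illustrative suite name
export LLAMA_STACK_TEST_SETUP='ollama'
# plus whatever SETUP_ENV exported, e.g. LLAMA_STACK_TEST_TEXT_MODEL / LLAMA_STACK_TEST_EMBEDDING_MODEL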
@@ -255,18 +251,6 @@ run_client_ts_tests() {
     # Then install the client from local directory
     echo "Installing llama-stack-client from: $TS_CLIENT_PATH"
     npm install "$TS_CLIENT_PATH" --silent
-
-    # Verify installation
-    echo "Verifying llama-stack-client installation..."
-    if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
-      echo "✅ llama-stack-client successfully installed"
-      npm list llama-stack-client
-    else
-      echo "❌ llama-stack-client not found in node_modules"
-      echo "Installed packages:"
-      npm list --depth=0
-      return 1
-    fi
   else
     # It's an npm version specifier - install from npm
     echo "Installing llama-stack-client@${TS_CLIENT_PATH} from npm"
@@ -276,23 +260,20 @@ run_client_ts_tests() {
     else
       npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
     fi
-
-    # Verify installation
-    echo "Verifying llama-stack-client installation..."
-    if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
-      echo "✅ llama-stack-client successfully installed"
-      npm list llama-stack-client
-    else
-      echo "❌ llama-stack-client not found in node_modules"
-      echo "Installed packages:"
-      npm list --depth=0
-      return 1
-    fi
   fi
 
-  # Export env vars for the test runner to read suites.json
-  export LLAMA_STACK_TEST_SUITE="$TEST_SUITE"
-  # LLAMA_STACK_TEST_SETUP already exported earlier
+  # Verify installation
+  echo "Verifying llama-stack-client installation..."
+  if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
+    echo "✅ llama-stack-client successfully installed"
+    npm list llama-stack-client
+  else
+    echo "❌ llama-stack-client not found in node_modules"
+    echo "Installed packages:"
+    npm list --depth=0
+    popd >/dev/null
+    return 1
+  fi
 
   echo "Running TypeScript tests for suite $TEST_SUITE (setup $TEST_SETUP)"
   npm test
TypeScript client test setup (shared test configuration):

@@ -9,8 +9,6 @@
  * This file mimics pytest's fixture system by providing shared test configuration.
  */
 
-import { execSync } from 'child_process';
-import * as path from 'path';
 import LlamaStackClient from 'llama-stack-client';
 
 /**
@@ -20,6 +18,8 @@ import LlamaStackClient from 'llama-stack-client';
 function loadTestConfig() {
   const baseURL = process.env['TEST_API_BASE_URL'];
   const setupName = process.env['LLAMA_STACK_TEST_SETUP'];
+  const textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
+  const embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
 
   if (!baseURL) {
     throw new Error(
@@ -28,33 +28,6 @@ function loadTestConfig() {
     );
   }
 
-  // If setup is specified, load config from Python
-  let textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
-  let embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
-
-  if (setupName && !textModel) {
-    try {
-      // Call Python script to get setup configuration
-      const rootDir = path.resolve(__dirname, '../../..');
-      const scriptPath = path.join(rootDir, 'scripts/get_setup_env.py');
-
-      const configJson = execSync(
-        `cd ${rootDir} && PYTHONPATH=. python ${scriptPath} --setup ${setupName} --format json --include-defaults`,
-        { encoding: 'utf-8' }
-      );
-
-      const config = JSON.parse(configJson);
-
-      // Map Python defaults to TypeScript env vars
-      if (config.defaults) {
-        textModel = config.defaults.text_model;
-        embeddingModel = config.defaults.embedding_model;
-      }
-    } catch (error) {
-      console.warn(`Warning: Failed to load config for setup "${setupName}":`, error);
-    }
-  }
-
   return {
     baseURL,
     textModel,
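With the execSync call removed, loadTestConfig() reads everything from the environment and no longer shells out to Python. A hedged sketch of the variables the TypeScript tests now expect (values are illustrative):

export TEST_API_BASE_URL='http://localhost:8321'              # required; loadTestConfig throws if missing
export LLAMA_STACK_TEST_SETUP='ollama'                        # exported by the test runner
export LLAMA_STACK_TEST_TEXT_MODEL='my-text-model'            # optional; undefined when unset
export LLAMA_STACK_TEST_EMBEDDING_MODEL='my-embedding-model'  # optional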