feat(tests): add TypeScript client integration test support (#4185)

Integration tests can now validate the TypeScript SDK alongside Python
tests when running against server-mode stacks. For now, this adds only a
_small_ number of tests; we should extend the suite only if truly
needed -- this smoke check may be sufficient.

When `RUN_CLIENT_TS_TESTS=1` is set, the test script runs TypeScript
tests after Python tests pass. Tests are mapped via
`tests/integration/client-typescript/suites.json` which defines which
TypeScript test files correspond to each Python suite/setup combination.

The fact that exact "test_id"s (which are actually generated by pytest)
must be hardcoded inside the TypeScript tests (so we hit the recorded
paths) is a big smell and might become grating, but the benefit may be
worth it if we keep this test suite _small_ and targeted (see the sketch
below).
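
A rough sketch of the mechanism, using the `createTestClient` helper added in
`tests/integration/client-typescript/setup.ts`:

```typescript
import { createTestClient } from '../setup';

// Copied verbatim from the pytest node id of the recorded Python test.
const testId =
  'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-earth]';

// In server mode, createTestClient forwards this id to the server in the
// X-LlamaStack-Provider-Data header ({"__test_id": ...}), which routes the
// request to the recording captured for that exact Python test.
const client = createTestClient(testId);
```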

## Test Plan

Run with TypeScript tests enabled:
```bash
OPENAI_API_KEY=dummy RUN_CLIENT_TS_TESTS=1 \
  scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
```
Ashwin Bharambe 2025-11-19 10:07:53 -08:00 committed by GitHub
parent 4e9633f7c3
commit 40b11efac4
15 changed files with 6208 additions and 10 deletions


@@ -0,0 +1,35 @@
name: Setup TypeScript client
description: Conditionally checkout and link llama-stack-client-typescript based on client-version
inputs:
client-version:
description: 'Client version (latest or published)'
required: true
outputs:
ts-client-path:
description: 'Path or version to use for TypeScript client'
value: ${{ steps.set-path.outputs.ts-client-path }}
runs:
using: "composite"
steps:
- name: Checkout TypeScript client (latest)
if: ${{ inputs.client-version == 'latest' }}
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
repository: llamastack/llama-stack-client-typescript
ref: main
path: .ts-client-checkout
- name: Set TS_CLIENT_PATH
id: set-path
shell: bash
run: |
if [ "${{ inputs.client-version }}" = "latest" ]; then
echo "ts-client-path=${{ github.workspace }}/.ts-client-checkout" >> $GITHUB_OUTPUT
elif [ "${{ inputs.client-version }}" = "published" ]; then
echo "ts-client-path=^0.3.2" >> $GITHUB_OUTPUT
else
echo "::error::Invalid client-version: ${{ inputs.client-version }}"
exit 1
fi


@@ -93,11 +93,27 @@ jobs:
suite: ${{ matrix.config.suite }}
inference-mode: 'replay'
- name: Setup Node.js for TypeScript client tests
if: ${{ matrix.client == 'server' }}
uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: tests/integration/client-typescript/package-lock.json
- name: Setup TypeScript client
if: ${{ matrix.client == 'server' }}
id: setup-ts-client
uses: ./.github/actions/setup-typescript-client
with:
client-version: ${{ matrix.client-version }}
- name: Run tests
if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
uses: ./.github/actions/run-and-record-tests
env:
OPENAI_API_KEY: dummy
TS_CLIENT_PATH: ${{ steps.setup-ts-client.outputs.ts-client-path || '' }}
with:
stack-config: >-
${{ matrix.config.stack_config

.gitignore (vendored)

@@ -35,3 +35,5 @@ docs/static/imported-files/
docs/docs/api-deprecated/
docs/docs/api-experimental/
docs/docs/api/
tests/integration/client-typescript/node_modules/
.ts-client-checkout/


@@ -16,16 +16,16 @@ import sys
from tests.integration.suites import SETUP_DEFINITIONS, SUITE_DEFINITIONS
def get_setup_env_vars(setup_name, suite_name=None):
def get_setup_config(setup_name, suite_name=None):
"""
Get environment variables for a setup, with optional suite default fallback.
Get full configuration (env vars + defaults) for a setup.
Args:
setup_name: Name of the setup (e.g., 'ollama', 'gpt')
suite_name: Optional suite name to get default setup if setup_name is None
Returns:
Dictionary of environment variables
Dictionary with 'env' and 'defaults' keys
"""
# If no setup specified, try to get default from suite
if not setup_name and suite_name:
@@ -34,7 +34,7 @@ def get_setup_env_vars(setup_name, suite_name=None):
setup_name = suite.default_setup
if not setup_name:
return {}
return {"env": {}, "defaults": {}}
setup = SETUP_DEFINITIONS.get(setup_name)
if not setup:
@@ -44,27 +44,31 @@ def get_setup_env_vars(setup_name, suite_name=None):
)
sys.exit(1)
return setup.env
return {"env": setup.env, "defaults": setup.defaults}
def main():
parser = argparse.ArgumentParser(description="Extract environment variables from a test setup")
parser = argparse.ArgumentParser(description="Extract environment variables and defaults from a test setup")
parser.add_argument("--setup", help="Setup name (e.g., ollama, gpt)")
parser.add_argument("--suite", help="Suite name to get default setup from if --setup not provided")
parser.add_argument("--format", choices=["bash", "json"], default="bash", help="Output format (default: bash)")
args = parser.parse_args()
env_vars = get_setup_env_vars(args.setup, args.suite)
config = get_setup_config(args.setup, args.suite)
if args.format == "bash":
# Output as bash export statements
for key, value in env_vars.items():
# Output env vars as bash export statements
for key, value in config["env"].items():
print(f"export {key}='{value}'")
# Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix
for key, value in config["defaults"].items():
env_key = f"LLAMA_STACK_TEST_{key.upper()}"
print(f"export {env_key}='{value}'")
elif args.format == "json":
import json
print(json.dumps(env_vars))
print(json.dumps(config))
if __name__ == "__main__":


@@ -181,6 +181,10 @@ echo "$SETUP_ENV"
eval "$SETUP_ENV"
echo ""
# Export suite and setup names for TypeScript tests
export LLAMA_STACK_TEST_SUITE="$TEST_SUITE"
export LLAMA_STACK_TEST_SETUP="$TEST_SETUP"
ROOT_DIR="$THIS_DIR/.."
cd $ROOT_DIR
@@ -212,6 +216,71 @@ find_available_port() {
return 1
}
run_client_ts_tests() {
if ! command -v npm &>/dev/null; then
echo "npm could not be found; ensure Node.js is installed"
return 1
fi
pushd tests/integration/client-typescript >/dev/null
# Determine if TS_CLIENT_PATH is a directory path or an npm version
if [[ -d "$TS_CLIENT_PATH" ]]; then
# It's a directory path - use local checkout
if [[ ! -f "$TS_CLIENT_PATH/package.json" ]]; then
echo "Error: $TS_CLIENT_PATH exists but doesn't look like llama-stack-client-typescript (no package.json)"
popd >/dev/null
return 1
fi
echo "Using local llama-stack-client-typescript from: $TS_CLIENT_PATH"
# Build the TypeScript client first
echo "Building TypeScript client..."
pushd "$TS_CLIENT_PATH" >/dev/null
npm install --silent
npm run build --silent
popd >/dev/null
# Install other dependencies first
if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then
npm ci --silent
else
npm install --silent
fi
# Then install the client from local directory
echo "Installing llama-stack-client from: $TS_CLIENT_PATH"
npm install "$TS_CLIENT_PATH" --silent
else
# It's an npm version specifier - install from npm
echo "Installing llama-stack-client@${TS_CLIENT_PATH} from npm"
if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then
npm ci --silent
npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
else
npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
fi
fi
# Verify installation
echo "Verifying llama-stack-client installation..."
if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
echo "✅ llama-stack-client successfully installed"
npm list llama-stack-client
else
echo "❌ llama-stack-client not found in node_modules"
echo "Installed packages:"
npm list --depth=0
popd >/dev/null
return 1
fi
echo "Running TypeScript tests for suite $TEST_SUITE (setup $TEST_SETUP)"
npm test
popd >/dev/null
}
# Start Llama Stack Server if needed
if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then
# Find an available port for the server
@@ -221,6 +290,7 @@ if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then
exit 1
fi
export LLAMA_STACK_PORT
export TEST_API_BASE_URL="http://localhost:$LLAMA_STACK_PORT"
echo "Will use port: $LLAMA_STACK_PORT"
stop_server() {
@@ -298,6 +368,7 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
exit 1
fi
export LLAMA_STACK_PORT
export TEST_API_BASE_URL="http://localhost:$LLAMA_STACK_PORT"
echo "Will use port: $LLAMA_STACK_PORT"
echo "=== Building Docker Image for distribution: $DISTRO ==="
@@ -506,5 +577,10 @@ else
exit 1
fi
# Run TypeScript client tests if TS_CLIENT_PATH is set
if [[ $exit_code -eq 0 && -n "${TS_CLIENT_PATH:-}" && "${LLAMA_STACK_TEST_STACK_CONFIG_TYPE:-}" == "server" ]]; then
run_client_ts_tests
fi
echo ""
echo "=== Integration Tests Complete ==="


@@ -211,3 +211,23 @@ def test_asymmetric_embeddings(llama_stack_client, embedding_model_id):
assert query_response.embeddings is not None
```
## TypeScript Client Replays
TypeScript SDK tests can run alongside Python tests when testing against `server:<config>` stacks. Set `TS_CLIENT_PATH` to the path or version of `llama-stack-client-typescript` to enable:
```bash
# Use published npm package (responses suite)
TS_CLIENT_PATH=^0.3.2 scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
# Use local checkout from ~/.cache (recommended for development)
git clone https://github.com/llamastack/llama-stack-client-typescript.git ~/.cache/llama-stack-client-typescript
TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
# Run base suite with TypeScript tests
TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite base --setup ollama
```
TypeScript tests run immediately after Python tests pass, using the same replay fixtures. The mapping between Python suites/setups and TypeScript test files is defined in `tests/integration/client-typescript/suites.json`.
If `TS_CLIENT_PATH` is unset, TypeScript tests are skipped entirely.
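
A minimal sketch of a mapped test, following the pattern of the tests under `tests/integration/client-typescript/__tests__/` (the test id must be copied verbatim from the corresponding pytest node id):

```typescript
import { createTestClient, requireTextModel, getResponseOutputText } from '../setup';

describe('Responses API - example', () => {
  test('non-streaming earth response replays the recorded answer', async () => {
    // Must match the pytest node id of the Python test whose recording we replay.
    const testId =
      'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-earth]';
    const client = createTestClient(testId);

    const response = await client.responses.create({
      model: requireTextModel(),
      input: 'Which planet do humans live on?',
      stream: false,
    });

    expect(getResponseOutputText(response).toLowerCase()).toContain('earth');
  });
});
```

For the test to actually run, its file must also be listed under the matching suite/setup entry in `suites.json`.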


@@ -0,0 +1,104 @@
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the terms described in the LICENSE file in
// the root directory of this source tree.
/**
* Integration tests for Inference API (Chat Completions).
* Ported from: llama-stack/tests/integration/inference/test_openai_completion.py
*
* IMPORTANT: Test cases must match EXACTLY with Python tests to use recorded API responses.
*/
import { createTestClient, requireTextModel } from '../setup';
describe('Inference API - Chat Completions', () => {
// Test cases matching llama-stack/tests/integration/test_cases/inference/chat_completion.json
const chatCompletionTestCases = [
{
id: 'non_streaming_01',
question: 'Which planet do humans live on?',
expected: 'earth',
testId:
'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_01]',
},
{
id: 'non_streaming_02',
question: 'Which planet has rings around it with a name starting with letter S?',
expected: 'saturn',
testId:
'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_02]',
},
];
const streamingTestCases = [
{
id: 'streaming_01',
question: "What's the name of the Sun in latin?",
expected: 'sol',
testId:
'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:streaming_01]',
},
{
id: 'streaming_02',
question: 'What is the name of the US captial?',
expected: 'washington',
testId:
'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:streaming_02]',
},
];
test.each(chatCompletionTestCases)(
'chat completion non-streaming: $id',
async ({ question, expected, testId }) => {
const client = createTestClient(testId);
const textModel = requireTextModel();
const response = await client.chat.completions.create({
model: textModel,
messages: [
{
role: 'user',
content: question,
},
],
stream: false,
});
// Non-streaming responses have choices with message property
const choice = response.choices[0];
expect(choice).toBeDefined();
if (!choice || !('message' in choice)) {
throw new Error('Expected non-streaming response with message');
}
const content = choice.message.content;
expect(content).toBeDefined();
const messageContent = typeof content === 'string' ? content.toLowerCase().trim() : '';
expect(messageContent.length).toBeGreaterThan(0);
expect(messageContent).toContain(expected.toLowerCase());
},
);
test.each(streamingTestCases)('chat completion streaming: $id', async ({ question, expected, testId }) => {
const client = createTestClient(testId);
const textModel = requireTextModel();
const stream = await client.chat.completions.create({
model: textModel,
messages: [{ role: 'user', content: question }],
stream: true,
});
const streamedContent: string[] = [];
for await (const chunk of stream) {
if (chunk.choices && chunk.choices.length > 0 && chunk.choices[0]?.delta?.content) {
streamedContent.push(chunk.choices[0].delta.content);
}
}
expect(streamedContent.length).toBeGreaterThan(0);
const fullContent = streamedContent.join('').toLowerCase().trim();
expect(fullContent).toContain(expected.toLowerCase());
});
});


@@ -0,0 +1,132 @@
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the terms described in the LICENSE file in
// the root directory of this source tree.
/**
* Integration tests for Responses API.
* Ported from: llama-stack/tests/integration/responses/test_basic_responses.py
*
* IMPORTANT: Test cases and IDs must match EXACTLY with Python tests to use recorded API responses.
*/
import { createTestClient, requireTextModel, getResponseOutputText } from '../setup';
describe('Responses API - Basic', () => {
// Test cases matching llama-stack/tests/integration/responses/fixtures/test_cases.py
const basicTestCases = [
{
id: 'earth',
input: 'Which planet do humans live on?',
expected: 'earth',
// Use client_with_models fixture to match non-streaming recordings
testId:
'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-earth]',
},
{
id: 'saturn',
input: 'Which planet has rings around it with a name starting with letter S?',
expected: 'saturn',
testId:
'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-saturn]',
},
];
test.each(basicTestCases)('non-streaming basic response: $id', async ({ input, expected, testId }) => {
// Create client with test_id for all requests
const client = createTestClient(testId);
const textModel = requireTextModel();
// Create a response
const response = await client.responses.create({
model: textModel,
input,
stream: false,
});
// Verify response has content
const outputText = getResponseOutputText(response).toLowerCase().trim();
expect(outputText.length).toBeGreaterThan(0);
expect(outputText).toContain(expected.toLowerCase());
// Verify usage is reported
expect(response.usage).toBeDefined();
expect(response.usage!.input_tokens).toBeGreaterThan(0);
expect(response.usage!.output_tokens).toBeGreaterThan(0);
expect(response.usage!.total_tokens).toBe(response.usage!.input_tokens + response.usage!.output_tokens);
// Verify stored response matches
const retrievedResponse = await client.responses.retrieve(response.id);
expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(response));
// Test follow-up with previous_response_id
const nextResponse = await client.responses.create({
model: textModel,
input: 'Repeat your previous response in all caps.',
previous_response_id: response.id,
});
const nextOutputText = getResponseOutputText(nextResponse).trim();
expect(nextOutputText).toContain(expected.toUpperCase());
});
test.each(basicTestCases)('streaming basic response: $id', async ({ input, expected, testId }) => {
// Modify test_id for streaming variant
const streamingTestId = testId.replace(
'test_response_non_streaming_basic',
'test_response_streaming_basic',
);
const client = createTestClient(streamingTestId);
const textModel = requireTextModel();
// Create a streaming response
const stream = await client.responses.create({
model: textModel,
input,
stream: true,
});
const events: any[] = [];
let responseId = '';
for await (const chunk of stream) {
events.push(chunk);
if (chunk.type === 'response.created') {
// Verify response.created is the first event
expect(events.length).toBe(1);
expect(chunk.response.status).toBe('in_progress');
responseId = chunk.response.id;
} else if (chunk.type === 'response.completed') {
// Verify response.completed comes after response.created
expect(events.length).toBeGreaterThanOrEqual(2);
expect(chunk.response.status).toBe('completed');
expect(chunk.response.id).toBe(responseId);
// Verify content quality
const outputText = getResponseOutputText(chunk.response).toLowerCase().trim();
expect(outputText.length).toBeGreaterThan(0);
expect(outputText).toContain(expected.toLowerCase());
// Verify usage is reported
expect(chunk.response.usage).toBeDefined();
expect(chunk.response.usage!.input_tokens).toBeGreaterThan(0);
expect(chunk.response.usage!.output_tokens).toBeGreaterThan(0);
expect(chunk.response.usage!.total_tokens).toBe(
chunk.response.usage!.input_tokens + chunk.response.usage!.output_tokens,
);
}
}
// Verify we got both events
expect(events.length).toBeGreaterThanOrEqual(2);
const firstEvent = events[0];
const lastEvent = events[events.length - 1];
expect(firstEvent.type).toBe('response.created');
expect(lastEvent.type).toBe('response.completed');
// Verify stored response matches streamed response
const retrievedResponse = await client.responses.retrieve(responseId);
expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(lastEvent.response));
});
});


@@ -0,0 +1,31 @@
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the terms described in the LICENSE file in
// the root directory of this source tree.
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: 'ts-jest/presets/default-esm',
testEnvironment: 'node',
extensionsToTreatAsEsm: ['.ts'],
moduleNameMapper: {
'^(\\.{1,2}/.*)\\.js$': '$1',
},
transform: {
'^.+\\.tsx?$': [
'ts-jest',
{
useESM: true,
tsconfig: {
module: 'ES2022',
moduleResolution: 'bundler',
},
},
],
},
testMatch: ['<rootDir>/__tests__/**/*.test.ts'],
setupFilesAfterEnv: ['<rootDir>/setup.ts'],
testTimeout: 60000, // 60 seconds (integration tests can be slow)
watchman: false, // Disable watchman to avoid permission issues
};

(File diff suppressed because it is too large.)


@@ -0,0 +1,18 @@
{
"name": "llama-stack-typescript-integration-tests",
"version": "0.0.1",
"private": true,
"description": "TypeScript client integration tests for Llama Stack",
"scripts": {
"test": "node run-tests.js"
},
"devDependencies": {
"@swc/core": "^1.3.102",
"@swc/jest": "^0.2.29",
"@types/jest": "^29.4.0",
"@types/node": "^20.0.0",
"jest": "^29.4.0",
"ts-jest": "^29.1.0",
"typescript": "^5.0.0"
}
}


@@ -0,0 +1,63 @@
#!/usr/bin/env node
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the terms described in the LICENSE file in
// the root directory of this source tree.
/**
* Test runner that finds and executes TypeScript tests based on suite/setup mapping.
* Called by integration-tests.sh via npm test.
*/
const fs = require('fs');
const path = require('path');
const { execSync } = require('child_process');
const suite = process.env.LLAMA_STACK_TEST_SUITE;
const setup = process.env.LLAMA_STACK_TEST_SETUP || '';
if (!suite) {
console.error('Error: LLAMA_STACK_TEST_SUITE environment variable is required');
process.exit(1);
}
// Read suites.json to find matching test files
const suitesPath = path.join(__dirname, 'suites.json');
if (!fs.existsSync(suitesPath)) {
console.log(`No TypeScript tests configured (${suitesPath} not found)`);
process.exit(0);
}
const suites = JSON.parse(fs.readFileSync(suitesPath, 'utf-8'));
// Find matching entry
let testFiles = [];
for (const entry of suites) {
if (entry.suite !== suite) {
continue;
}
const entrySetup = entry.setup || '';
if (entrySetup && entrySetup !== setup) {
continue;
}
testFiles = entry.files || [];
break;
}
if (testFiles.length === 0) {
console.log(`No TypeScript integration tests mapped for suite ${suite} (setup ${setup})`);
process.exit(0);
}
console.log(`Running TypeScript tests for suite ${suite} (setup ${setup}): ${testFiles.join(', ')}`);
// Run Jest with the mapped test files
try {
execSync(`npx jest --config jest.integration.config.js ${testFiles.join(' ')}`, {
stdio: 'inherit',
cwd: __dirname,
});
} catch (error) {
process.exit(error.status || 1);
}


@@ -0,0 +1,162 @@
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the terms described in the LICENSE file in
// the root directory of this source tree.
/**
* Global setup for integration tests.
* This file mimics pytest's fixture system by providing shared test configuration.
*/
import LlamaStackClient from 'llama-stack-client';
/**
* Load test configuration from the Python setup system.
* This reads setup definitions from tests/integration/suites.py via get_setup_env.py.
*/
function loadTestConfig() {
const baseURL = process.env['TEST_API_BASE_URL'];
const setupName = process.env['LLAMA_STACK_TEST_SETUP'];
const textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
const embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
if (!baseURL) {
throw new Error(
'TEST_API_BASE_URL is required for integration tests. ' +
'Run tests using: ./scripts/integration-tests.sh',
);
}
return {
baseURL,
textModel,
embeddingModel,
setupName,
};
}
// Read configuration from environment variables (set by scripts/integration-tests.sh)
export const TEST_CONFIG = loadTestConfig();
// Validate required configuration
beforeAll(() => {
console.log('\n=== Integration Test Configuration ===');
console.log(`Base URL: ${TEST_CONFIG.baseURL}`);
console.log(`Setup: ${TEST_CONFIG.setupName || 'NOT SET'}`);
console.log(
`Text Model: ${TEST_CONFIG.textModel || 'NOT SET - tests requiring text model will be skipped'}`,
);
console.log(
`Embedding Model: ${
TEST_CONFIG.embeddingModel || 'NOT SET - tests requiring embedding model will be skipped'
}`,
);
console.log('=====================================\n');
});
/**
* Create a client instance for integration tests.
* Mimics pytest's `llama_stack_client` fixture.
*
* @param testId - Test ID to send in X-LlamaStack-Provider-Data header for replay mode.
* Format: "tests/integration/responses/test_basic_responses.py::test_name[params]"
*/
export function createTestClient(testId?: string): LlamaStackClient {
const headers: Record<string, string> = {};
// In server mode with replay, send test ID for recording isolation
if (process.env['LLAMA_STACK_TEST_STACK_CONFIG_TYPE'] === 'server' && testId) {
headers['X-LlamaStack-Provider-Data'] = JSON.stringify({
__test_id: testId,
});
}
return new LlamaStackClient({
baseURL: TEST_CONFIG.baseURL,
timeout: 60000, // 60 seconds
defaultHeaders: headers,
});
}
/**
* Skip test if required model is not configured.
* Mimics pytest's `skip_if_no_model` autouse fixture.
*/
export function skipIfNoModel(modelType: 'text' | 'embedding'): typeof test {
const model = modelType === 'text' ? TEST_CONFIG.textModel : TEST_CONFIG.embeddingModel;
if (!model) {
const envVar = modelType === 'text' ? 'LLAMA_STACK_TEST_TEXT_MODEL' : 'LLAMA_STACK_TEST_EMBEDDING_MODEL';
const message = `Skipping: ${modelType} model not configured (set ${envVar})`;
return test.skip.bind(test) as typeof test;
}
return test;
}
/**
* Get the configured text model, throwing if not set.
* Use this in tests that absolutely require a text model.
*/
export function requireTextModel(): string {
if (!TEST_CONFIG.textModel) {
throw new Error(
'LLAMA_STACK_TEST_TEXT_MODEL environment variable is required. ' +
'Run tests using: ./scripts/integration-tests.sh',
);
}
return TEST_CONFIG.textModel;
}
/**
* Get the configured embedding model, throwing if not set.
* Use this in tests that absolutely require an embedding model.
*/
export function requireEmbeddingModel(): string {
if (!TEST_CONFIG.embeddingModel) {
throw new Error(
'LLAMA_STACK_TEST_EMBEDDING_MODEL environment variable is required. ' +
'Run tests using: ./scripts/integration-tests.sh',
);
}
return TEST_CONFIG.embeddingModel;
}
/**
* Extracts aggregated text output from a ResponseObject.
* This concatenates all text content from the response's output array.
*
* Copied from llama-stack-client's response-helpers until it's available in published version.
*/
export function getResponseOutputText(response: any): string {
const pieces: string[] = [];
for (const output of response.output ?? []) {
if (!output || output.type !== 'message') {
continue;
}
const content = output.content;
if (typeof content === 'string') {
pieces.push(content);
continue;
}
if (!Array.isArray(content)) {
continue;
}
for (const item of content) {
if (typeof item === 'string') {
pieces.push(item);
continue;
}
if (item && item.type === 'output_text' && 'text' in item && typeof item.text === 'string') {
pieces.push(item.text);
}
}
}
return pieces.join('');
}


@@ -0,0 +1,12 @@
[
{
"suite": "responses",
"setup": "gpt",
"files": ["__tests__/responses.test.ts"]
},
{
"suite": "base",
"setup": "ollama",
"files": ["__tests__/inference.test.ts"]
}
]


@@ -0,0 +1,16 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ES2022",
"lib": ["ES2022"],
"moduleResolution": "bundler",
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"strict": true,
"skipLibCheck": true,
"resolveJsonModule": true,
"types": ["jest", "node"]
},
"include": ["**/*.ts"],
"exclude": ["node_modules"]
}