fix: correct env var names and build TS client before install

Ashwin Bharambe 2025-11-19 08:54:06 -08:00
parent 6d52a656df
commit afa663e683
6 changed files with 59 additions and 25 deletions


@@ -217,15 +217,15 @@ def test_asymmetric_embeddings(llama_stack_client, embedding_model_id):
TypeScript SDK tests can run alongside Python tests when testing against `server:<config>` stacks. Set `TS_CLIENT_PATH` to the path or version of `llama-stack-client-typescript` to enable:
```bash
-# Use published npm package
+# Use published npm package (responses suite)
TS_CLIENT_PATH=^0.3.2 scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
# Use local checkout from ~/.cache (recommended for development)
git clone https://github.com/llamastack/llama-stack-client-typescript.git ~/.cache/llama-stack-client-typescript
TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
# Use any local path
TS_CLIENT_PATH=/path/to/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
+# Run base suite with TypeScript tests
+TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite base --setup ollama
```
TypeScript tests run immediately after Python tests pass, using the same replay fixtures. The mapping between Python suites/setups and TypeScript test files is defined in `tests/integration/client-typescript/suites.json`.
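
For orientation, here is a sketch of that mapping's shape as a TypeScript interface; the field names (`suite`, `setup`, `files`) are taken from the `suites.json` hunk further down in this commit, while the interface and variable names are illustrative only:

```typescript
// Hypothetical typing of one suites.json entry; the field names mirror
// the JSON diff in this commit, the interface name is made up for clarity.
interface SuiteMapping {
  suite: string;   // Python integration suite, e.g. "responses" or "base"
  setup?: string;  // optional Python setup name, e.g. "ollama"
  files: string[]; // TypeScript test files to run for that suite/setup
}

// The entry this commit rewrites maps the base/ollama run to the
// inference tests:
const baseOllama: SuiteMapping = {
  suite: 'base',
  setup: 'ollama',
  files: ['__tests__/inference.test.ts'],
};
```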


@@ -12,6 +12,7 @@
*/
import { createTestClient, requireTextModel } from '../setup';
+import { getResponseOutputText } from 'llama-stack-client';
describe('Responses API - Basic', () => {
// Test cases matching llama-stack/tests/integration/responses/fixtures/test_cases.py
@@ -46,7 +47,7 @@ describe('Responses API - Basic', () => {
});
// Verify response has content
-const outputText = response.output_text.toLowerCase().trim();
+const outputText = getResponseOutputText(response).toLowerCase().trim();
expect(outputText.length).toBeGreaterThan(0);
expect(outputText).toContain(expected.toLowerCase());
@@ -58,7 +59,7 @@ describe('Responses API - Basic', () => {
// Verify stored response matches
const retrievedResponse = await client.responses.retrieve(response.id);
-expect(retrievedResponse.output_text).toBe(response.output_text);
+expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(response));
// Test follow-up with previous_response_id
const nextResponse = await client.responses.create({
@@ -66,7 +67,7 @@ describe('Responses API - Basic', () => {
input: 'Repeat your previous response in all caps.',
previous_response_id: response.id,
});
-const nextOutputText = nextResponse.output_text.trim();
+const nextOutputText = getResponseOutputText(nextResponse).trim();
expect(nextOutputText).toContain(expected.toUpperCase());
});
@@ -104,7 +105,7 @@ describe('Responses API - Basic', () => {
expect(chunk.response.id).toBe(responseId);
// Verify content quality
-const outputText = chunk.response.output_text.toLowerCase().trim();
+const outputText = getResponseOutputText(chunk.response).toLowerCase().trim();
expect(outputText.length).toBeGreaterThan(0);
expect(outputText).toContain(expected.toLowerCase());
@@ -127,6 +128,6 @@ describe('Responses API - Basic', () => {
// Verify stored response matches streamed response
const retrievedResponse = await client.responses.retrieve(responseId);
-expect(retrievedResponse.output_text).toBe(lastEvent.response.output_text);
+expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(lastEvent.response));
});
});
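
The hunks above all apply one pattern: read response text through the `getResponseOutputText` helper (the named export from `llama-stack-client` added in the first hunk) instead of accessing `output_text` directly, which also covers streamed `chunk.response` objects. A minimal sketch of that pattern, assuming `createTestClient` takes no arguments and that `responses.create` accepts a `model` parameter, as the existing tests suggest:

```typescript
import { createTestClient, requireTextModel } from '../setup';
import { getResponseOutputText } from 'llama-stack-client';

// Illustrative test using the helper-based pattern from the hunks above.
it('returns non-empty output text (sketch)', async () => {
  const client = createTestClient();
  const response = await client.responses.create({
    model: requireTextModel(),
    input: 'Say hello.',
  });
  // Extract text via the helper rather than reading response.output_text.
  const text = getResponseOutputText(response).toLowerCase().trim();
  expect(text.length).toBeGreaterThan(0);
});
```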


@@ -29,7 +29,7 @@ function loadTestConfig() {
}
// If setup is specified, load config from Python
-let textModel = process.env['LLAMA_STACK_TEST_MODEL'];
+let textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
let embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
if (setupName && !textModel) {
@@ -114,7 +114,8 @@ export function skipIfNoModel(modelType: 'text' | 'embedding'): typeof test {
const model = modelType === 'text' ? TEST_CONFIG.textModel : TEST_CONFIG.embeddingModel;
if (!model) {
-const message = `Skipping: ${modelType} model not configured (set LLAMA_STACK_TEST_${modelType.toUpperCase()}_MODEL)`;
+const envVar = modelType === 'text' ? 'LLAMA_STACK_TEST_TEXT_MODEL' : 'LLAMA_STACK_TEST_EMBEDDING_MODEL';
+const message = `Skipping: ${modelType} model not configured (set ${envVar})`;
return test.skip.bind(test) as typeof test;
}
@@ -128,7 +129,7 @@ export function skipIfNoModel(modelType: 'text' | 'embedding'): typeof test {
export function requireTextModel(): string {
if (!TEST_CONFIG.textModel) {
throw new Error(
-'LLAMA_STACK_TEST_MODEL environment variable is required. ' +
+'LLAMA_STACK_TEST_TEXT_MODEL environment variable is required. ' +
'Run tests using: ./scripts/integration-tests.sh',
);
}
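
For context, a sketch of how these helpers are consumed after the rename; the helper names, return types, and the two environment variables match the hunks above, while the test body is illustrative:

```typescript
import { skipIfNoModel, requireTextModel } from '../setup';

// skipIfNoModel('embedding') returns Jest's `test` function, or a skipping
// variant when LLAMA_STACK_TEST_EMBEDDING_MODEL is not configured, so it
// can stand in for `test` directly.
skipIfNoModel('embedding')('embeds a document (sketch)', async () => {
  // ...test body would use the configured embedding model here...
});

// requireTextModel() returns the configured model id, or throws with a
// message pointing at LLAMA_STACK_TEST_TEXT_MODEL if it is unset.
const textModel = requireTextModel();
```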


@@ -5,16 +5,8 @@
"files": ["__tests__/responses.test.ts"]
},
{
"suite": "responses",
"files": ["__tests__/responses.test.ts"]
},
{
"suite": "inference",
"suite": "base",
"setup": "ollama",
"files": ["__tests__/inference.test.ts"]
-},
-{
-"suite": "inference",
-"files": ["__tests__/inference.test.ts"]
}
]