mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 01:48:05 +00:00)
fix: correct env var names and build TS client before install

parent 6d52a656df · commit afa663e683
6 changed files with 59 additions and 25 deletions
@@ -33,4 +33,3 @@ runs:
       echo "::error::Invalid client-version: ${{ inputs.client-version }}"
       exit 1
     fi
-
@@ -237,15 +237,56 @@ run_client_ts_tests() {
       return 1
     fi
     echo "Using local llama-stack-client-typescript from: $TS_CLIENT_PATH"
-    npm install --install-links "$TS_CLIENT_PATH"
+
+    # Build the TypeScript client first
+    echo "Building TypeScript client..."
+    pushd "$TS_CLIENT_PATH" >/dev/null
+    npm install --silent
+    npm run build --silent
+    popd >/dev/null
+
+    # Install other dependencies first
+    if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then
+      npm ci --silent
+    else
+      npm install --silent
+    fi
+
+    # Then install the client from local directory
+    echo "Installing llama-stack-client from: $TS_CLIENT_PATH"
+    npm install "$TS_CLIENT_PATH" --silent
+
+    # Verify installation
+    echo "Verifying llama-stack-client installation..."
+    if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
+      echo "✅ llama-stack-client successfully installed"
+      npm list llama-stack-client
+    else
+      echo "❌ llama-stack-client not found in node_modules"
+      echo "Installed packages:"
+      npm list --depth=0
+      return 1
+    fi
   else
     # It's an npm version specifier - install from npm
     echo "Installing llama-stack-client@${TS_CLIENT_PATH} from npm"
     if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then
-      npm ci
-      npm install "llama-stack-client@${TS_CLIENT_PATH}"
+      npm ci --silent
+      npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
     else
-      npm install "llama-stack-client@${TS_CLIENT_PATH}"
+      npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
+    fi
+
+    # Verify installation
+    echo "Verifying llama-stack-client installation..."
+    if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
+      echo "✅ llama-stack-client successfully installed"
+      npm list llama-stack-client
+    else
+      echo "❌ llama-stack-client not found in node_modules"
+      echo "Installed packages:"
+      npm list --depth=0
+      return 1
     fi
   fi
 
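Why the local-path branch now builds first (inferred from the change itself): a fresh checkout of `llama-stack-client-typescript` has no compiled output, so installing the directory as-is would yield a package whose entry point is missing. A minimal TypeScript sketch of a preflight check in the same spirit, where the `dist` output directory is an assumption rather than a detail confirmed by this diff:

```typescript
import { existsSync } from 'node:fs';
import { join } from 'node:path';

// Hypothetical preflight: treat a local client checkout as installable only
// once its build output exists ("dist" is an assumed output directory).
function isBuilt(checkoutDir: string): boolean {
  return existsSync(join(checkoutDir, 'dist'));
}

// e.g. check isBuilt(process.env['TS_CLIENT_PATH'] ?? '.') before npm install
```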
@@ -217,15 +217,15 @@ def test_asymmetric_embeddings(llama_stack_client, embedding_model_id):
 TypeScript SDK tests can run alongside Python tests when testing against `server:<config>` stacks. Set `TS_CLIENT_PATH` to the path or version of `llama-stack-client-typescript` to enable:
 
 ```bash
-# Use published npm package
+# Use published npm package (responses suite)
 TS_CLIENT_PATH=^0.3.2 scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
 
 # Use local checkout from ~/.cache (recommended for development)
 git clone https://github.com/llamastack/llama-stack-client-typescript.git ~/.cache/llama-stack-client-typescript
 TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
 
-# Use any local path
-TS_CLIENT_PATH=/path/to/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
+# Run base suite with TypeScript tests
+TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite base --setup ollama
 ```
 
 TypeScript tests run immediately after Python tests pass, using the same replay fixtures. The mapping between Python suites/setups and TypeScript test files is defined in `tests/integration/client-typescript/suites.json`.
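As the documentation above shows, `TS_CLIENT_PATH` accepts either an npm version specifier or a local directory. A minimal sketch of that dispatch, assuming the distinction is simply "existing directory versus anything else" (the actual check lives in the shell script and is not visible in this diff):

```typescript
import { existsSync, statSync } from 'node:fs';

// Hypothetical classifier for TS_CLIENT_PATH values: an existing directory is
// treated as a local checkout; anything else (e.g. "^0.3.2") as an npm
// version specifier. Illustrative only, not the script's actual logic.
function classifyTsClientPath(value: string): 'local-checkout' | 'npm-version' {
  return existsSync(value) && statSync(value).isDirectory()
    ? 'local-checkout'
    : 'npm-version';
}

console.log(classifyTsClientPath('^0.3.2')); // "npm-version"
```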
@@ -12,6 +12,7 @@
  */
 
 import { createTestClient, requireTextModel } from '../setup';
+import { getResponseOutputText } from 'llama-stack-client';
 
 describe('Responses API - Basic', () => {
   // Test cases matching llama-stack/tests/integration/responses/fixtures/test_cases.py
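The test hunks that follow replace direct `response.output_text` reads with the `getResponseOutputText` helper imported here. As a rough mental model (an assumption for illustration; the real implementation ships inside `llama-stack-client` and may differ), such a helper concatenates the text parts of the message items in `response.output`:

```typescript
// Illustrative stand-in for getResponseOutputText; the types and logic below
// are assumptions, not the llama-stack-client source.
interface OutputTextPart { type: 'output_text'; text: string }
interface MessageItem { type: 'message'; content: OutputTextPart[] }
type OutputItem = MessageItem | { type: string };

function getResponseOutputTextSketch(response: { output: OutputItem[] }): string {
  return response.output
    .filter((item): item is MessageItem => item.type === 'message')
    .flatMap((item) => item.content)
    .filter((part) => part.type === 'output_text')
    .map((part) => part.text)
    .join('');
}
```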
@@ -46,7 +47,7 @@ describe('Responses API - Basic', () => {
       });
 
       // Verify response has content
-      const outputText = response.output_text.toLowerCase().trim();
+      const outputText = getResponseOutputText(response).toLowerCase().trim();
       expect(outputText.length).toBeGreaterThan(0);
       expect(outputText).toContain(expected.toLowerCase());
 
@@ -58,7 +59,7 @@ describe('Responses API - Basic', () => {
 
       // Verify stored response matches
       const retrievedResponse = await client.responses.retrieve(response.id);
-      expect(retrievedResponse.output_text).toBe(response.output_text);
+      expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(response));
 
       // Test follow-up with previous_response_id
       const nextResponse = await client.responses.create({
@@ -66,7 +67,7 @@ describe('Responses API - Basic', () => {
         input: 'Repeat your previous response in all caps.',
         previous_response_id: response.id,
       });
-      const nextOutputText = nextResponse.output_text.trim();
+      const nextOutputText = getResponseOutputText(nextResponse).trim();
       expect(nextOutputText).toContain(expected.toUpperCase());
     });
 
@@ -104,7 +105,7 @@ describe('Responses API - Basic', () => {
       expect(chunk.response.id).toBe(responseId);
 
       // Verify content quality
-      const outputText = chunk.response.output_text.toLowerCase().trim();
+      const outputText = getResponseOutputText(chunk.response).toLowerCase().trim();
       expect(outputText.length).toBeGreaterThan(0);
       expect(outputText).toContain(expected.toLowerCase());
 
@@ -127,6 +128,6 @@ describe('Responses API - Basic', () => {
 
       // Verify stored response matches streamed response
       const retrievedResponse = await client.responses.retrieve(responseId);
-      expect(retrievedResponse.output_text).toBe(lastEvent.response.output_text);
+      expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(lastEvent.response));
     });
   });
@@ -29,7 +29,7 @@ function loadTestConfig() {
   }
 
   // If setup is specified, load config from Python
-  let textModel = process.env['LLAMA_STACK_TEST_MODEL'];
+  let textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
   let embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
 
   if (setupName && !textModel) {
@@ -114,7 +114,8 @@ export function skipIfNoModel(modelType: 'text' | 'embedding'): typeof test {
   const model = modelType === 'text' ? TEST_CONFIG.textModel : TEST_CONFIG.embeddingModel;
 
   if (!model) {
-    const message = `Skipping: ${modelType} model not configured (set LLAMA_STACK_TEST_${modelType.toUpperCase()}_MODEL)`;
+    const envVar = modelType === 'text' ? 'LLAMA_STACK_TEST_TEXT_MODEL' : 'LLAMA_STACK_TEST_EMBEDDING_MODEL';
+    const message = `Skipping: ${modelType} model not configured (set ${envVar})`;
     return test.skip.bind(test) as typeof test;
   }
 
@@ -128,7 +129,7 @@ export function skipIfNoModel(modelType: 'text' | 'embedding'): typeof test {
 export function requireTextModel(): string {
   if (!TEST_CONFIG.textModel) {
     throw new Error(
-      'LLAMA_STACK_TEST_MODEL environment variable is required. ' +
+      'LLAMA_STACK_TEST_TEXT_MODEL environment variable is required. ' +
       'Run tests using: ./scripts/integration-test.sh',
     );
   }
@@ -5,16 +5,8 @@
     "files": ["__tests__/responses.test.ts"]
   },
   {
-    "suite": "responses",
-    "files": ["__tests__/responses.test.ts"]
-  },
-  {
-    "suite": "inference",
+    "suite": "base",
     "setup": "ollama",
     "files": ["__tests__/inference.test.ts"]
-  },
-  {
-    "suite": "inference",
-    "files": ["__tests__/inference.test.ts"]
   }
 ]
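For context on how the trimmed `suites.json` above is consumed: a minimal sketch of a selector mapping a Python suite/setup pair to TypeScript test files (hypothetical; the repository's actual selection logic lives in the test scripts, not in this diff):

```typescript
import { readFileSync } from 'node:fs';

interface SuiteEntry {
  suite: string;
  setup?: string; // when present, the entry applies to that setup only
  files: string[];
}

// Hypothetical lookup: an entry matches when its suite name matches and it is
// either setup-agnostic or pinned to the requested setup.
function selectTestFiles(jsonPath: string, suite: string, setup?: string): string[] {
  const entries: SuiteEntry[] = JSON.parse(readFileSync(jsonPath, 'utf-8'));
  return entries
    .filter((e) => e.suite === suite && (e.setup === undefined || e.setup === setup))
    .flatMap((e) => e.files);
}

// e.g. selectTestFiles('suites.json', 'base', 'ollama') would return
// ['__tests__/inference.test.ts'] given the "base"/"ollama" entry above.
```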