diff --git a/.github/actions/run-and-record-tests/action.yml b/.github/actions/run-and-record-tests/action.yml
index da78f6543..834528685 100644
--- a/.github/actions/run-and-record-tests/action.yml
+++ b/.github/actions/run-and-record-tests/action.yml
@@ -32,25 +32,25 @@ runs:
   - name: Set environment variables
     shell: bash
     run: |
-      export LLAMA_STACK_CLIENT_TIMEOUT="300"
-      export LLAMA_STACK_TEST_INFERENCE_MODE="${{ inputs.inference-mode }}"
-      export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings"
+      echo "LLAMA_STACK_CLIENT_TIMEOUT=300" >> "$GITHUB_ENV"
+      echo "LLAMA_STACK_TEST_INFERENCE_MODE=${{ inputs.inference-mode }}" >> "$GITHUB_ENV"
 
       # Configure provider-specific settings
       if [ "${{ inputs.provider }}" == "ollama" ]; then
-        export OLLAMA_URL="http://0.0.0.0:11434"
-        export TEXT_MODEL="ollama/llama3.2:3b-instruct-fp16"
-        export SAFETY_MODEL="ollama/llama-guard3:1b"
+        echo "OLLAMA_URL=http://0.0.0.0:11434" >> "$GITHUB_ENV"
+        echo "TEXT_MODEL=ollama/llama3.2:3b-instruct-fp16" >> "$GITHUB_ENV"
+        echo "SAFETY_MODEL=ollama/llama-guard3:1b" >> "$GITHUB_ENV"
       else
-        export VLLM_URL="http://localhost:8000/v1"
-        export TEXT_MODEL="vllm/meta-llama/Llama-3.2-1B-Instruct"
+        echo "VLLM_URL=http://localhost:8000/v1" >> "$GITHUB_ENV"
+        echo "TEXT_MODEL=vllm/meta-llama/Llama-3.2-1B-Instruct" >> "$GITHUB_ENV"
       fi
 
       if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
-        export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings/vision"
+        echo "LLAMA_STACK_TEST_RECORDING_DIR=tests/integration/recordings/vision" >> "$GITHUB_ENV"
+      else
+        echo "LLAMA_STACK_TEST_RECORDING_DIR=tests/integration/recordings" >> "$GITHUB_ENV"
       fi
 
-
   - name: Run Llama Stack Server
     if: ${{ contains(inputs.stack-config, 'server:') }}
     shell: bash