use GITHUB_ENV

This commit is contained in:
Ashwin Bharambe 2025-07-31 10:35:56 -07:00
parent 12ea3cb442
commit 6360f2a424

View file

@ -32,25 +32,25 @@ runs:
- name: Set environment variables
  shell: bash
  run: |
    echo "LLAMA_STACK_CLIENT_TIMEOUT=300" >> $GITHUB_ENV
    echo "LLAMA_STACK_TEST_INFERENCE_MODE=${{ inputs.inference-mode }}" >> $GITHUB_ENV

    # Configure provider-specific settings
    if [ "${{ inputs.provider }}" == "ollama" ]; then
      echo "OLLAMA_URL=http://0.0.0.0:11434" >> $GITHUB_ENV
      echo "TEXT_MODEL=ollama/llama3.2:3b-instruct-fp16" >> $GITHUB_ENV
      echo "SAFETY_MODEL=ollama/llama-guard3:1b" >> $GITHUB_ENV
    else
      echo "VLLM_URL=http://localhost:8000/v1" >> $GITHUB_ENV
      echo "TEXT_MODEL=vllm/meta-llama/Llama-3.2-1B-Instruct" >> $GITHUB_ENV
    fi

    if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
      echo "LLAMA_STACK_TEST_RECORDING_DIR=tests/integration/recordings/vision" >> $GITHUB_ENV
    else
      echo "LLAMA_STACK_TEST_RECORDING_DIR=tests/integration/recordings" >> $GITHUB_ENV
    fi

- name: Run Llama Stack Server
  if: ${{ contains(inputs.stack-config, 'server:') }}
  shell: bash