name: auto-tests

on:
  # pull_request:
  workflow_dispatch:
    inputs:
      commit_sha:
        description: 'Specific Commit SHA to trigger on'
        required: false
        default: $GITHUB_SHA  # default to the last commit of $GITHUB_REF branch

jobs:
  test-llama-stack-as-library:
    runs-on: ubuntu-latest
    env:
      TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
      FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
      TAVILY_SEARCH_API_KEY: ${{ secrets.TAVILY_SEARCH_API_KEY }}
    strategy:
      matrix:
        provider: [fireworks, together]

    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          ref: ${{ github.event.inputs.commit_sha }}

      - name: Echo commit SHA
        run: |
          echo "Triggered on commit SHA: ${{ github.event.inputs.commit_sha }}"
          git rev-parse HEAD

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt pytest
          pip install -e .

      - name: Build providers
        run: |
          llama stack build --template ${{ matrix.provider }} --image-type venv

      - name: Install the latest llama-stack-client & llama-models packages
        run: |
          pip install -e git+https://github.com/meta-llama/llama-stack-client-python.git#egg=llama-stack-client
          pip install -e git+https://github.com/meta-llama/llama-models.git#egg=llama-models

      - name: Run client-sdk test
        working-directory: "${{ github.workspace }}"
        env:
          REPORT_OUTPUT: md_report.md
        shell: bash
        run: |
          pip install --upgrade pytest-md-report
          echo "REPORT_FILE=${REPORT_OUTPUT}" >> "$GITHUB_ENV"

          export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
          LLAMA_STACK_CONFIG=./llama_stack/templates/${{ matrix.provider }}/run.yaml pytest --md-report --md-report-verbose=1 ./tests/client-sdk/inference/ --md-report-output "$REPORT_OUTPUT"

      - name: Output reports to the job summary
        if: always()
        shell: bash
        run: |
          if [ -f "$REPORT_FILE" ]; then
            echo "<details><summary>Test Report for ${{ matrix.provider }} </summary>" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            cat "$REPORT_FILE" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "</details>" >> $GITHUB_STEP_SUMMARY
          fi