fix(ci): simplify integration tests replay mode (#2997)

We are going to split the record and replay workflows completely to simplify
the concurrency key design.

We can add vision tests simply by extending our matrix.
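
A minimal sketch of what the replay side could look like after the split, assuming hypothetical workflow, job, and action names and illustrative matrix values (none of these are taken from the PR):

```yaml
# Hypothetical replay-only workflow; names and matrix values are assumptions.
name: Integration Tests (Replay)

on:
  pull_request:

concurrency:
  # Record and replay no longer share a workflow, so a per-ref key is enough.
  group: integration-replay-${{ github.ref }}
  cancel-in-progress: true

jobs:
  replay-tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        provider: [ollama, vllm]
        # Adding vision tests is just another matrix dimension.
        run-vision-tests: ['false', 'true']
    steps:
      - uses: actions/checkout@v4
      - name: Setup test environment
        # Assumed composite action path; the diff below shows its inputs.
        uses: ./.github/actions/setup-test-environment
        with:
          provider: ${{ matrix.provider }}
          run-vision-tests: ${{ matrix.run-vision-tests }}
          inference-mode: replay
```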
Ashwin Bharambe 2025-07-31 15:18:18 -07:00 committed by GitHub
parent 218c89fff1
commit f4489eeb83
5 changed files with 19 additions and 220 deletions


@@ -192,7 +192,7 @@ runs:
if: ${{ always() }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
-name: ${{ inputs.inference-mode }}-logs-${{ github.run_id }}-${{ github.run_attempt || '' }}-${{ inputs.provider }}-${{ inputs.run-vision-tests }}-${{ inputs.stack-config }}
+name: logs-${{ github.run_id }}-${{ github.run_attempt || '' }}-${{ strategy.job-index }}
path: |
*.log
retention-days: 1
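
The long provider/mode/vision suffix in the artifact name is no longer needed: `strategy.job-index` is a built-in GitHub Actions context value giving the zero-based index of the current job within its matrix, so it already distinguishes artifacts from parallel matrix jobs. For illustration (the matrix values are assumed, not taken from the PR):

```yaml
# With a 2x2 matrix like this, strategy.job-index takes the values 0..3, so
# logs-${{ github.run_id }}-${{ github.run_attempt || '' }}-${{ strategy.job-index }}
# is unique for every job in the run.
strategy:
  matrix:
    provider: [ollama, vllm]
    run-vision-tests: ['false', 'true']
```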


@@ -16,6 +16,9 @@ inputs:
description: 'Whether to setup provider for vision tests'
required: false
default: 'false'
+inference-mode:
+description: 'Inference mode (record or replay)'
+required: true
runs:
using: 'composite'
@@ -27,13 +30,13 @@ runs:
client-version: ${{ inputs.client-version }}
- name: Setup ollama
-if: ${{ inputs.provider == 'ollama' }}
+if: ${{ inputs.provider == 'ollama' && inputs.inference-mode == 'record' }}
uses: ./.github/actions/setup-ollama
with:
run-vision-tests: ${{ inputs.run-vision-tests }}
- name: Setup vllm
-if: ${{ inputs.provider == 'vllm' }}
+if: ${{ inputs.provider == 'vllm' && inputs.inference-mode == 'record' }}
uses: ./.github/actions/setup-vllm
- name: Build Llama Stack
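
With the new `inference-mode` input, the ollama and vllm setup steps are guarded by `inputs.inference-mode == 'record'`, so a live inference server is only started when recordings are being produced; replay runs skip them entirely. A record-side invocation of this composite action might look roughly like the following (step name and action path are assumptions, not taken from the PR):

```yaml
# Hypothetical record-mode step; only with inference-mode: record do the
# guarded setup-ollama / setup-vllm steps above run.
- name: Setup test environment
  uses: ./.github/actions/setup-test-environment
  with:
    provider: ollama
    run-vision-tests: 'false'
    inference-mode: record
```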