diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 2157fb6a9..42e170a28 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -2,7 +2,6 @@ name: auto-tests
 on:
   pull_request:
-  push:
   workflow_dispatch:

 jobs:
@@ -14,20 +13,30 @@ jobs:
       TAVILY_SEARCH_API_KEY: ${{ secrets.TAVILY_SEARCH_API_KEY }}
     strategy:
       matrix:
-        provider: [fireworks, ollama]
+        provider: [fireworks, together]
     steps:
       - uses: actions/checkout@v4
+
       - name: Echo branch name
         run: echo "Running on branch ${{ github.ref }}"
+
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
           pip install -r requirements.txt pytest
           pip install -e .
+
       - name: Build providers
         run: |
           llama stack build --template ${{ matrix.provider }} --image-type venv
+
+      - name: Install the latest llama-stack-client & llama-models packages
+        run: |
+          pip install -e git+https://github.com/meta-llama/llama-stack-client-python.git#egg=llama-stack-client
+          pip install -e git+https://github.com/meta-llama/llama-models.git#egg=llama-models
+
       - name: Run client-sdk test
         working-directory: "${{ github.workspace }}"
-        run:
+        run: |
+          export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
           LLAMA_STACK_CONFIG=./llama_stack/templates/${{ matrix.provider }}/run.yaml pytest ./tests/client-sdk/inference/test_inference.py
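
For reference, a sketch of reproducing the client-sdk job locally for the together provider. The commands are taken from the workflow above; running them assumes a local checkout of the repo with the together template present and a TAVILY_SEARCH_API_KEY already exported (the CI job gets it from secrets).

# Local reproduction sketch of the CI job (together provider), mirroring the steps in the diff.
python -m pip install --upgrade pip
pip install -r requirements.txt pytest
pip install -e .
# Pull in the latest client and model packages, as the new workflow step does.
pip install -e git+https://github.com/meta-llama/llama-stack-client-python.git#egg=llama-stack-client
pip install -e git+https://github.com/meta-llama/llama-models.git#egg=llama-models
# Build the provider distribution and run the inference tests against its run.yaml.
llama stack build --template together --image-type venv
export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
LLAMA_STACK_CONFIG=./llama_stack/templates/together/run.yaml pytest ./tests/client-sdk/inference/test_inference.py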