From 12ea3cb442c2dc80eb7524f26801bb61ee578820 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Thu, 31 Jul 2025 10:21:06 -0700
Subject: [PATCH] setup env vars, fix ollama model reference

Persist the shared test configuration by writing it to "$GITHUB_ENV"
rather than using `export`: each workflow step runs in a fresh shell,
so a plain `export` inside a `run:` block is discarded when the step
exits and would leave the later "Run Integration Tests" step without
OLLAMA_URL / TEXT_MODEL / SAFETY_MODEL etc.

---
 .../actions/run-and-record-tests/action.yml   | 33 ++++++++++++-------
 .github/actions/setup-ollama/action.yml       |  2 +-
 .../ollama-with-models.containerfile}         |  3 +-
 .../ollama-with-vision-model.containerfile}   |  5 +--
 4 files changed, 28 insertions(+), 15 deletions(-)
 rename tests/{Containerfile => containers/ollama-with-models.containerfile} (80%)
 rename tests/{VisionContainerfile => containers/ollama-with-vision-model.containerfile} (59%)

diff --git a/.github/actions/run-and-record-tests/action.yml b/.github/actions/run-and-record-tests/action.yml
index c85b36e08..da78f6543 100644
--- a/.github/actions/run-and-record-tests/action.yml
+++ b/.github/actions/run-and-record-tests/action.yml
@@ -29,6 +29,28 @@ runs:
         free -h
         df -h
 
+    - name: Set environment variables
+      shell: bash
+      run: |
+        echo "LLAMA_STACK_CLIENT_TIMEOUT=300" >> "$GITHUB_ENV"
+        echo "LLAMA_STACK_TEST_INFERENCE_MODE=${{ inputs.inference-mode }}" >> "$GITHUB_ENV"
+        echo "LLAMA_STACK_TEST_RECORDING_DIR=tests/integration/recordings" >> "$GITHUB_ENV"
+
+        # Configure provider-specific settings
+        if [ "${{ inputs.provider }}" == "ollama" ]; then
+          echo "OLLAMA_URL=http://0.0.0.0:11434" >> "$GITHUB_ENV"
+          echo "TEXT_MODEL=ollama/llama3.2:3b-instruct-fp16" >> "$GITHUB_ENV"
+          echo "SAFETY_MODEL=ollama/llama-guard3:1b" >> "$GITHUB_ENV"
+        else
+          echo "VLLM_URL=http://localhost:8000/v1" >> "$GITHUB_ENV"
+          echo "TEXT_MODEL=vllm/meta-llama/Llama-3.2-1B-Instruct" >> "$GITHUB_ENV"
+        fi
+
+        if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
+          echo "LLAMA_STACK_TEST_RECORDING_DIR=tests/integration/recordings/vision" >> "$GITHUB_ENV"
+        fi
+
+
     - name: Run Llama Stack Server
       if: ${{ contains(inputs.stack-config, 'server:') }}
       shell: bash
@@ -51,30 +73,20 @@ runs:
           exit 1
 
     - name: Run Integration Tests
-      env:
-        LLAMA_STACK_CLIENT_TIMEOUT: "300"
       shell: bash
       run: |
         stack_config="${{ inputs.stack-config }}"
         EXCLUDE_TESTS="builtin_tool or safety_with_image or code_interpreter or test_rag"
 
-        export LLAMA_STACK_TEST_INFERENCE_MODE="${{ inputs.inference-mode }}"
-
         # Configure provider-specific settings
         if [ "${{ inputs.provider }}" == "ollama" ]; then
-          export OLLAMA_URL="http://0.0.0.0:11434"
-          export TEXT_MODEL="ollama/llama3.2:3b-instruct-fp16"
-          export SAFETY_MODEL="ollama/llama-guard3:1b"
           EXTRA_PARAMS="--safety-shield=llama-guard"
         else
-          export VLLM_URL="http://localhost:8000/v1"
-          export TEXT_MODEL="vllm/meta-llama/Llama-3.2-1B-Instruct"
           EXTRA_PARAMS=""
           EXCLUDE_TESTS="${EXCLUDE_TESTS} or test_inference_store_tool_calls"
         fi
 
         if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
-          export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings/vision"
           if uv run pytest -s -v tests/integration/inference/test_vision_inference.py --stack-config=${stack_config} \
              -k "not( ${EXCLUDE_TESTS} )" \
              --vision-model=ollama/llama3.2-vision:11b \
@@ -91,7 +103,6 @@ runs:
         fi
 
         # Run non-vision tests
-        export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings"
         TEST_TYPES='${{ inputs.test-types }}'
         echo "Test types to run: $TEST_TYPES"
 
diff --git a/.github/actions/setup-ollama/action.yml b/.github/actions/setup-ollama/action.yml
index 7f8b96d68..e57876cb0 100644
--- a/.github/actions/setup-ollama/action.yml
+++ b/.github/actions/setup-ollama/action.yml
@@ -12,7 +12,7 @@ runs:
       shell: bash
       run: |
         if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
-          image="ollama/ollama-with-vision-model"
+          image="ollama-with-vision-model"
         else
           image="ollama-with-models"
         fi
diff --git a/tests/Containerfile b/tests/containers/ollama-with-models.containerfile
similarity index 80%
rename from tests/Containerfile
rename to tests/containers/ollama-with-models.containerfile
index d58216810..438fcec38 100644
--- a/tests/Containerfile
+++ b/tests/containers/ollama-with-models.containerfile
@@ -1,5 +1,6 @@
 # Containerfile used to build our all in one ollama image to run tests in CI
-# podman build --platform linux/amd64 -f Containerfile -t ollama-with-models .
+#
+# podman build --platform linux/amd64 -f ./ollama-with-models.containerfile -t ollama-with-models .
 #
 FROM --platform=linux/amd64 ollama/ollama:latest
 
diff --git a/tests/VisionContainerfile b/tests/containers/ollama-with-vision-model.containerfile
similarity index 59%
rename from tests/VisionContainerfile
rename to tests/containers/ollama-with-vision-model.containerfile
index 952a86a95..c51bd568e 100644
--- a/tests/VisionContainerfile
+++ b/tests/containers/ollama-with-vision-model.containerfile
@@ -1,5 +1,6 @@
-# Containerfile used to build our all in one ollama image to run tests in CI
-# podman build --platform linux/amd64 -f VisionContainerfile -t ollama-with-vision-model .
+# Containerfile used to build our Ollama image with vision model to run tests in CI
+#
+# podman build --platform linux/amd64 -f ./ollama-with-vision-model.containerfile -t ollama-with-vision-model .
 #
 FROM --platform=linux/amd64 ollama/ollama:latest