feat(ci): add support for running vision inference tests

This commit is contained in:
Ashwin Bharambe 2025-07-30 12:37:50 -07:00
parent d6ae2b0f47
commit 0b02af792d
4 changed files with 227 additions and 0 deletions

13
tests/VisionContainerfile Normal file
View file

@@ -0,0 +1,13 @@
# Containerfile used to build our all in one ollama image to run tests in CI
# podman build --platform linux/amd64 -f VisionContainerfile -t ollama-with-vision-model .
#
# OLLAMA_TAG defaults to "latest" to preserve the previous behavior; pass
# --build-arg OLLAMA_TAG=<version> for a reproducible CI build (avoids DL3007).
ARG OLLAMA_TAG=latest
FROM --platform=linux/amd64 ollama/ollama:${OLLAMA_TAG}

# Start ollama and pull models in a single layer so the downloaded model blobs
# are committed together with the RUN step that fetched them.
# Poll for server readiness instead of a fixed `sleep 5`: a slow daemon start
# would otherwise race the pulls and fail the build intermittently.
RUN ollama serve & \
    for i in $(seq 1 30); do \
        ollama list >/dev/null 2>&1 && break; \
        sleep 1; \
    done && \
    ollama pull llama3.2-vision:11b && \
    ollama pull all-minilm:l6-v2

# Exec-form entrypoint: ollama runs as PID 1 and receives SIGTERM on stop.
ENTRYPOINT ["ollama", "serve"]