mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-22 18:46:16 +00:00
13 lines
457 B
Text
13 lines
457 B
Text
# Containerfile used to build our all-in-one ollama image to run tests in CI.
#
# Build with:
#   podman build --platform linux/amd64 -f VisionContainerfile -t ollama-with-vision-model .

# NOTE(review): the base image is unpinned (:latest, hadolint DL3007). This CI
# image may deliberately track upstream; pin to a tag or digest if reproducible
# builds are required — confirm with the CI owners.
FROM --platform=linux/amd64 ollama/ollama:latest

# Start the ollama server in the background, give it a moment to come up, then
# pull both test models in the SAME RUN so the model weights are baked into a
# single image layer (a later RUN could not reach a server started here).
# NOTE(review): the fixed `sleep 5` is a race — if the server is slow to start,
# the pulls fail; a readiness poll would be sturdier if the image ships a probe.
RUN ollama serve & \
    sleep 5 && \
    ollama pull llama3.2-vision:11b && \
    ollama pull all-minilm:l6-v2

# Run the ollama server by default. Exec (JSON-array) form keeps it as PID 1
# so it receives SIGTERM directly from `docker stop` / `podman stop`.
ENTRYPOINT ["ollama", "serve"]