Docker Compose for Ollama
Ollama is a local LLM server for running Llama, Mistral, and other models on your own hardware.
Tags: ollama, llm, ai, inference
compose.yaml
services:
  ollama:
    image: ollama/ollama:latest
    ports:
      # Expose the Ollama HTTP API on host port 11434
      - target: 11434
        published: 11434
        mode: host
    healthcheck:
      # Liveness probe against the API root; curl must be available inside the image
      test:
        - CMD
        - curl
        - -f
        - http://localhost:11434/
      interval: 30s
      timeout: 10s
      retries: 3
    deploy:
      resources:
        reservations:
          cpus: "4"
          memory: 4G
    restart: unless-stopped
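Once the stack is up, models are pulled and queried through the Ollama CLI and HTTP API. A minimal sketch, assuming the API is published on host port 11434 as configured above; the model name llama3.2 is only an example:

docker compose up -d
docker compose exec ollama ollama pull llama3.2
curl http://localhost:11434/api/generate -d '{"model": "llama3.2", "prompt": "Hello"}'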
Services
- ollama (image: ollama/ollama:latest)
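GPU acceleration is not part of the template above; Ollama falls back to CPU inference without it. If the host has an NVIDIA GPU and the NVIDIA Container Toolkit installed, a hedged sketch of how the reservations block could be extended using the Compose device-reservation syntax:

services:
  ollama:
    deploy:
      resources:
        reservations:
          devices:
            # request all available NVIDIA GPUs for the container
            - driver: nvidia
              count: all
              capabilities: [gpu]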