Increase the cache TTL for loaded models

Author: Ohad Livne 2025-12-24 00:41:19 +02:00
parent 6f3c6f0409
commit 5716e68abd
Signed by: libohad-dev
GPG key ID: 34FDC68B51191A4D

@@ -4,6 +4,7 @@ Description=A local LLM server
 [Container]
 # keep-sorted start
 ContainerName=ollama
+Environment=OLLAMA_KEEP_ALIVE=10m
 Image=docker.io/ollama/ollama:latest
 Network=ollama.network
 PodmanArgs=--transient-store
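
OLLAMA_KEEP_ALIVE controls how long a loaded model stays cached in memory after its last request; 10m raises it above Ollama's default of five minutes. A single request can still override the server-wide setting through the keep_alive field of the HTTP API. Below is a minimal sketch, assuming the API is reachable at localhost:11434 (the container file here does not publish a port, so the address depends on your network setup) and that a model named llama3 has already been pulled; both are placeholders.

# Minimal sketch: override the server-wide OLLAMA_KEEP_ALIVE for one request.
# Assumes the Ollama API at localhost:11434 and a pulled model named "llama3";
# adjust both to match your deployment.
import requests

response = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "llama3",
        "prompt": "Why is the sky blue?",
        "stream": False,
        # Per-request keep-alive; without this field the server falls back to
        # the OLLAMA_KEEP_ALIVE value set in the container file (10m).
        "keep_alive": "30m",
    },
    timeout=120,
)
response.raise_for_status()
print(response.json()["response"])

A keep_alive of 0 unloads the model as soon as the response is returned, and a negative value keeps it loaded indefinitely.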