local-llm/docker-compose.nvidia.yml

include:
  - docker-compose.base.yml

services:
  # Ollama service
  ollama:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    # Begin NVIDIA GPU support
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    # End NVIDIA GPU support
    volumes:
      - ollama_data:/root/.ollama

volumes:
  ollama_data:
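
The deploy.resources.reservations.devices block is what asks the Docker engine for an NVIDIA GPU. The Compose specification also lets you pin a specific card with device_ids, or expose every card with count: all. The variant below is shown only as an illustration of those options; it is not part of this repository:

# Illustrative variant (not the file above): pin a specific GPU instead of requesting one by count.
services:
  ollama:
    image: ollama/ollama:latest
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]     # pin GPU 0; alternatively use "count: all" to expose every GPU
              capabilities: [gpu]

In either form, the host needs the NVIDIA Container Toolkit for the GPU reservation to take effect, and the include element at the top requires Docker Compose v2.20 or later. The stack then starts with: docker compose -f docker-compose.nvidia.yml up -d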