---
# Docker Compose definition for the YouLama app container.
# Quoted as a string so YAML does not parse 3.8 as a float.
version: '3.8'

services:
  youlama:
    build: .
    ports:
      # Quoted to avoid YAML's sexagesimal-number trap on port mappings.
      - "7860:7860"
    volumes:
      # Bind-mount the project root for live code, plus a models directory.
      - .:/app
      - ./models:/app/models
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      # Points the app at an Ollama server running on the Docker host.
      - OLLAMA_HOST=host.docker.internal
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all available NVIDIA GPUs for this container.
            - driver: nvidia
              count: all
              capabilities: [gpu]
    extra_hosts:
      # Makes host.docker.internal resolvable on Linux hosts.
      - "host.docker.internal:host-gateway"

volumes:
  # NOTE(review): declared but not mounted by the youlama service above —
  # presumably intended for Ollama model storage; confirm it is still needed.
  ollama_data: