services:
  ai:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "8012:8012"   # STT server.
      - "65432:65432" # TTS client/server socket.
    volumes:
      - .:/app
      - ./models:/home/user/models
      - ./chat:/home/user/chat
    depends_on:
      - ollama
    command: ["bash", "-c", "
      sudo chown -R user:user /home/user/models && \
      sudo chown -R user:user /home/user/chat && \
      source venv/bin/activate && \
      python /app/server.py \
      "]
    stdin_open: true
    tty: true
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  ollama:
    image: ollama/ollama:latest
    volumes:
      - ./ollama/ollama:/root/.ollama
    ports:
      - "7869:11434"
    environment:
      - OLLAMA_KEEP_ALIVE=24h
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  ollama-webui:
    image: ghcr.io/open-webui/open-webui:main
    volumes:
      - ./ollama/ollama-webui:/app/backend/data
    depends_on:
      - ollama
    ports:
      - "8080:8080"
    environment:
      - OLLAMA_BASE_URLS=http://host.docker.internal:7869
      - ENV=dev
      - WEBUI_AUTH=False
      - WEBUI_NAME=WebUI
      - WEBUI_URL=http://localhost:8080
      - WEBUI_SECRET_KEY=t0p-s3cr3t
    extra_hosts:
      - host.docker.internal:host-gateway
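
# Usage (a minimal sketch, assuming this file is saved as docker-compose.yml
# next to the Dockerfile and the models/, chat/, and ollama/ directories; the
# nvidia device reservations require the NVIDIA Container Toolkit on the host):
#
#   docker compose up -d --build
#
# Ollama's HTTP API is then reachable on the host at port 7869 (mapped to the
# container's 11434). For example, to pull a model before first use ("llama3"
# is a placeholder, substitute whichever model server.py expects):
#
#   curl http://localhost:7869/api/pull -d '{"name": "llama3"}'
#
# The web UI is served at http://localhost:8080, and the ai service exposes
# its STT and TTS endpoints on host ports 8012 and 65432 respectively.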