Mailcow: Move to Nix and Docker-Compose.
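
Images are defined in images.nix and built with Nix dockerTools; the
makefile wraps nix-build in the nixos/nix Docker image and loads the
resulting tarballs into the local Docker daemon.  A rough usage sketch
(assumes Docker is installed locally and can pull nixos/nix):

    make images          # build and load every image listed in the makefile
    make mailcow.load    # build and load only the mailcow image

The mailcow image itself starts dockerd and runs the upstream
mailcow-dockerized docker-compose stack inside a single Kata-isolated
pod; the corresponding Kubernetes objects are in mailcow/mailcow.yaml.
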
diff --git a/images.nix b/images.nix
new file mode 100644
index 0000000..186dcf1
--- /dev/null
+++ b/images.nix
@@ -0,0 +1,417 @@
+{ system ? builtins.currentSystem }:
+let
+  pkgs = import <nixpkgs> { inherit system; };
+
+in
+let
+  img = spec: {
+    streamed = pkgs.dockerTools.streamLayeredImage spec;
+    layered = pkgs.dockerTools.buildLayeredImage spec;
+    image = pkgs.dockerTools.buildImage spec;
+  };
+
+in
+{
+
+  #  ejabberd = pkgs.dockerTools.buildImage {
+  #    name = "docker.benkard.de/mulk/ejabberd";
+  #    tag = "latest";
+  #    contents = [
+  #      pkgs.ejabberd
+  #      pkgs.bash
+  #      pkgs.nano
+  #    ];
+  #    config = {
+  #      Env = [ ];
+  #      ExposedPorts = { };
+  #      WorkingDir = "/";
+  #      Volumes = {
+  #        "/data" = { };
+  #      };
+  #    };
+  #  };
+
+  prosody = img {
+    name = "docker.benkard.de/mulk/prosody";
+    #tag = "latest";
+    contents = with pkgs; [
+      prosody
+      bash
+      coreutils
+      nano
+    ];
+    config = {
+      Entrypoint = [ "/bin/bash" ];
+      Cmd = [ ];
+      Env = [ ];
+      ExposedPorts = { };
+      WorkingDir = "/";
+      Volumes = {
+        "/data" = { };
+      };
+    };
+  };
+
+  mailcow =
+    let
+      dockerComposeOverrideYaml =
+        pkgs.writeTextDir "docker-compose.override.yml" ''
+          services:
+            mysql-mailcow:
+              image: alpine/socat:1.0.3
+              command:
+                - UNIX-LISTEN:/var/run/mysqld/mysqld.sock,reuseaddr,fork,unlink-early,mode=0777
+                - TCP-CONNECT:mysql.system.svc.cluster.local.:3306
+              volumes:
+                - mysql-socket-vol-1:/var/run/mysqld/:Z
+              restart: always
+
+          volumes:
+            vmail-vol-1:                {driver: local, driver_opts: {o: bind, type: none, device: "/vol/vmail"}}
+            vmail-index-vol-1:          {driver: local, driver_opts: {o: bind, type: none, device: "/vol/vmail-index"}}
+            mysql-vol-1:                {driver: local, driver_opts: {o: bind, type: none, device: "/run/mysql"}}
+            mysql-socket-vol-1:         {driver: local, driver_opts: {o: bind, type: none, device: "/run/mysql-socket"}}
+            redis-vol-1:                {driver: local, driver_opts: {o: bind, type: none, device: "/vol/redis-data"}}
+            rspamd-vol-1:               {driver: local, driver_opts: {o: bind, type: none, device: "/vol/rspamd-data"}}
+            solr-vol-1:                 {driver: local, driver_opts: {o: bind, type: none, device: "/vol/solr-data"}}
+            postfix-vol-1:              {driver: local, driver_opts: {o: bind, type: none, device: "/vol/postfix-data"}}
+            crypt-vol-1:                {driver: local, driver_opts: {o: bind, type: none, device: "/vol/crypt-data"}}
+            sogo-web-vol-1:             {driver: local, driver_opts: {o: bind, type: none, device: "/vol/sogo-web"}}
+            sogo-userdata-backup-vol-1: {driver: local, driver_opts: {o: bind, type: none, device: "/vol/sogo-userdata-backup"}}
+        '';
+
+      init =
+        pkgs.writeShellScriptBin "init" ''
+          set -xeuo pipefail
+
+          if ! [ -e /vol/docker-data/docker.ext4 ]; then
+              ${pkgs.busybox}/bin/dd if=/dev/zero of=/vol/docker-data/docker.ext4 bs=1G count=0 seek=30
+              ${pkgs.e2fsprogs}/bin/mkfs.ext4 /vol/docker-data/docker.ext4
+          fi
+          ${pkgs.e2fsprogs}/bin/e2fsck -y /vol/docker-data/docker.ext4
+          ${pkgs.busybox}/bin/mkdir -p /var/lib/docker
+          ${pkgs.busybox}/bin/mount -o loop,rw /vol/docker-data/docker.ext4 /var/lib/docker
+
+          ${pkgs.docker}/bin/dockerd --storage-driver=overlay2 &
+          sleep 10s
+
+          ${pkgs.docker}/bin/docker kill $(${pkgs.docker}/bin/docker ps -a -q) || :
+          ${pkgs.docker}/bin/docker system prune --volumes --force || :
+
+          ${pkgs.busybox}/bin/mkdir -p /tmp /run/{mysql,mysql-socket}
+          exec ${pkgs.docker-compose}/bin/docker-compose --env-file /mailcow-dockerized/mailcow.conf -f /mailcow-dockerized/docker-compose.yml -f ${dockerComposeOverrideYaml}/docker-compose.override.yml up --remove-orphans
+        '';
+
+      src = ./mailcow/src;
+
+      extraDeps = with pkgs; [
+        # for Docker
+        cacert
+
+        # for update.sh
+        bash
+        coreutils
+        curl
+        docker
+        docker-compose
+        findutils
+        gawk
+        gitMinimal
+      ];
+
+      maintenanceDeps = with pkgs; [
+        bash
+        busybox
+        coreutils
+        findutils
+        pxattr
+        strace
+      ];
+    in
+    img {
+      name = "docker.benkard.de/mulk/mailcow";
+      tag = "latest";
+      maxLayers = 125;
+      contents = extraDeps ++ maintenanceDeps;
+      extraCommands =
+        ''
+          #!${pkgs.runtimeShell}
+
+          install -dm755 vol/{crypt-data,postfix-data,redis-data,rspamd-data,sogo-web,sogo-userdata-backup,solr-data,vmail,vmail-index,web-data}
+
+          cp -a ${src}/* .
+        '';
+      config = {
+        Entrypoint = [ "${init}/bin/init" ];
+        Cmd = [ ];
+        WorkingDir = "/mailcow-dockerized";
+        Volumes = {
+          "/mailcow-dockerized/data/conf" = { };
+          "/mailcow-dockerized/data/assets/ssl" = { };
+          "/vol/crypt-data" = { };
+          "/vol/docker-data" = { };
+          "/vol/postfix-data" = { };
+          "/vol/redis-data" = { };
+          "/vol/rspamd-data" = { };
+          "/vol/sogo-web" = { };
+          "/vol/sogo-userdata-backup" = { };
+          "/vol/solr-data" = { };
+          "/vol/vmail" = { };
+          "/vol/vmail-index" = { };
+          "/vol/web-data" = { };
+        };
+      };
+    };
+
+  nextcloud = img {
+    name = "docker.benkard.de/mulk/nextcloud";
+    contents =
+      let
+        baseDependencies = with pkgs; [
+          # Service dependencies.
+          apacheHttpd
+          apacheHttpdPackages.php
+
+          # Optional dependencies.
+          ffmpeg
+
+          # Maintenance and manual upgrades.
+          bash
+          coreutils
+          php
+          unzip
+        ];
+
+        phpModules = with pkgs.php74Extensions; [
+          # Required dependencies.
+          ctype
+          curl
+          dom
+          gd
+          iconv
+          json
+          mbstring
+          openssl
+          pdo_pgsql
+          posix
+          session
+          simplexml
+          xml
+          xmlreader
+          xmlwriter
+          zip
+          zlib
+
+          # Recommended dependencies.
+          bz2
+          intl
+          fileinfo
+
+          # Optional dependencies.
+          apcu
+          bcmath
+          ftp
+          gmp
+          imagick
+          memcached
+          pcntl
+          redis
+          #smbclient
+        ];
+      in
+      baseDependencies ++ phpModules;
+    config = {
+      WorkingDir = "/var/www/html";
+      Volumes = {
+        "/var/www/html" = { };
+      };
+    };
+  };
+
+  webcron = img {
+    name = "docker.benkard.de/mulk/webcron";
+    contents =
+      with pkgs; [
+        # Entry points.
+        curl
+      ];
+    config = {
+      Entrypoint = [ "curl" "-fsS" ];
+      Cmd = [ ];
+      Volumes = { };
+    };
+  };
+
+  samba =
+    let
+      runner =
+        pkgs.stdenv.mkDerivation {
+          name = "mulk-samba-runner";
+          buildInputs = with pkgs; [ bash ];
+          src = ./samba;
+          builder = builtins.toFile "builder.sh" ''
+            source $stdenv/setup
+            set -euo pipefail
+            set -x
+
+            install -Dm755 $src/init $out/init
+
+            for svc in avahi dbus nmbd smbd; do
+                install -Dm755 $src/service/$svc/run $out/service/$svc/run
+            done
+
+            set +x
+          '';
+        };
+
+    in
+    img {
+      name = "docker.benkard.de/mulk/samba";
+      contents = with pkgs; [
+        # Services.
+        avahi
+        dbus
+        #samba4Full
+        (samba.override { enableMDNS = true; enableProfiling = false; enableRegedit = false; })
+
+        # Control.
+        execline
+        gnused
+        runner
+        s6
+
+        # Maintenance.
+        busybox
+      ];
+      extraCommands =
+        let
+          dbusSystemConf =
+            builtins.toFile "dbus-1-system.conf" ''
+              <!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
+                                         "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+              <busconfig>
+                <type>system</type>
+                <auth>ANONYMOUS</auth>
+                <!-- <auth>EXTERNAL</auth> -->
+                <allow_anonymous/>
+                <listen>unix:path=/run/dbus/system_bus_socket</listen>
+                <standard_system_servicedirs/>
+
+                <policy context="default">
+                  <allow user="*"/>
+
+                  <deny own="*"/>
+                  <deny send_type="method_call"/>
+              
+                  <allow send_type="signal"/>
+                  <allow send_requested_reply="true" send_type="method_return"/>
+                  <allow send_requested_reply="true" send_type="error"/>
+              
+                  <allow receive_type="method_call"/>
+                  <allow receive_type="method_return"/>
+                  <allow receive_type="error"/>
+                  <allow receive_type="signal"/>
+              
+                  <allow send_destination="org.freedesktop.DBus"
+                         send_interface="org.freedesktop.DBus" />
+                  <allow send_destination="org.freedesktop.DBus"
+                         send_interface="org.freedesktop.DBus.Introspectable"/>
+                  <allow send_destination="org.freedesktop.DBus"
+                         send_interface="org.freedesktop.DBus.Properties"/>
+
+                  <deny send_destination="org.freedesktop.DBus"
+                        send_interface="org.freedesktop.DBus"
+                        send_member="UpdateActivationEnvironment"/>
+                  <deny send_destination="org.freedesktop.DBus"
+                        send_interface="org.freedesktop.DBus.Debug.Stats"/>
+                  <deny send_destination="org.freedesktop.DBus"
+                        send_interface="org.freedesktop.systemd1.Activator"/>
+                </policy>
+
+                <policy context="default">
+                  <allow own="org.freedesktop.Avahi"/>
+                </policy>
+
+                <includedir>/share/dbus-1/system.d</includedir>
+              </busconfig>
+            '';
+
+          avahiDaemonConf =
+            builtins.toFile "avahi-daemon.conf" ''
+              [server]
+              use-ipv4=yes
+              use-ipv6=yes
+              enable-dbus=yes
+              ratelimit-interval-usec=1000000
+              ratelimit-burst=1000
+              
+              [wide-area]
+              enable-wide-area=no
+              
+              [publish]
+              add-service-cookie=no
+              publish-addresses=no
+              publish-hinfo=no
+              publish-workstation=no
+              publish-domain=no
+              publish-aaaa-on-ipv4=yes
+              publish-a-on-ipv6=no
+              
+              [reflector]
+              
+              [rlimits]
+            '';
+
+          group =
+            builtins.toFile "group" ''
+              dbus::997:
+              avahi::998:
+            '';
+
+          passwd =
+            builtins.toFile "passwd" ''
+              dbus::997:997::/tmp:/nonexistent
+              avahi::998:998::/tmp:/nonexistent
+              nobody::999:999::/tmp:/nonexistent
+            '';
+        in
+        ''
+          #!${pkgs.runtimeShell}
+
+          rm -rf -- etc/avahi/services/*
+
+          install -dm755 tmp run run/dbus var/run/samba var/log/samba var/lock/samba var/locks/samba var/lib/samba/private var/cache/samba
+
+          touch var/lib/samba/registry.tdb var/lib/samba/account_policy.tdb
+
+          install -Dm644 ${dbusSystemConf} etc/dbus-1/system.conf
+          install -Dm644 ${avahiDaemonConf} etc/avahi/avahi-daemon.conf
+          install -Dm644 ${group} etc/group
+          install -Dm644 ${passwd} etc/passwd
+        '';
+      config = {
+        Entrypoint = [ "/init" ];
+        Cmd = [ ];
+        Volumes = {
+          "/vol/shares" = { };
+        };
+      };
+    };
+
+  #  nano = img {
+  #    name = "docker.benkard.de/mulk/nano";
+  #    tag = "latest";
+  #    contents = [
+  #      pkgs.nano
+  #    ];
+  #  };
+  #
+  #  vim = img {
+  #    name = "docker.benkard.de/mulk/vim";
+  #    tag = "latest";
+  #    contents = [
+  #      pkgs.vim
+  #    ];
+  #  };
+
+}
diff --git a/mailcow/mailcow.yaml b/mailcow/mailcow.yaml
new file mode 100644
index 0000000..a403259
--- /dev/null
+++ b/mailcow/mailcow.yaml
@@ -0,0 +1,475 @@
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: mailcow
+  namespace: mulk
+  labels:
+    name: mailcow
+    k8s-app: mailcow
+  annotations:
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/preserve-host: "true"
+spec:
+  rules:
+    - host: mail.benkard.de
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName: mailcow
+              servicePort: 80
+    - host: autodiscover.benkard.de
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName: mailcow
+              servicePort: 80
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: mailcow-pub
+  namespace: mulk
+  labels:
+    name: mailcow-pub
+    k8s-app: mailcow
+spec:
+  selector:
+    name: mailcow
+  type: NodePort
+  externalTrafficPolicy: Local
+  ports:
+    - name: smtp-alt
+      port: 31025
+      targetPort: 25
+      protocol: TCP
+      nodePort: 31025
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: mailcow
+  namespace: mulk
+  labels:
+    name: mailcow
+    k8s-app: mailcow
+spec:
+  selector:
+    name: mailcow
+  type: ClusterIP
+  ports:
+    - name: http
+      port: 80
+      targetPort: 80
+      protocol: TCP
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: mailcow
+  namespace: mulk
+  labels:
+    name: mailcow
+    k8s-app: mailcow
+
+spec:
+  replicas: 1
+
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+
+  selector:
+    matchLabels:
+      k8s-app: mailcow
+      name: mailcow
+
+  template:
+    metadata:
+      labels:
+        name: mailcow
+        k8s-app: mailcow
+
+    spec:
+      imagePullSecrets:
+        - name: portus-token
+
+      runtimeClassName: kata
+
+      containers:
+        - name: master
+          image: docker.benkard.de/mulk/mailcow:latest
+
+          securityContext:
+            # In a Kata container, this only gives the container full
+            # access to the guest VM rather than the host.  (To ensure
+            # this, it is important to set privileged_without_host_devices
+            # = true in the [plugins.cri.containerd.runtimes.kata] section
+            # of containerd's config.toml.)
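+            #
+            # A rough sketch of that containerd snippet (the exact table
+            # path may differ between containerd versions):
+            #
+            #   [plugins.cri.containerd.runtimes.kata]
+            #     privileged_without_host_devices = true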
+            privileged: true
+
+          env:
+            - name: COMPOSE_HTTP_TIMEOUT
+              value: "600"
+
+          ports:
+            - name: http
+              containerPort: 80
+            - name: smtp
+              hostPort: 25
+              containerPort: 25
+            - name: pop
+              hostPort: 110
+              containerPort: 110
+            - name: imap
+              hostPort: 143
+              containerPort: 143
+            - name: smtps
+              hostPort: 465
+              containerPort: 465
+            - name: submission
+              hostPort: 587
+              containerPort: 587
+            - name: imaps
+              hostPort: 993
+              containerPort: 993
+            - name: pops
+              hostPort: 995
+              containerPort: 995
+            - name: sieve
+              hostPort: 4190
+              containerPort: 4190
+            - name: doveadm
+              hostPort: 19991
+              containerPort: 12345
+
+          volumeMounts:
+            # Configuration data.
+            - name: assets
+              subPath: ssl
+              mountPath: /mailcow-dockerized/data/assets/ssl
+            - name: config
+              mountPath: /mailcow-dockerized/data/conf
+            - name: secrets
+              subPath: mailcow.conf
+              mountPath: /mailcow-dockerized/mailcow.conf
+
+            # State.
+            - name: crypt-data
+              mountPath: /vol/crypt-data
+            - name: postfix-data
+              mountPath: /vol/postfix-data
+            - name: redis-data
+              mountPath: /vol/redis-data
+            - name: rspamd-data
+              mountPath: /vol/rspamd-data
+            - name: solr-data
+              mountPath: /vol/solr-data
+            - name: sogo-web
+              mountPath: /vol/sogo-web
+            - name: sogo-userdata-backup
+              mountPath: /vol/sogo-userdata-backup
+            - name: vmail
+              mountPath: /vol/vmail
+            - name: vmail-index
+              mountPath: /vol/vmail-index
+            - name: web-data
+              mountPath: /vol/web-data
+            #- name: docker-data
+            #  subPath: vfs
+            #  mountPath: /var/lib/docker/vfs
+            #- name: docker-data
+            #  subPath: image
+            #  mountPath: /var/lib/docker/image
+            #- name: docker-data
+            #  subPath: overlay2
+            #  mountPath: /var/lib/docker/overlay2
+            #- name: docker-data
+            #  mountPath: /var/lib/docker
+            - name: docker-data
+              mountPath: /vol/docker-data
+
+      volumes:
+        - name: assets
+          persistentVolumeClaim:
+            claimName: mailcow-assets
+        - name: config
+          persistentVolumeClaim:
+            claimName: mailcow-config-v2
+        - name: crypt-data
+          persistentVolumeClaim:
+            claimName: mailcow-crypt
+        - name: postfix-data
+          persistentVolumeClaim:
+            claimName: mailcow-postfix
+        - name: redis-data
+          persistentVolumeClaim:
+            claimName: mailcow-redis
+        - name: rspamd-data
+          persistentVolumeClaim:
+            claimName: mailcow-rspamd
+        - name: solr-data
+          persistentVolumeClaim:
+            claimName: mailcow-solr
+        - name: sogo-web
+          persistentVolumeClaim:
+            claimName: mailcow-sogo-web
+        - name: sogo-userdata-backup
+          persistentVolumeClaim:
+            claimName: mailcow-sogo-userdata-backup
+        - name: vmail
+          persistentVolumeClaim:
+            claimName: mailcow-vmail
+        - name: vmail-index
+          persistentVolumeClaim:
+            claimName: mailcow-vmail-index
+        - name: web-data
+          persistentVolumeClaim:
+            claimName: mailcow-web
+        - name: docker-data
+          persistentVolumeClaim:
+            claimName: mailcow-docker
+        - name: secrets
+          secret:
+            secretName: mailcow-secrets
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-web
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-docker
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-assets
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-solr
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-sogo-web
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-sogo-userdata-backup
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-vmail
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-vmail-index
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-redis
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-rspamd
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-postfix
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-crypt
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mailcow-config-v2
+  namespace: mulk
+  labels:
+    k8s-app: mailcow
+  annotations:
+    volume.beta.kubernetes.io/storage-provisioner: rancher.io/local-path
+    volume.kubernetes.io/selected-node: ifirn
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py
index 20e9d0e..32824a2 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py
@@ -227,29 +227,11 @@
 
   # api call: container_post - post_action: exec - cmd: system - task: mysql_upgrade
   def container_post__exec__system__mysql_upgrade(self, container_id):
-    for container in docker_client.containers.list(filters={"id": container_id}):
-      sql_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/mysql_upgrade -uroot -p'" + os.environ['DBROOT'].replace("'", "'\\''") + "'\n"], user='mysql')
-      if sql_return.exit_code == 0:
-        matched = False
-        for line in sql_return.output.decode('utf-8').split("\n"):
-          if 'is already upgraded to' in line:
-            matched = True
-        if matched:
-          return jsonify(type='success', msg='mysql_upgrade: already upgraded', text=sql_return.output.decode('utf-8'))
-        else:
-          container.restart()
-          return jsonify(type='warning', msg='mysql_upgrade: upgrade was applied', text=sql_return.output.decode('utf-8'))
-      else:
-        return jsonify(type='error', msg='mysql_upgrade: error running command', text=sql_return.output.decode('utf-8'))
+    return jsonify(type='success', msg='mysql_upgrade: not touching fake MySQL', text='')
 
   # api call: container_post - post_action: exec - cmd: system - task: mysql_tzinfo_to_sql
   def container_post__exec__system__mysql_tzinfo_to_sql(self, container_id):
-    for container in docker_client.containers.list(filters={"id": container_id}):
-      sql_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/mysql_tzinfo_to_sql /usr/share/zoneinfo | /bin/sed 's/Local time zone must be set--see zic manual page/FCTY/' | /usr/bin/mysql -uroot -p'" + os.environ['DBROOT'].replace("'", "'\\''") + "' mysql \n"], user='mysql')
-      if sql_return.exit_code == 0:
-        return jsonify(type='info', msg='mysql_tzinfo_to_sql: command completed successfully', text=sql_return.output.decode('utf-8'))
-      else:
-        return jsonify(type='error', msg='mysql_tzinfo_to_sql: error running command', text=sql_return.output.decode('utf-8'))
+    return jsonify(type='success', msg='mysql_tzinfo_to_sql: not touching fake MySQL', text='')
 
   # api call: container_post - post_action: exec - cmd: reload - task: dovecot
   def container_post__exec__reload__dovecot(self, container_id):
@@ -284,7 +266,7 @@
   def container_post__exec__sieve__print(self, container_id):
     if 'username' in request.json and 'script_name' in request.json:
       for container in docker_client.containers.list(filters={"id": container_id}):
-        cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request.json['username'].replace("'", "'\\''") + "' '" + request.json['script_name'].replace("'", "'\\''") + "'"]  
+        cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request.json['username'].replace("'", "'\\''") + "' '" + request.json['script_name'].replace("'", "'\\''") + "'"]
         sieve_return = container.exec_run(cmd)
         return exec_run_handler('utf8_text_only', sieve_return)
 
diff --git a/mailcow/src/mailcow-dockerized/data/conf/postfix/main.cf b/mailcow/src/mailcow-dockerized/data/conf/postfix/main.cf
index e8da794..3e4b2b1 100644
--- a/mailcow/src/mailcow-dockerized/data/conf/postfix/main.cf
+++ b/mailcow/src/mailcow-dockerized/data/conf/postfix/main.cf
@@ -16,7 +16,7 @@
 alias_maps = hash:/etc/aliases
 alias_database = hash:/etc/aliases
 relayhost =
-mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 [fe80::]/10 [fc00::]/7
+mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128 172.22.1.0/24 [fe80::]/10 [fc00::]/7
 mailbox_size_limit = 0
 recipient_delimiter = +
 inet_interfaces = all
diff --git a/mailcow/src/mailcow-dockerized/docker-compose.yml b/mailcow/src/mailcow-dockerized/docker-compose.yml
index 2c9c93a..ff1e5eb 100644
--- a/mailcow/src/mailcow-dockerized/docker-compose.yml
+++ b/mailcow/src/mailcow-dockerized/docker-compose.yml
@@ -552,7 +552,7 @@
     driver: bridge
     driver_opts:
       com.docker.network.bridge.name: br-mailcow
-    enable_ipv6: true
+    enable_ipv6: false
     ipam:
       driver: default
       config:
diff --git a/makefile b/makefile
new file mode 100644
index 0000000..a4ca869
--- /dev/null
+++ b/makefile
@@ -0,0 +1,41 @@
+RM_F = rm -f
+
+DOCKER = docker
+NIX_DOCKER = $(DOCKER) run --rm --mount type=volume,source=nix,target=/nix --volume $$(pwd):$$(pwd) --workdir $$(pwd) nixos/nix
+
+#NIX_BUILD = vctl run -it --rm --volume $$(pwd)/nix:/nix --volume $$(pwd):$$(pwd) --workdir $$(pwd) nixos/nix nix-build
+NIX_BUILD = $(NIX_DOCKER) nix-build
+NIX_BUILD_OUT = $(NIX_DOCKER) sh -c '$$(nix-build)'
+
+IMAGES = mailcow nano nextcloud prosody samba vim webcron
+IMAGES_BIN = $(patsubst %,%.bin,$(IMAGES))
+IMAGES_LOAD = $(patsubst %,%.load,$(IMAGES))
+
+.PHONY: all images clean #$(IMAGES_LOAD)
+
+all: images
+
+clean:
+	$(RM_F) $(IMAGES_BIN)
+
+images: $(IMAGES_LOAD)
+
+# The following approaches were all considered and discarded:
+#
+# Takes ~240 seconds (nextcloud image):
+#
+#   $(IMAGES):
+#   	$(NIX_DOCKER) sh -c '$$(nix-build images.nix -A $@.streamed)' | $(DOCKER) load
+#   
+# Takes ~60 seconds (nextcloud image):
+#
+#   $(IMAGES):
+#   	$(NIX_DOCKER) sh -c 'cat $$(nix-build images.nix -A $@.layered)' | $(DOCKER) load
+#
+# The final approach takes ~20 seconds to build the nextcloud image.
+
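+# Usage sketch (assumes Docker can pull nixos/nix; image names come from
+# $(IMAGES) above):
+#
+#   make images          # build and load all images
+#   make mailcow.load    # build and load a single image
+#   make clean           # remove the cached *.bin tarballs
+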
+%.bin: images.nix
+	$(NIX_DOCKER) sh -c 'cp $$(nix-build images.nix -A $(patsubst %.bin,%,$@).layered) $@'
+
+%.load: %.bin
+	$(DOCKER) load --input=$<