git subrepo clone https://github.com/mailcow/mailcow-dockerized.git mailcow/src/mailcow-dockerized
subrepo: subdir: "mailcow/src/mailcow-dockerized"
merged: "a832becb"
upstream: origin: "https://github.com/mailcow/mailcow-dockerized.git"
branch: "master"
commit: "a832becb"
git-subrepo: version: "0.4.3"
origin: "???"
commit: "???"
Change-Id: If5be2d621a211e164c9b6577adaa7884449f16b5
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/Dockerfile
new file mode 100644
index 0000000..8369ce3
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/Dockerfile
@@ -0,0 +1,28 @@
+# acme-mailcow image: runs the acme.sh certificate maintenance loop under tini.
+FROM alpine:3.11
+
+# NOTE(review): string-form "LABEL maintainer" works, but the OCI label
+# org.opencontainers.image.authors is the modern convention.
+LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
+
+# Tooling used by the scripts below: bash (scripts use bashisms), curl/openssl
+# (HTTP checks, CSR/cert handling), bind-tools (dig), jq (dockerapi JSON),
+# mariadb-client (mailcow DB polling), redis (redis-cli log backend),
+# tini (PID-1 init), tzdata, python3 + acme-tiny (the actual ACME client).
+RUN apk upgrade --no-cache \
+ && apk add --update --no-cache \
+ bash \
+ curl \
+ openssl \
+ bind-tools \
+ jq \
+ mariadb-client \
+ redis \
+ tini \
+ tzdata \
+ python3 \
+ && python3 -m pip install --upgrade pip \
+ && python3 -m pip install acme-tiny
+
+# Helper scripts sourced/invoked by /srv/acme.sh at runtime.
+COPY acme.sh /srv/acme.sh
+COPY functions.sh /srv/functions.sh
+COPY obtain-certificate.sh /srv/obtain-certificate.sh
+COPY reload-configurations.sh /srv/reload-configurations.sh
+COPY expand6.sh /srv/expand6.sh
+
+RUN chmod +x /srv/*.sh
+
+# tini -g forwards signals to the whole process group of acme.sh.
+CMD ["/sbin/tini", "-g", "--", "/srv/acme.sh"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/acme.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/acme.sh
new file mode 100755
index 0000000..5d5da1e
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/acme.sh
@@ -0,0 +1,422 @@
+#!/bin/bash
+# acme-mailcow entrypoint: normalizes env flags, waits for dependent services,
+# then runs the endless certificate maintenance loop further below.
+set -o pipefail
+# fd 5 mirrors stdout; obtain-certificate.sh tees acme-tiny output to it.
+exec 5>&1
+
+# Do not attempt to write to slave
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+ export REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+else
+ export REDIS_CMDLINE="redis-cli -h redis -p 6379"
+fi
+
+# Block until Redis answers - log_f (sourced below) pushes log lines to Redis.
+until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
+ echo "Waiting for Redis..."
+ sleep 2
+done
+
+source /srv/functions.sh
+# Thanks to https://github.com/cvmiller -> https://github.com/cvmiller/expand6
+source /srv/expand6.sh
+
+# The next four checks normalize any yes-ish value (y/Y/yes/YES/...) to "y".
+# Skipping IP check when we like to live dangerously
+if [[ "${SKIP_IP_CHECK}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ SKIP_IP_CHECK=y
+fi
+
+# Skipping HTTP check when we like to live dangerously
+if [[ "${SKIP_HTTP_VERIFICATION}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ SKIP_HTTP_VERIFICATION=y
+fi
+
+# Request certificate for MAILCOW_HOSTNAME only
+if [[ "${ONLY_MAILCOW_HOSTNAME}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ ONLY_MAILCOW_HOSTNAME=y
+fi
+
+# Request individual certificate for every domain
+if [[ "${ENABLE_SSL_SNI}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ ENABLE_SSL_SNI=y
+fi
+
+if [[ "${SKIP_LETS_ENCRYPT}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ log_f "SKIP_LETS_ENCRYPT=y, skipping Let's Encrypt..."
+ # Park for a year, then re-exec self so a changed env takes effect.
+ sleep 365d
+ exec $(readlink -f "$0")
+fi
+
+# Service reachability gates: a single successful ping is enough per service.
+log_f "Waiting for Docker API..."
+until ping dockerapi -c1 > /dev/null; do
+ sleep 1
+done
+log_f "Docker API OK"
+
+log_f "Waiting for Postfix..."
+until ping postfix -c1 > /dev/null; do
+ sleep 1
+done
+log_f "Postfix OK"
+
+log_f "Waiting for Dovecot..."
+until ping dovecot -c1 > /dev/null; do
+ sleep 1
+done
+log_f "Dovecot OK"
+
+ACME_BASE=/var/lib/acme
+SSL_EXAMPLE=/var/lib/ssl-example
+
+mkdir -p ${ACME_BASE}/acme
+
+# Migrate
+# Older layouts kept key/account under acme/private/ - move them up first.
+[[ -f ${ACME_BASE}/acme/private/privkey.pem ]] && mv ${ACME_BASE}/acme/private/privkey.pem ${ACME_BASE}/acme/key.pem
+[[ -f ${ACME_BASE}/acme/private/account.key ]] && mv ${ACME_BASE}/acme/private/account.key ${ACME_BASE}/acme/account.pem
+if [[ -f ${ACME_BASE}/acme/key.pem && -f ${ACME_BASE}/acme/cert.pem ]]; then
+ if verify_hash_match ${ACME_BASE}/acme/cert.pem ${ACME_BASE}/acme/key.pem; then
+ log_f "Migrating to SNI folder structure..."
+ # The old cert's CN names the new per-domain folder; its SANs (minus the
+ # CN itself) are recorded in that folder's "domains" file.
+ CERT_DOMAIN=($(openssl x509 -noout -text -in ${ACME_BASE}/acme/cert.pem | grep "Subject:" | sed -e 's/\(Subject:\)\|\(CN = \)\|\(CN=\)//g' | sed -e 's/^[[:space:]]*//'))
+ CERT_DOMAINS=(${CERT_DOMAIN} $(openssl x509 -noout -text -in ${ACME_BASE}/acme/cert.pem | grep "DNS:" | sed -e 's/\(DNS:\)\|,//g' | sed "s/${CERT_DOMAIN}//" | sed -e 's/^[[:space:]]*//'))
+ mkdir -p ${ACME_BASE}/${CERT_DOMAIN}
+ mv ${ACME_BASE}/acme/cert.pem ${ACME_BASE}/${CERT_DOMAIN}/cert.pem
+ # key is only copied, not moved, because it is used by all other requests too
+ cp ${ACME_BASE}/acme/key.pem ${ACME_BASE}/${CERT_DOMAIN}/key.pem
+ chmod 600 ${ACME_BASE}/${CERT_DOMAIN}/key.pem
+ echo -n ${CERT_DOMAINS[*]} > ${ACME_BASE}/${CERT_DOMAIN}/domains
+ mv ${ACME_BASE}/acme/acme.csr ${ACME_BASE}/${CERT_DOMAIN}/acme.csr
+ log_f "OK" no_date
+ fi
+fi
+
+[[ ! -f ${ACME_BASE}/dhparams.pem ]] && cp ${SSL_EXAMPLE}/dhparams.pem ${ACME_BASE}/dhparams.pem
+
+# Ensure a usable server cert/key pair exists: leave third-party certs alone
+# (and sleep ~forever), else restore the last ACME pair or fall back to the
+# bundled snake-oil pair, re-exec'ing with TRIGGER_RESTART=1 either way.
+if [[ -f ${ACME_BASE}/cert.pem ]] && [[ -f ${ACME_BASE}/key.pem ]] && [[ $(stat -c%s ${ACME_BASE}/cert.pem) != 0 ]]; then
+ ISSUER=$(openssl x509 -in ${ACME_BASE}/cert.pem -noout -issuer)
+ if [[ ${ISSUER} != *"Let's Encrypt"* && ${ISSUER} != *"mailcow"* && ${ISSUER} != *"Fake LE Intermediate"* ]]; then
+ log_f "Found certificate with issuer other than mailcow snake-oil CA and Let's Encrypt, skipping ACME client..."
+ sleep 3650d
+ exec $(readlink -f "$0")
+ fi
+else
+ if [[ -f ${ACME_BASE}/${MAILCOW_HOSTNAME}/cert.pem ]] && [[ -f ${ACME_BASE}/${MAILCOW_HOSTNAME}/key.pem ]] && verify_hash_match ${ACME_BASE}/${MAILCOW_HOSTNAME}/cert.pem ${ACME_BASE}/${MAILCOW_HOSTNAME}/key.pem; then
+ log_f "Restoring previous acme certificate and restarting script..."
+ cp ${ACME_BASE}/${MAILCOW_HOSTNAME}/cert.pem ${ACME_BASE}/cert.pem
+ cp ${ACME_BASE}/${MAILCOW_HOSTNAME}/key.pem ${ACME_BASE}/key.pem
+ # Restarting with env var set to trigger a restart,
+ exec env TRIGGER_RESTART=1 $(readlink -f "$0")
+ else
+ log_f "Restoring mailcow snake-oil certificates and restarting script..."
+ cp ${SSL_EXAMPLE}/cert.pem ${ACME_BASE}/cert.pem
+ cp ${SSL_EXAMPLE}/key.pem ${ACME_BASE}/key.pem
+ exec env TRIGGER_RESTART=1 $(readlink -f "$0")
+ fi
+fi
+
+chmod 600 ${ACME_BASE}/key.pem
+
+log_f "Waiting for database..."
+while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent > /dev/null; do
+ sleep 2
+done
+log_f "Database OK"
+
+log_f "Waiting for Nginx..."
+# NOTE(review): the $(...) around curl is redundant (it executes curl's empty
+# output as a command on success); plain "until curl ...; do" is the clean form.
+until $(curl --output /dev/null --silent --head --fail http://nginx:8081); do
+ sleep 2
+done
+log_f "Nginx OK"
+
+log_f "Waiting for resolver..."
+until dig letsencrypt.org +time=3 +tries=1 @unbound > /dev/null; do
+ sleep 2
+done
+log_f "Resolver OK"
+
+# Waiting for domain table
+log_f "Waiting for domain table..."
+while [[ -z ${DOMAIN_TABLE} ]]; do
+ # Hit the web UI first - presumably to trigger its DB bootstrap; verify.
+ curl --silent http://nginx/ >/dev/null 2>&1
+ DOMAIN_TABLE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'domain'" -Bs)
+ [[ -z ${DOMAIN_TABLE} ]] && sleep 10
+done
+log_f "OK" no_date
+
+log_f "Initializing, please wait..."
+
+# Main loop: each pass validates all candidate domains, obtains/renews certs
+# as needed, reloads consumers, then sleeps (1d on success, 30m on errors).
+while true; do
+ # Remember the currently served cert serials so the reload loop at the end
+ # can detect that postfix/dovecot really picked up a new certificate.
+ POSTFIX_CERT_SERIAL="$(echo | openssl s_client -connect postfix:25 -starttls smtp 2>/dev/null | openssl x509 -inform pem -noout -serial | cut -d "=" -f 2)"
+ DOVECOT_CERT_SERIAL="$(echo | openssl s_client -connect dovecot:143 -starttls imap 2>/dev/null | openssl x509 -inform pem -noout -serial | cut -d "=" -f 2)"
+ POSTFIX_CERT_SERIAL_NEW="$(echo | openssl s_client -connect postfix:25 -starttls smtp 2>/dev/null | openssl x509 -inform pem -noout -serial | cut -d "=" -f 2)"
+ DOVECOT_CERT_SERIAL_NEW="$(echo | openssl s_client -connect dovecot:143 -starttls imap 2>/dev/null | openssl x509 -inform pem -noout -serial | cut -d "=" -f 2)"
+ # Re-using previous acme-mailcow account and domain keys
+ if [[ ! -f ${ACME_BASE}/acme/key.pem ]]; then
+ log_f "Generating missing domain private rsa key..."
+ openssl genrsa 4096 > ${ACME_BASE}/acme/key.pem
+ else
+ log_f "Using existing domain rsa key ${ACME_BASE}/acme/key.pem"
+ fi
+ if [[ ! -f ${ACME_BASE}/acme/account.pem ]]; then
+ log_f "Generating missing Lets Encrypt account key..."
+ openssl genrsa 4096 > ${ACME_BASE}/acme/account.pem
+ else
+ log_f "Using existing Lets Encrypt account key ${ACME_BASE}/acme/account.pem"
+ fi
+
+ chmod 600 ${ACME_BASE}/acme/key.pem
+ chmod 600 ${ACME_BASE}/acme/account.pem
+
+ # Snapshot complete per-domain cert dirs (used for orphan cleanup below).
+ unset EXISTING_CERTS
+ declare -a EXISTING_CERTS
+ for cert_dir in ${ACME_BASE}/*/ ; do
+ if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
+ continue
+ fi
+ EXISTING_CERTS+=("$(basename ${cert_dir})")
+ done
+
+ # Cleaning up and init validation arrays
+ unset SQL_DOMAIN_ARR
+ unset VALIDATED_CONFIG_DOMAINS
+ unset ADDITIONAL_VALIDATED_SAN
+ unset ADDITIONAL_WC_ARR
+ unset ADDITIONAL_SAN_ARR
+ unset CERT_ERRORS
+ unset CERT_CHANGED
+ unset CERT_AMOUNT_CHANGED
+ unset VALIDATED_CERTIFICATES
+ CERT_ERRORS=0
+ CERT_CHANGED=0
+ CERT_AMOUNT_CHANGED=0
+ declare -a SQL_DOMAIN_ARR
+ declare -a VALIDATED_CONFIG_DOMAINS
+ declare -a ADDITIONAL_VALIDATED_SAN
+ declare -a ADDITIONAL_WC_ARR
+ declare -a ADDITIONAL_SAN_ARR
+ declare -a VALIDATED_CERTIFICATES
+ # ADDITIONAL_SAN entries ending in ".*" become per-mail-domain prefixes
+ # (e.g. "smtp.*" -> smtp.<domain>); everything else is a literal SAN.
+ IFS=',' read -r -a TMP_ARR <<< "${ADDITIONAL_SAN}"
+ for i in "${TMP_ARR[@]}" ; do
+ if [[ "$i" =~ \.\*$ ]]; then
+ ADDITIONAL_WC_ARR+=(${i::-2})
+ else
+ ADDITIONAL_SAN_ARR+=($i)
+ fi
+ done
+ ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig')
+
+ # Start IP detection
+ log_f "Detecting IP addresses..."
+ IPV4=$(get_ipv4)
+ IPV6=$(get_ipv6)
+ log_f "OK: ${IPV4}, ${IPV6:-"0000:0000:0000:0000:0000:0000:0000:0000"}"
+
+ # Hard-fail on CAA errors for MAILCOW_HOSTNAME
+ MH_PARENT_DOMAIN=$(echo ${MAILCOW_HOSTNAME} | cut -d. -f2-)
+ # NOTE(review): GNU sed BRE has no \d class, so this substitution likely
+ # never captures issuers (should be [0-9]) - TODO confirm and fix upstream.
+ MH_CAAS=( $(dig CAA ${MH_PARENT_DOMAIN} +short | sed -n 's/\d issue "\(.*\)"/\1/p') )
+ if [[ ! -z ${MH_CAAS} ]]; then
+ if [[ ${MH_CAAS[@]} =~ "letsencrypt.org" ]]; then
+ log_f "Validated CAA for parent domain ${MH_PARENT_DOMAIN}"
+ else
+ log_f "Skipping ACME validation: Lets Encrypt disallowed for ${MAILCOW_HOSTNAME} by CAA record, retrying in 1h..."
+ sleep 1h
+ exec $(readlink -f "$0")
+ fi
+ fi
+
+ #########################################
+ # IP and webroot challenge verification #
+ SQL_DOMAINS=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain WHERE backupmx=0" -Bs)
+ if [[ ! $? -eq 0 ]]; then
+ log_f "Failed to read SQL domains, retrying in 1 minute..."
+ sleep 1m
+ exec $(readlink -f "$0")
+ fi
+ while read domains; do
+ if [[ -z "${domains}" ]]; then
+ # ignore empty lines
+ continue
+ fi
+ SQL_DOMAIN_ARR+=("${domains}")
+ done <<< "${SQL_DOMAINS}"
+
+ # For every mail domain, validate each wildcard-derived subdomain; one
+ # space-joined entry per domain is appended to VALIDATED_CONFIG_DOMAINS.
+ if [[ ${ONLY_MAILCOW_HOSTNAME} != "y" ]]; then
+ for SQL_DOMAIN in "${SQL_DOMAIN_ARR[@]}"; do
+ unset VALIDATED_CONFIG_DOMAINS_SUBDOMAINS
+ declare -a VALIDATED_CONFIG_DOMAINS_SUBDOMAINS
+ for SUBDOMAIN in "${ADDITIONAL_WC_ARR[@]}"; do
+ if [[ "${SUBDOMAIN}.${SQL_DOMAIN}" != "${MAILCOW_HOSTNAME}" ]]; then
+ if check_domain "${SUBDOMAIN}.${SQL_DOMAIN}"; then
+ VALIDATED_CONFIG_DOMAINS_SUBDOMAINS+=("${SUBDOMAIN}.${SQL_DOMAIN}")
+ fi
+ fi
+ done
+ VALIDATED_CONFIG_DOMAINS+=("${VALIDATED_CONFIG_DOMAINS_SUBDOMAINS[*]}")
+ done
+ fi
+
+ if check_domain ${MAILCOW_HOSTNAME}; then
+ VALIDATED_MAILCOW_HOSTNAME="${MAILCOW_HOSTNAME}"
+ fi
+
+ if [[ ${ONLY_MAILCOW_HOSTNAME} != "y" ]]; then
+ for SAN in "${ADDITIONAL_SAN_ARR[@]}"; do
+ # Skip on CAA errors for SAN
+ SAN_PARENT_DOMAIN=$(echo ${SAN} | cut -d. -f2-)
+ # NOTE(review): same unsupported \d class as above - TODO confirm.
+ SAN_CAAS=( $(dig CAA ${SAN_PARENT_DOMAIN} +short | sed -n 's/\d issue "\(.*\)"/\1/p') )
+ if [[ ! -z ${SAN_CAAS} ]]; then
+ if [[ ${SAN_CAAS[@]} =~ "letsencrypt.org" ]]; then
+ log_f "Validated CAA for parent domain ${SAN_PARENT_DOMAIN} of ${SAN}"
+ else
+ log_f "Skipping ACME validation for ${SAN}: Lets Encrypt disallowed for ${SAN} by CAA record"
+ continue
+ fi
+ fi
+ if [[ ${SAN} == ${MAILCOW_HOSTNAME} ]]; then
+ continue
+ fi
+ if check_domain ${SAN}; then
+ ADDITIONAL_VALIDATED_SAN+=("${SAN}")
+ fi
+ done
+ fi
+
+ # Unique domains for server certificate
+ if [[ ${ENABLE_SSL_SNI} == "y" ]]; then
+ # create certificate for server name and fqdn SANs only
+ SERVER_SAN_VALIDATED=(${VALIDATED_MAILCOW_HOSTNAME} $(echo ${ADDITIONAL_VALIDATED_SAN[*]} | xargs -n1 | sort -u | xargs))
+ else
+ # create certificate for all domains, including all subdomains from other domains [*]
+ SERVER_SAN_VALIDATED=(${VALIDATED_MAILCOW_HOSTNAME} $(echo ${VALIDATED_CONFIG_DOMAINS[*]} ${ADDITIONAL_VALIDATED_SAN[*]} | xargs -n1 | sort -u | xargs))
+ fi
+ if [[ ! -z ${SERVER_SAN_VALIDATED[*]} ]]; then
+ CERT_NAME=${SERVER_SAN_VALIDATED[0]}
+ VALIDATED_CERTIFICATES+=("${CERT_NAME}")
+
+ # obtain server certificate if required
+ DOMAINS=${SERVER_SAN_VALIDATED[@]} /srv/obtain-certificate.sh rsa
+ RETURN="$?"
+ if [[ "$RETURN" == "0" ]]; then # 0 = cert created successfully
+ CERT_AMOUNT_CHANGED=1
+ CERT_CHANGED=1
+ elif [[ "$RETURN" == "1" ]]; then # 1 = cert renewed successfully
+ CERT_CHANGED=1
+ elif [[ "$RETURN" == "2" ]]; then # 2 = cert not due for renewal
+ :
+ else
+ CERT_ERRORS=1
+ fi
+ # copy hostname certificate to default/server certificate
+ # do not copy a key when the cert is missing, this can lead to a mismatch of cert/key
+ if [[ -f ${ACME_BASE}/${CERT_NAME}/cert.pem ]]; then
+ cp ${ACME_BASE}/${CERT_NAME}/cert.pem ${ACME_BASE}/cert.pem
+ cp ${ACME_BASE}/${CERT_NAME}/key.pem ${ACME_BASE}/key.pem
+ fi
+ fi
+
+ # individual certificates for SNI [@]
+ if [[ ${ENABLE_SSL_SNI} == "y" ]]; then
+ for VALIDATED_DOMAINS in "${VALIDATED_CONFIG_DOMAINS[@]}"; do
+ VALIDATED_DOMAINS_ARR=(${VALIDATED_DOMAINS})
+
+ # Keep the first name as cert name, dedupe/sort the rest.
+ unset VALIDATED_DOMAINS_SORTED
+ declare -a VALIDATED_DOMAINS_SORTED
+ VALIDATED_DOMAINS_SORTED=(${VALIDATED_DOMAINS_ARR[0]} $(echo ${VALIDATED_DOMAINS_ARR[@]:1} | xargs -n1 | sort -u | xargs))
+
+ # remove all domain names that are already inside the server certificate (SERVER_SAN_VALIDATED)
+ for domain in "${SERVER_SAN_VALIDATED[@]}"; do
+ for i in "${!VALIDATED_DOMAINS_SORTED[@]}"; do
+ if [[ ${VALIDATED_DOMAINS_SORTED[i]} = $domain ]]; then
+ unset 'VALIDATED_DOMAINS_SORTED[i]'
+ fi
+ done
+ done
+
+ if [[ ! -z ${VALIDATED_DOMAINS_SORTED[*]} ]]; then
+ CERT_NAME=${VALIDATED_DOMAINS_SORTED[0]}
+ VALIDATED_CERTIFICATES+=("${CERT_NAME}")
+ # obtain certificate if required
+ DOMAINS=${VALIDATED_DOMAINS_SORTED[@]} /srv/obtain-certificate.sh rsa
+ RETURN="$?"
+ if [[ "$RETURN" == "0" ]]; then # 0 = cert created successfully
+ CERT_AMOUNT_CHANGED=1
+ CERT_CHANGED=1
+ elif [[ "$RETURN" == "1" ]]; then # 1 = cert renewed successfully
+ CERT_CHANGED=1
+ elif [[ "$RETURN" == "2" ]]; then # 2 = cert not due for renewal
+ :
+ else
+ CERT_ERRORS=1
+ fi
+ fi
+ done
+ fi
+
+ if [[ -z ${VALIDATED_CERTIFICATES[*]} ]]; then
+ log_f "Cannot validate any hostnames, skipping Let's Encrypt for 1 hour."
+ log_f "Use SKIP_LETS_ENCRYPT=y in mailcow.conf to skip it permanently."
+ ${REDIS_CMDLINE} SET ACME_FAIL_TIME "$(date +%s)"
+ sleep 1h
+ exec $(readlink -f "$0")
+ fi
+
+ # find orphaned certificates if no errors occurred
+ if [[ "${CERT_ERRORS}" == "0" ]]; then
+ for EXISTING_CERT in "${EXISTING_CERTS[@]}"; do
+ if [[ ! "`printf '_%s_\n' "${VALIDATED_CERTIFICATES[@]}"`" == *"_${EXISTING_CERT}_"* ]]; then
+ DATE=$(date +%Y-%m-%d_%H_%M_%S)
+ log_f "Found orphaned certificate: ${EXISTING_CERT} - archiving it at ${ACME_BASE}/backups/${EXISTING_CERT}/"
+ BACKUP_DIR=${ACME_BASE}/backups/${EXISTING_CERT}/${DATE}
+ # archive rsa cert and any other files
+ mkdir -p ${ACME_BASE}/backups/${EXISTING_CERT}
+ mv ${ACME_BASE}/${EXISTING_CERT} ${BACKUP_DIR}
+ CERT_CHANGED=1
+ CERT_AMOUNT_CHANGED=1
+ fi
+ done
+ fi
+
+ # reload on new or changed certificates
+ if [[ "${CERT_CHANGED}" == "1" ]]; then
+ rm -f "${ACME_BASE}/force_renew" 2> /dev/null
+ RELOAD_LOOP_C=1
+ # Reload until both services present a new serial (the -ne 36 length check
+ # assumes LE-style serials); give up after 3 attempts.
+ while [[ "${POSTFIX_CERT_SERIAL}" == "${POSTFIX_CERT_SERIAL_NEW}" ]] || [[ "${DOVECOT_CERT_SERIAL}" == "${DOVECOT_CERT_SERIAL_NEW}" ]] || [[ ${#POSTFIX_CERT_SERIAL_NEW} -ne 36 ]] || [[ ${#DOVECOT_CERT_SERIAL_NEW} -ne 36 ]]; do
+ log_f "Reloading or restarting services... (${RELOAD_LOOP_C})"
+ RELOAD_LOOP_C=$((RELOAD_LOOP_C + 1))
+ CERT_AMOUNT_CHANGED=${CERT_AMOUNT_CHANGED} /srv/reload-configurations.sh
+ log_f "Waiting for containers to settle..."
+ sleep 10
+ until nc -z dovecot 143; do
+ sleep 1
+ done
+ until nc -z postfix 25; do
+ sleep 1
+ done
+ POSTFIX_CERT_SERIAL_NEW="$(echo | openssl s_client -connect postfix:25 -starttls smtp 2>/dev/null | openssl x509 -inform pem -noout -serial | cut -d "=" -f 2)"
+ DOVECOT_CERT_SERIAL_NEW="$(echo | openssl s_client -connect dovecot:143 -starttls imap 2>/dev/null | openssl x509 -inform pem -noout -serial | cut -d "=" -f 2)"
+ if [[ ${RELOAD_LOOP_C} -gt 3 ]]; then
+ log_f "Some services do return old end dates, something went wrong!"
+ ${REDIS_CMDLINE} SET ACME_FAIL_TIME "$(date +%s)"
+ break;
+ fi
+ done
+ fi
+
+ case "$CERT_ERRORS" in
+ 0) # all successful
+ if [[ "${CERT_CHANGED}" == "1" ]]; then
+ if [[ "${CERT_AMOUNT_CHANGED}" == "1" ]]; then
+ log_f "Certificates successfully requested and renewed where required, sleeping one day"
+ else
+ log_f "Certificates were successfully renewed where required, sleeping for another day."
+ fi
+ else
+ log_f "Certificates were successfully validated, no changes or renewals required, sleeping for another day."
+ fi
+ sleep 1d
+ ;;
+ *) # non-zero
+ log_f "Some errors occurred, retrying in 30 minutes..."
+ ${REDIS_CMDLINE} SET ACME_FAIL_TIME "$(date +%s)"
+ sleep 30m
+ exec $(readlink -f "$0")
+ ;;
+ esac
+
+done
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/expand6.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/expand6.sh
new file mode 100755
index 0000000..a781722
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/expand6.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+##################################################################################
+#
+# Copyright (C) 2017 Craig Miller
+#
+# See the file "LICENSE" for information on usage and redistribution
+# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+# Distributed under GPLv2 License
+#
+##################################################################################
+
+
+# IPv6 Address Expansion functions
+#
+# by Craig Miller 19 Feb 2017
+#
+# 16 Nov 2017 v0.93 - added CLI functionality
+
+
+VERSION=0.93
+
+# Fully expanded all-zeros address; serves both as the zero-fill source and as
+# the reference length when reconstructing "::" gaps in expand().
+empty_addr="0000:0000:0000:0000:0000:0000:0000:0000"
+empty_addr_len=${#empty_addr}
+
+# Print CLI usage and exit non-zero.
+function usage {
+ echo " $0 - expand compressed IPv6 addresss "
+ echo " e.g. $0 2001:db8:1:12:123::456 "
+ echo " "
+ echo " -t self test"
+ echo " "
+ echo " By Craig Miller - Version: $VERSION"
+ exit 1
+ }
+
+if [ "$1" == "-h" ]; then
+ #call help
+ usage
+fi
+
+#
+# Expands IPv6 quibble to 4 digits with leading zeros e.g. db8 -> 0db8
+#
+# Returns string with expanded quibble
+
+function expand_quibble() {
+ addr=$1
+ # create array of quibbles
+ addr_array=(${addr//:/ })
+ addr_array_len=${#addr_array[@]}
+ # step thru quibbles
+ for ((i=0; i< $addr_array_len ; i++ ))
+ do
+ quibble=${addr_array[$i]}
+ quibble_len=${#quibble}
+ # left-pad to 4 hex digits based on current length
+ case $quibble_len in
+ 1) quibble="000$quibble";;
+ 2) quibble="00$quibble";;
+ 3) quibble="0$quibble";;
+ esac
+ addr_array[$i]=$quibble
+ done
+ # reconstruct addr from quibbles
+ return_str=${addr_array[*]}
+ return_str="${return_str// /:}"
+ echo $return_str
+}
+
+#
+# Expands IPv6 address :: format to full zeros
+#
+# Returns string with expanded address
+
+function expand() {
+ if [[ $1 == *"::"* ]]; then
+ # check for leading zeros on front_addr
+ if [[ $1 == "::"* ]]; then
+ front_addr=0
+ else
+ front_addr=$(echo $1 | sed -r 's;([^ ]+)::.*;\1;')
+ fi
+ # check for trailing zeros on back_addr
+ if [[ $1 == *"::" ]]; then
+ back_addr=0
+ else
+ back_addr=$(echo $1 | sed -r 's;.*::([^ ]+);\1;')
+ fi
+ front_addr=$(expand_quibble $front_addr)
+ back_addr=$(expand_quibble $back_addr)
+
+ new_addr=$empty_addr
+ front_addr_len=${#front_addr}
+ back_addr_len=${#back_addr}
+ # calculate fill needed
+ num_zeros=$(($empty_addr_len - $front_addr_len - $back_addr_len - 1))
+
+ #fill_str=${empty_addr[0]:0:$num_zeros}
+ # splice the zero fill (taken from empty_addr) between front and back parts
+ new_addr="$front_addr:${empty_addr[0]:0:$num_zeros}$back_addr"
+
+ # return expanded address
+ echo $new_addr
+ else
+ # return input with expanded quibbles
+ expand_quibble $1
+ fi
+}
+
+# self test - call with '-t' parameter
+if [ "$1" == "-t" ]; then
+ # add address examples to test
+ expand fd11::1d70:cf84:18ef:d056
+ expand 2a01::1
+ expand fe80::f203:8cff:fe3f:f041
+ expand 2001:db8:123::5
+ expand 2001:470:ebbd:0:f203:8cff:fe3f:f041
+ # special cases
+ expand ::1
+ expand fd32:197d:3022:1101::
+ exit 1
+fi
+
+# allow script to be sourced (with no arguments)
+if [[ $1 != "" ]]; then
+ # validate input is an IPv6 address
+ if [[ $1 == *":"* ]]; then
+ expand $1
+ else
+ echo "ERROR: unregcognized IPv6 address $1"
+ exit 1
+ fi
+fi
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/functions.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/functions.sh
new file mode 100644
index 0000000..454946d
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/functions.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+# log_f <message> [no_nl|no_date|redis_only] [b64]
+# Echoes to stdout (suppressed for "redis_only") and pushes a JSON entry onto
+# the Redis ACME_LOG list. With "b64" the caller supplies an already
+# base64-encoded message, stored with a "base64," prefix instead of being
+# sanitized character-by-character.
+log_f() {
+ if [[ ${2} == "no_nl" ]]; then
+ echo -n "$(date) - ${1}"
+ elif [[ ${2} == "no_date" ]]; then
+ echo "${1}"
+ elif [[ ${2} != "redis_only" ]]; then
+ echo "$(date) - ${1}"
+ fi
+ if [[ ${3} == "b64" ]]; then
+ ${REDIS_CMDLINE} LPUSH ACME_LOG "{\"time\":\"$(date +%s)\",\"message\":\"base64,$(printf '%s' "${MAILCOW_HOSTNAME} - ${1}")\"}" > /dev/null
+ else
+ # Strip characters that would break the hand-built JSON payload.
+ ${REDIS_CMDLINE} LPUSH ACME_LOG "{\"time\":\"$(date +%s)\",\"message\":\"$(printf '%s' "${MAILCOW_HOSTNAME} - ${1}" | \
+ tr '%&;$"[]{}-\r\n' ' ')\"}" > /dev/null
+ fi
+}
+
+# verify_hash_match <cert.pem> <key.pem>
+# Returns 0 when the certificate's embedded public key matches the private
+# key (compared via md5 of the derived public keys), 1 otherwise.
+verify_hash_match(){
+ CERT_HASH=$(openssl x509 -in "${1}" -noout -pubkey | openssl md5)
+ KEY_HASH=$(openssl pkey -in "${2}" -pubout | openssl md5)
+ if [[ ${CERT_HASH} != ${KEY_HASH} ]]; then
+ log_f "Certificate and key hashes do not match!"
+ return 1
+ else
+ log_f "Verified hashes."
+ return 0
+ fi
+}
+
+# Detect the public IPv4 address by querying one of two echo services picked
+# at random per attempt; up to 10 attempts, sleeping 1s between retries.
+# Echoes the address (empty string when all attempts failed).
+get_ipv4(){
+ local IPV4=
+ local IPV4_SRCS=
+ local TRY=
+ IPV4_SRCS[0]="ip4.mailcow.email"
+ IPV4_SRCS[1]="ip4.korves.net"
+ until [[ ! -z ${IPV4} ]] || [[ ${TRY} -ge 10 ]]; do
+ # grep validates the response as a dotted-quad before accepting it
+ IPV4=$(curl --connect-timeout 3 -m 10 -L4s ${IPV4_SRCS[$RANDOM % ${#IPV4_SRCS[@]} ]} | grep -E "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")
+ [[ ! -z ${TRY} ]] && sleep 1
+ TRY=$((TRY+1))
+ done
+ echo ${IPV4}
+}
+
+# IPv6 counterpart of get_ipv4: random source per attempt, max 10 attempts.
+# Echoes the address (empty string when the host has no public IPv6).
+get_ipv6(){
+ local IPV6=
+ local IPV6_SRCS=
+ local TRY=
+ IPV6_SRCS[0]="ip6.korves.net"
+ IPV6_SRCS[1]="ip6.mailcow.email"
+ until [[ ! -z ${IPV6} ]] || [[ ${TRY} -ge 10 ]]; do
+ # grep validates the response as a colon-separated IPv6 literal
+ IPV6=$(curl --connect-timeout 3 -m 10 -L6s ${IPV6_SRCS[$RANDOM % ${#IPV6_SRCS[@]} ]} | grep "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$")
+ [[ ! -z ${TRY} ]] && sleep 1
+ TRY=$((TRY+1))
+ done
+ echo ${IPV6}
+}
+
+# check_domain <fqdn>
+# Returns 0 when DNS for <fqdn> points at this host (AAAA is preferred over A
+# when present) AND the HTTP challenge path round-trips; 1 otherwise.
+check_domain(){
+ DOMAIN=$1
+ A_DOMAIN=$(dig A ${DOMAIN} +short | tail -n 1)
+ AAAA_DOMAIN=$(dig AAAA ${DOMAIN} +short | tail -n 1)
+ # Check if CNAME without v6 enabled target
+ if [[ ! -z ${AAAA_DOMAIN} ]] && [[ -z $(echo ${AAAA_DOMAIN} | grep "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$") ]]; then
+ AAAA_DOMAIN=
+ fi
+ if [[ ! -z ${AAAA_DOMAIN} ]]; then
+ log_f "Found AAAA record for ${DOMAIN}: ${AAAA_DOMAIN} - skipping A record check"
+ # NOTE(review): when SNAT6_TO_SOURCE is unset/empty, "!= n" is true and
+ # the IP comparison is effectively skipped - presumably mailcow.conf
+ # defaults it to "n"; verify against the caller's environment.
+ if [[ $(expand ${IPV6:-"0000:0000:0000:0000:0000:0000:0000:0000"}) == $(expand ${AAAA_DOMAIN}) ]] || [[ ${SKIP_IP_CHECK} == "y" ]] || [[ ${SNAT6_TO_SOURCE} != "n" ]]; then
+ if verify_challenge_path "${DOMAIN}" 6; then
+ log_f "Confirmed AAAA record with IP $(expand ${AAAA_DOMAIN})"
+ return 0
+ else
+ log_f "Confirmed AAAA record with IP $(expand ${AAAA_DOMAIN}), but HTTP validation failed"
+ fi
+ else
+ log_f "Cannot match your IP $(expand ${IPV6:-"0000:0000:0000:0000:0000:0000:0000:0000"}) against hostname ${DOMAIN} (DNS returned $(expand ${AAAA_DOMAIN}))"
+ fi
+ elif [[ ! -z ${A_DOMAIN} ]]; then
+ log_f "Found A record for ${DOMAIN}: ${A_DOMAIN}"
+ # Same unset-SNAT caveat as above applies to SNAT_TO_SOURCE here.
+ if [[ ${IPV4:-ERR} == ${A_DOMAIN} ]] || [[ ${SKIP_IP_CHECK} == "y" ]] || [[ ${SNAT_TO_SOURCE} != "n" ]]; then
+ if verify_challenge_path "${DOMAIN}" 4; then
+ log_f "Confirmed A record ${A_DOMAIN}"
+ return 0
+ else
+ log_f "Confirmed A record with IP ${A_DOMAIN}, but HTTP validation failed"
+ fi
+ else
+ log_f "Cannot match your IP ${IPV4} against hostname ${DOMAIN} (DNS returned ${A_DOMAIN})"
+ fi
+ else
+ log_f "No A or AAAA record found for hostname ${DOMAIN}"
+ fi
+ return 1
+}
+
+# verify_challenge_path <fqdn> <4|6>
+# Writes a random token into the local ACME webroot, fetches it back via
+# http://<fqdn>/.well-known/acme-challenge/ using the given IP family, and
+# returns 0 only when the content round-trips; the token is always removed.
+verify_challenge_path(){
+ if [[ ${SKIP_HTTP_VERIFICATION} == "y" ]]; then
+ echo '(skipping check, returning 0)'
+ return 0
+ fi
+ # verify_challenge_path URL 4|6
+ RANDOM_N=${RANDOM}${RANDOM}${RANDOM}
+ echo ${RANDOM_N} > /var/www/acme/${RANDOM_N}
+ if [[ "$(curl --insecure -${2} -L http://${1}/.well-known/acme-challenge/${RANDOM_N} --silent)" == "${RANDOM_N}" ]]; then
+ rm /var/www/acme/${RANDOM_N}
+ return 0
+ else
+ rm /var/www/acme/${RANDOM_N}
+ return 1
+ fi
+}
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/obtain-certificate.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/obtain-certificate.sh
new file mode 100644
index 0000000..8264a2c
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/obtain-certificate.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+
+# Return values / exit codes
+# 0 = cert created successfully
+# 1 = cert renewed successfully
+# 2 = cert not due for renewal
+# * = errors
+
+
+source /srv/functions.sh
+
+# Caller passes the SAN list via the DOMAINS env var; the first entry names
+# the certificate directory under ${ACME_BASE}.
+CERT_DOMAINS=(${DOMAINS[@]})
+CERT_DOMAIN=${CERT_DOMAINS[0]}
+ACME_BASE=/var/lib/acme
+
+TYPE=${1}
+PREFIX=""
+# only support rsa certificates for now
+if [[ "${TYPE}" != "rsa" ]]; then
+ log_f "Unknown certificate type '${TYPE}' requested"
+ exit 5
+fi
+DOMAINS_FILE=${ACME_BASE}/${CERT_DOMAIN}/domains
+CERT=${ACME_BASE}/${CERT_DOMAIN}/${PREFIX}cert.pem
+SHARED_KEY=${ACME_BASE}/acme/${PREFIX}key.pem # must already exist
+KEY=${ACME_BASE}/${CERT_DOMAIN}/${PREFIX}key.pem
+CSR=${ACME_BASE}/${CERT_DOMAIN}/${PREFIX}acme.csr
+
+if [[ -z ${CERT_DOMAINS[*]} ]]; then
+ log_f "Missing CERT_DOMAINS to obtain a certificate"
+ exit 3
+fi
+
+# LE_STAGING wins over a custom DIRECTORY_URL; either way DIRECTORY_URL ends
+# up holding the full "--directory-url <url>" argument for acme-tiny.
+if [[ "${LE_STAGING}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ if [[ ! -z "${DIRECTORY_URL}" ]]; then
+ log_f "Cannot use DIRECTORY_URL with LE_STAGING=y - ignoring DIRECTORY_URL"
+ fi
+ log_f "Using Let's Encrypt staging servers"
+ DIRECTORY_URL='--directory-url https://acme-staging-v02.api.letsencrypt.org/directory'
+elif [[ ! -z "${DIRECTORY_URL}" ]]; then
+ log_f "Using custom directory URL ${DIRECTORY_URL}"
+ DIRECTORY_URL="--directory-url ${DIRECTORY_URL}"
+fi
+
+# Decide whether anything must be done: unchanged SAN set plus a cert more
+# than 30 days from expiry (and no force_renew marker) means exit 2.
+if [[ -f ${DOMAINS_FILE} && "$(cat ${DOMAINS_FILE})" == "${CERT_DOMAINS[*]}" ]]; then
+ if [[ ! -f ${CERT} || ! -f "${KEY}" || -f "${ACME_BASE}/force_renew" ]]; then
+ log_f "Certificate ${CERT} doesn't exist yet or forced renewal - start obtaining"
+ # Certificate exists and did not change but could be due for renewal (30 days)
+ elif ! openssl x509 -checkend 2592000 -noout -in ${CERT} > /dev/null; then
+ log_f "Certificate ${CERT} is due for renewal (< 30 days) - start renewing"
+ else
+ log_f "Certificate ${CERT} validation done, neither changed nor due for renewal."
+ exit 2
+ fi
+else
+ log_f "Certificate ${CERT} missing or changed domains '${CERT_DOMAINS[*]}' - start obtaining"
+fi
+
+
+# Make backup
+if [[ -f ${CERT} ]]; then
+ DATE=$(date +%Y-%m-%d_%H_%M_%S)
+ BACKUP_DIR=${ACME_BASE}/backups/${CERT_DOMAIN}/${PREFIX}${DATE}
+ log_f "Creating backups in ${BACKUP_DIR} ..."
+ mkdir -p ${BACKUP_DIR}/
+ [[ -f ${DOMAINS_FILE} ]] && cp ${DOMAINS_FILE} ${BACKUP_DIR}/
+ [[ -f ${CERT} ]] && cp ${CERT} ${BACKUP_DIR}/
+ [[ -f ${KEY} ]] && cp ${KEY} ${BACKUP_DIR}/
+ [[ -f ${CSR} ]] && cp ${CSR} ${BACKUP_DIR}/
+fi
+
+# All per-domain certs share the key generated by acme.sh under acme/.
+mkdir -p ${ACME_BASE}/${CERT_DOMAIN}
+if [[ ! -f ${KEY} ]]; then
+ log_f "Copying shared private key for this certificate..."
+ cp ${SHARED_KEY} ${KEY}
+ chmod 600 ${KEY}
+fi
+
+# Generating CSR
+# Build a [SAN] config section (DNS:a,DNS:b,...) and sign a subject-less CSR.
+printf "[SAN]\nsubjectAltName=" > /tmp/_SAN
+printf "DNS:%s," "${CERT_DOMAINS[@]}" >> /tmp/_SAN
+sed -i '$s/,$//' /tmp/_SAN
+openssl req -new -sha256 -key ${KEY} -subj "/" -reqexts SAN -config <(cat /etc/ssl/openssl.cnf /tmp/_SAN) > ${CSR}
+
+# acme-tiny writes info to stderr and certificate to stdout
+# The redirects will do the following:
+# - redirect stdout to temp certificate file
+# - redirect acme-tiny stderr to stdout (logs to variable ACME_RESPONSE)
+# - tee stderr to get live output and log to dockerd
+
+log_f "Checking resolver..."
+until dig letsencrypt.org +time=3 +tries=1 @unbound > /dev/null; do
+ sleep 2
+done
+log_f "Resolver OK"
+
+# fd 5 was opened by acme.sh (exec 5>&1), so tee mirrors the log live.
+ACME_RESPONSE=$(acme-tiny ${DIRECTORY_URL} \
+ --account-key ${ACME_BASE}/acme/account.pem \
+ --disable-check \
+ --csr ${CSR} \
+ --acme-dir /var/www/acme/ 2>&1 > /tmp/_cert.pem | tee /dev/fd/5; exit ${PIPESTATUS[0]})
+SUCCESS="$?"
+# Ship the full acme-tiny log to Redis base64-encoded, bypassing sanitizing.
+ACME_RESPONSE_B64=$(echo "${ACME_RESPONSE}" | openssl enc -e -A -base64)
+log_f "${ACME_RESPONSE_B64}" redis_only b64
+case "$SUCCESS" in
+ 0) # cert requested
+ log_f "Deploying certificate ${CERT}..."
+ # Deploy the new certificate and key
+ # Moving temp cert to {domain} folder
+ if verify_hash_match /tmp/_cert.pem ${KEY}; then
+ RETURN=0 # certificate created
+ if [[ -f ${CERT} ]]; then
+ RETURN=1 # certificate renewed
+ fi
+ mv -f /tmp/_cert.pem ${CERT}
+ echo -n ${CERT_DOMAINS[*]} > ${DOMAINS_FILE}
+ rm /var/www/acme/* 2> /dev/null
+ log_f "Certificate successfully obtained"
+ exit ${RETURN}
+ else
+ log_f "Certificate was successfully requested, but key and certificate have non-matching hashes, ignoring certificate"
+ exit 4
+ fi
+ ;;
+ *) # non-zero is non-fun
+ log_f "Failed to obtain certificate ${CERT} for domains '${CERT_DOMAINS[*]}'"
+ # NOTE(review): hardcodes "redis" instead of using ${REDIS_CMDLINE} like
+ # the rest of the scripts - breaks the REDIS_SLAVEOF_IP case; verify.
+ redis-cli -h redis SET ACME_FAIL_TIME "$(date +%s)"
+ # NOTE(review): "100${SUCCESS}" concatenates (e.g. 1001); shells truncate
+ # exit statuses to 8 bits, so callers only observe the low byte - verify.
+ exit 100${SUCCESS}
+ ;;
+esac
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/reload-configurations.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/reload-configurations.sh
new file mode 100644
index 0000000..d5461a4
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/reload-configurations.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# Reload (or restart) nginx, dovecot and postfix after certificates changed.
+# All actions go through the dockerapi HTTPS service; container IDs are
+# resolved by matching compose service/project labels.
+
+# Reading container IDs
+# Wrapping as array to ensure trimmed content when calling $NGINX etc.
+NGINX=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+DOVECOT=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+POSTFIX=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+
+# Ask dockerapi to run an in-container nginx reload; fall back to a restart if
+# the reload task does not report success.
+reload_nginx(){
+ echo "Reloading Nginx..."
+ NGINX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
+ [[ ${NGINX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Nginx, restarting container..."; restart_container ${NGINX} ; }
+}
+
+# Same reload-then-restart-on-failure pattern for Dovecot.
+reload_dovecot(){
+ echo "Reloading Dovecot..."
+ DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
+ [[ ${DOVECOT_RELOAD_RET} != 'success' ]] && { echo "Could not reload Dovecot, restarting container..."; restart_container ${DOVECOT} ; }
+}
+
+# Same reload-then-restart-on-failure pattern for Postfix.
+reload_postfix(){
+ echo "Reloading Postfix..."
+ POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
+ [[ ${POSTFIX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Postfix, restarting container..."; restart_container ${POSTFIX} ; }
+}
+
+# Restart each given container ID via dockerapi and echo the reported message.
+restart_container(){
+ for container in $*; do
+ echo "Restarting ${container}..."
+ C_REST_OUT=$(curl -X POST --insecure https://dockerapi/containers/${container}/restart --silent | jq -r '.msg')
+ echo "${C_REST_OUT}"
+ done
+}
+
+# When the number of certificates changed, a plain reload is not enough; all
+# three services are restarted. Otherwise nginx is reloaded, while dovecot and
+# postfix are still restarted (the reload_* calls are intentionally disabled).
+if [[ "${CERT_AMOUNT_CHANGED}" == "1" ]]; then
+ restart_container ${NGINX}
+ restart_container ${DOVECOT}
+ restart_container ${POSTFIX}
+else
+ reload_nginx
+ #reload_dovecot
+ restart_container ${DOVECOT}
+ #reload_postfix
+ restart_container ${POSTFIX}
+fi
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/Dockerfile
new file mode 100644
index 0000000..4c30cf2
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/Dockerfile
@@ -0,0 +1,61 @@
+FROM debian:buster-slim
+
+# NOTE(review): space-separated "LABEL maintainer" is the legacy form
+# (hadolint DL4000 territory) — kept byte-identical here.
+LABEL maintainer "André Peters <andre.peters@servercow.de>"
+
+# ClamAV release to build from source.
+ARG CLAMAV=0.103.0
+
+# Build ClamAV from source in a single layer, then purge the build-only -dev
+# packages and compiler in the same layer so they do not persist in the image.
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ ca-certificates \
+ zlib1g-dev \
+ libcurl4-openssl-dev \
+ libncurses5-dev \
+ libzip-dev \
+ libpcre2-dev \
+ libxml2-dev \
+ libssl-dev \
+ build-essential \
+ libjson-c-dev \
+ curl \
+ bash \
+ wget \
+ tzdata \
+ dnsutils \
+ rsync \
+ dos2unix \
+ netcat \
+ && rm -rf /var/lib/apt/lists/* \
+ && wget -O - https://www.clamav.net/downloads/production/clamav-${CLAMAV}.tar.gz | tar xfvz - \
+ && cd clamav-${CLAMAV} \
+ && ./configure \
+ --prefix=/usr \
+ --libdir=/usr/lib \
+ --sysconfdir=/etc/clamav \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --disable-llvm \
+ --with-user=clamav \
+ --with-group=clamav \
+ --with-dbdir=/var/lib/clamav \
+ --enable-clamdtop \
+ --enable-bigstack \
+ --with-pcre \
+ && make -j4 \
+ && make install \
+ && make clean \
+ && cd .. && rm -rf clamav-${CLAMAV} \
+ && apt-get -y --auto-remove purge build-essential \
+ && apt-get -y purge zlib1g-dev \
+ libncurses5-dev \
+ libzip-dev \
+ libpcre2-dev \
+ libxml2-dev \
+ libssl-dev \
+ libjson-c-dev \
+ && addgroup --system --gid 700 clamav \
+ && adduser --system --no-create-home --home /var/lib/clamav --uid 700 --gid 700 --disabled-login clamav \
+ && rm -rf /tmp/* /var/tmp/*
+
+# Entrypoint script plus a vendored static tini as PID 1 (signal forwarding,
+# zombie reaping for the background workers started by clamd.sh).
+COPY clamd.sh ./
+COPY tini /sbin/tini
+
+CMD ["/sbin/tini", "-g", "--", "/clamd.sh"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/clamd.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/clamd.sh
new file mode 100755
index 0000000..10df807
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/clamd.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+# Container entrypoint: prepares the ClamAV whitelist, runs freshclam and a
+# SaneSecurity rsync updater in the background, then supervises clamd.
+
+# Opt-out: keep the container alive but idle when SKIP_CLAMD is set to y/yes.
+if [[ "${SKIP_CLAMD}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ echo "SKIP_CLAMD=y, skipping ClamAV..."
+ sleep 365d
+ exit 0
+fi
+
+# Cleaning up garbage
+echo "Cleaning up tmp files..."
+rm -rf /var/lib/clamav/clamav-*.tmp
+
+# Prepare whitelist
+
+mkdir -p /run/clamav /var/lib/clamav
+
+# A non-empty administrator-provided whitelist wins over the stored one.
+if [[ -s /etc/clamav/whitelist.ign2 ]]; then
+ echo "Copying non-empty whitelist.ign2 to /var/lib/clamav/whitelist.ign2"
+ cp /etc/clamav/whitelist.ign2 /var/lib/clamav/whitelist.ign2
+fi
+
+# Seed a default whitelist on first run.
+if [[ ! -f /var/lib/clamav/whitelist.ign2 ]]; then
+ echo "Creating /var/lib/clamav/whitelist.ign2"
+ cat <<EOF > /var/lib/clamav/whitelist.ign2
+# Please restart ClamAV after changing signatures
+Example-Signature.Ignore-1
+PUA.Win.Trojan.EmbeddedPDF-1
+PUA.Pdf.Trojan.EmbeddedJavaScript-1
+PUA.Pdf.Trojan.OpenActionObjectwithJavascript-1
+EOF
+fi
+
+chown clamav:clamav -R /var/lib/clamav /run/clamav
+
+chmod 755 /var/lib/clamav
+chmod 644 -R /var/lib/clamav/*
+chmod 750 /run/clamav
+
+# Normalize line endings and strip blank lines from the whitelist.
+stat /var/lib/clamav/whitelist.ign2
+dos2unix /var/lib/clamav/whitelist.ign2
+sed -i '/^\s*$/d' /var/lib/clamav/whitelist.ign2
+# Copying to /etc/clamav to expose file as-is to administrator
+cp -p /var/lib/clamav/whitelist.ign2 /etc/clamav/whitelist.ign2
+
+
+BACKGROUND_TASKS=()
+
+echo "Running freshclam..."
+freshclam
+
+# Worker 1: refresh official signatures every 12600s (3.5h).
+(
+while true; do
+ sleep 12600
+ freshclam
+done
+) &
+BACKGROUND_TASKS+=($!)
+
+# Worker 2: pull SaneSecurity third-party signatures from the first reachable
+# rsync mirror, then tell clamd to RELOAD once freshclam is idle.
+(
+while true; do
+ sleep 10m
+ SANE_MIRRORS="$(dig +ignore +short rsync.sanesecurity.net)"
+ for sane_mirror in ${SANE_MIRRORS}; do
+ CE=
+ rsync -avp --chown=clamav:clamav --chmod=Du=rwx,Dgo=rx,Fu=rw,Fog=r --timeout=5 rsync://${sane_mirror}/sanesecurity/ \
+ --include 'blurl.ndb' \
+ --include 'junk.ndb' \
+ --include 'jurlbl.ndb' \
+ --include 'jurbla.ndb' \
+ --include 'phishtank.ndb' \
+ --include 'phish.ndb' \
+ --include 'spamimg.hdb' \
+ --include 'scam.ndb' \
+ --include 'rogue.hdb' \
+ --include 'sanesecurity.ftm' \
+ --include 'sigwhitelist.ign2' \
+ --exclude='*' /var/lib/clamav/
+ CE=$?
+ chmod 755 /var/lib/clamav/
+ if [ ${CE} -eq 0 ]; then
+ while [ ! -z "$(pidof freshclam)" ]; do
+ echo "Freshclam is active, waiting..."
+ sleep 5
+ done
+ echo RELOAD | nc clamd-mailcow 3310
+ break
+ fi
+ done
+ sleep 12h
+done
+) &
+BACKGROUND_TASKS+=($!)
+
+nice -n10 clamd &
+BACKGROUND_TASKS+=($!)
+
+# Supervisor loop: if any background worker (or clamd) dies, signal PID 1
+# (tini) so the whole container stops and the orchestrator can respawn it.
+while true; do
+ for bg_task in ${BACKGROUND_TASKS[*]}; do
+ if ! kill -0 ${bg_task} 1>&2; then
+ echo "Worker ${bg_task} died, stopping container waiting for respawn..."
+ kill -TERM 1
+ fi
+ sleep 10
+ done
+done
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/tini b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/tini
new file mode 100755
index 0000000..03af82f
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/tini
Binary files differ
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/Dockerfile
new file mode 100644
index 0000000..16c1af5
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/Dockerfile
@@ -0,0 +1,12 @@
+FROM alpine:3.11
+# NOTE(review): legacy space-separated "LABEL maintainer" form, kept as-is.
+LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
+
+WORKDIR /app
+
+# python3 on alpine 3.11 ships pip3; install the Flask/Docker SDK runtime deps.
+RUN apk add --update --no-cache python3 openssl tzdata \
+ && pip3 install --upgrade pip \
+ && pip3 install --upgrade docker flask flask-restful
+
+COPY dockerapi.py /app/
+
+# -u: unbuffered stdout/stderr so logs reach dockerd immediately.
+CMD ["python3", "-u", "/app/dockerapi.py"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py
new file mode 100644
index 0000000..20e9d0e
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py
@@ -0,0 +1,419 @@
+#!/usr/bin/env python3
+
+from flask import Flask
+from flask_restful import Resource, Api
+from flask import jsonify
+from flask import Response
+from flask import request
+from threading import Thread
+import docker
+import uuid
+import signal
+import time
+import os
+import re
+import sys
+import ssl
+import socket
+import subprocess
+import traceback
+
+# Module-level singletons: Docker client bound to the host socket, and the
+# Flask app plus its flask-restful API wrapper.
+docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
+app = Flask(__name__)
+api = Api(app)
+
+# GET /containers/json — attrs of every container (running or not), keyed by id.
+class containers_get(Resource):
+ def get(self):
+ containers = {}
+ try:
+ for container in docker_client.containers.list(all=True):
+ containers.update({container.attrs['Id']: container.attrs})
+ return containers
+ except Exception as e:
+ # Errors are reported in-band as a {type, msg} JSON payload.
+ return jsonify(type='danger', msg=str(e))
+
+# GET /containers/<id>/json — attrs of a single container matched by id filter.
+class container_get(Resource):
+ def get(self, container_id):
+ # isalnum() rejects ids with path or shell metacharacters.
+ if container_id and container_id.isalnum():
+ try:
+ for container in docker_client.containers.list(all=True, filters={"id": container_id}):
+ return container.attrs
+ except Exception as e:
+ return jsonify(type='danger', msg=str(e))
+ else:
+ return jsonify(type='danger', msg='no or invalid id defined')
+
+# POST /containers/<id>/<action> — dispatches to a method named
+# container_post__<action>[__<cmd>__<task>] built from the URL and JSON body.
+class container_post(Resource):
+ def post(self, container_id, post_action):
+ if container_id and container_id.isalnum() and post_action:
+ try:
+ """Dispatch container_post api call"""
+ if post_action == 'exec':
+ if not request.json or not 'cmd' in request.json:
+ return jsonify(type='danger', msg='cmd is missing')
+ if not request.json or not 'task' in request.json:
+ return jsonify(type='danger', msg='task is missing')
+
+ api_call_method_name = '__'.join(['container_post', str(post_action), str(request.json['cmd']), str(request.json['task']) ])
+ else:
+ api_call_method_name = '__'.join(['container_post', str(post_action) ])
+
+ # Unknown action/cmd/task names fall through to a danger response.
+ api_call_method = getattr(self, api_call_method_name, lambda container_id: jsonify(type='danger', msg='container_post - unknown api call'))
+
+
+ print("api call: %s, container_id: %s" % (api_call_method_name, container_id))
+ return api_call_method(container_id)
+ except Exception as e:
+ print("error - container_post: %s" % str(e))
+ return jsonify(type='danger', msg=str(e))
+
+ else:
+ return jsonify(type='danger', msg='invalid container id or missing action')
+
+
+ # api call: container_post - post_action: stop
+ def container_post__stop(self, container_id):
+ for container in docker_client.containers.list(all=True, filters={"id": container_id}):
+ container.stop()
+ return jsonify(type='success', msg='command completed successfully')
+
+
+ # api call: container_post - post_action: start
+ def container_post__start(self, container_id):
+ for container in docker_client.containers.list(all=True, filters={"id": container_id}):
+ container.start()
+ return jsonify(type='success', msg='command completed successfully')
+
+
+ # api call: container_post - post_action: restart
+ def container_post__restart(self, container_id):
+ for container in docker_client.containers.list(all=True, filters={"id": container_id}):
+ container.restart()
+ return jsonify(type='success', msg='command completed successfully')
+
+
+ # api call: container_post - post_action: top
+ def container_post__top(self, container_id):
+ for container in docker_client.containers.list(all=True, filters={"id": container_id}):
+ return jsonify(type='success', msg=container.top())
+
+
+ # api call: container_post - post_action: stats
+ def container_post__stats(self, container_id):
+ for container in docker_client.containers.list(all=True, filters={"id": container_id}):
+ # Only the first sample of the stats stream is returned.
+ for stat in container.stats(decode=True, stream=True):
+ return jsonify(type='success', msg=stat )
+
+
+ # api call: container_post - post_action: exec - cmd: mailq - task: delete
+ # NOTE(review): in Python 3, filter() returns a lazy filter object which is
+ # always truthy, so "if filtered_qids:" never rejects an empty selection —
+ # the same pattern repeats in the hold/cat/unhold/deliver handlers below.
+ def container_post__exec__mailq__delete(self, container_id):
+ if 'items' in request.json:
+ r = re.compile("^[0-9a-fA-F]+$")
+ filtered_qids = filter(r.match, request.json['items'])
+ if filtered_qids:
+ flagged_qids = ['-d %s' % i for i in filtered_qids]
+ sanitized_string = str(' '.join(flagged_qids));
+
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
+ return exec_run_handler('generic', postsuper_r)
+
+ # api call: container_post - post_action: exec - cmd: mailq - task: hold
+ def container_post__exec__mailq__hold(self, container_id):
+ if 'items' in request.json:
+ r = re.compile("^[0-9a-fA-F]+$")
+ filtered_qids = filter(r.match, request.json['items'])
+ if filtered_qids:
+ flagged_qids = ['-h %s' % i for i in filtered_qids]
+ sanitized_string = str(' '.join(flagged_qids));
+
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
+ return exec_run_handler('generic', postsuper_r)
+
+ # api call: container_post - post_action: exec - cmd: mailq - task: cat
+ def container_post__exec__mailq__cat(self, container_id):
+ if 'items' in request.json:
+ r = re.compile("^[0-9a-fA-F]+$")
+ filtered_qids = filter(r.match, request.json['items'])
+ if filtered_qids:
+ sanitized_string = str(' '.join(filtered_qids));
+
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ postcat_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postcat -q " + sanitized_string], user='postfix')
+ if not postcat_return:
+ postcat_return = 'err: invalid'
+ return exec_run_handler('utf8_text_only', postcat_return)
+
+ # api call: container_post - post_action: exec - cmd: mailq - task: unhold
+ def container_post__exec__mailq__unhold(self, container_id):
+ if 'items' in request.json:
+ r = re.compile("^[0-9a-fA-F]+$")
+ filtered_qids = filter(r.match, request.json['items'])
+ if filtered_qids:
+ flagged_qids = ['-H %s' % i for i in filtered_qids]
+ sanitized_string = str(' '.join(flagged_qids));
+
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
+ return exec_run_handler('generic', postsuper_r)
+
+
+ # api call: container_post - post_action: exec - cmd: mailq - task: deliver
+ def container_post__exec__mailq__deliver(self, container_id):
+ if 'items' in request.json:
+ r = re.compile("^[0-9a-fA-F]+$")
+ filtered_qids = filter(r.match, request.json['items'])
+ if filtered_qids:
+ flagged_qids = ['-i %s' % i for i in filtered_qids]
+
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ for i in flagged_qids:
+ postqueue_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postqueue " + i], user='postfix')
+ # todo: check each exit code
+ return jsonify(type='success', msg=str("Scheduled immediate delivery"))
+
+
+ # api call: container_post - post_action: exec - cmd: mailq - task: list
+ def container_post__exec__mailq__list(self, container_id):
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ mailq_return = container.exec_run(["/usr/sbin/postqueue", "-j"], user='postfix')
+ return exec_run_handler('utf8_text_only', mailq_return)
+
+
+ # api call: container_post - post_action: exec - cmd: mailq - task: flush
+ def container_post__exec__mailq__flush(self, container_id):
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ postqueue_r = container.exec_run(["/usr/sbin/postqueue", "-f"], user='postfix')
+ return exec_run_handler('generic', postqueue_r)
+
+
+ # api call: container_post - post_action: exec - cmd: mailq - task: super_delete
+ def container_post__exec__mailq__super_delete(self, container_id):
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ postsuper_r = container.exec_run(["/usr/sbin/postsuper", "-d", "ALL"])
+ return exec_run_handler('generic', postsuper_r)
+
+
+ # api call: container_post - post_action: exec - cmd: system - task: fts_rescan
+ # Single quotes in user input are escaped as '\'' before shell interpolation.
+ def container_post__exec__system__fts_rescan(self, container_id):
+ if 'username' in request.json:
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ rescan_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -u '" + request.json['username'].replace("'", "'\\''") + "'"], user='vmail')
+ if rescan_return.exit_code == 0:
+ return jsonify(type='success', msg='fts_rescan: rescan triggered')
+ else:
+ return jsonify(type='warning', msg='fts_rescan error')
+
+ if 'all' in request.json:
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ rescan_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -A"], user='vmail')
+ if rescan_return.exit_code == 0:
+ return jsonify(type='success', msg='fts_rescan: rescan triggered')
+ else:
+ return jsonify(type='warning', msg='fts_rescan error')
+
+
+ # api call: container_post - post_action: exec - cmd: system - task: df
+ def container_post__exec__system__df(self, container_id):
+ if 'dir' in request.json:
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ df_return = container.exec_run(["/bin/bash", "-c", "/bin/df -H '" + request.json['dir'].replace("'", "'\\''") + "' | /usr/bin/tail -n1 | /usr/bin/tr -s [:blank:] | /usr/bin/tr ' ' ','"], user='nobody')
+ if df_return.exit_code == 0:
+ # CSV line: size,used,avail,use%,mountpoint fields from df.
+ return df_return.output.decode('utf-8').rstrip()
+ else:
+ return "0,0,0,0,0,0"
+
+
+ # api call: container_post - post_action: exec - cmd: system - task: mysql_upgrade
+ def container_post__exec__system__mysql_upgrade(self, container_id):
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ sql_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/mysql_upgrade -uroot -p'" + os.environ['DBROOT'].replace("'", "'\\''") + "'\n"], user='mysql')
+ if sql_return.exit_code == 0:
+ matched = False
+ for line in sql_return.output.decode('utf-8').split("\n"):
+ if 'is already upgraded to' in line:
+ matched = True
+ if matched:
+ return jsonify(type='success', msg='mysql_upgrade: already upgraded', text=sql_return.output.decode('utf-8'))
+ else:
+ # An actual upgrade happened; restart mysql to pick it up.
+ container.restart()
+ return jsonify(type='warning', msg='mysql_upgrade: upgrade was applied', text=sql_return.output.decode('utf-8'))
+ else:
+ return jsonify(type='error', msg='mysql_upgrade: error running command', text=sql_return.output.decode('utf-8'))
+
+ # api call: container_post - post_action: exec - cmd: system - task: mysql_tzinfo_to_sql
+ def container_post__exec__system__mysql_tzinfo_to_sql(self, container_id):
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ sql_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/mysql_tzinfo_to_sql /usr/share/zoneinfo | /bin/sed 's/Local time zone must be set--see zic manual page/FCTY/' | /usr/bin/mysql -uroot -p'" + os.environ['DBROOT'].replace("'", "'\\''") + "' mysql \n"], user='mysql')
+ if sql_return.exit_code == 0:
+ return jsonify(type='info', msg='mysql_tzinfo_to_sql: command completed successfully', text=sql_return.output.decode('utf-8'))
+ else:
+ return jsonify(type='error', msg='mysql_tzinfo_to_sql: error running command', text=sql_return.output.decode('utf-8'))
+
+ # api call: container_post - post_action: exec - cmd: reload - task: dovecot
+ def container_post__exec__reload__dovecot(self, container_id):
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ reload_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/dovecot reload"])
+ return exec_run_handler('generic', reload_return)
+
+
+ # api call: container_post - post_action: exec - cmd: reload - task: postfix
+ def container_post__exec__reload__postfix(self, container_id):
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ reload_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postfix reload"])
+ return exec_run_handler('generic', reload_return)
+
+
+ # api call: container_post - post_action: exec - cmd: reload - task: nginx
+ def container_post__exec__reload__nginx(self, container_id):
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ reload_return = container.exec_run(["/bin/sh", "-c", "/usr/sbin/nginx -s reload"])
+ return exec_run_handler('generic', reload_return)
+
+
+ # api call: container_post - post_action: exec - cmd: sieve - task: list
+ def container_post__exec__sieve__list(self, container_id):
+ if 'username' in request.json:
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ sieve_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm sieve list -u '" + request.json['username'].replace("'", "'\\''") + "'"])
+ return exec_run_handler('utf8_text_only', sieve_return)
+
+
+ # api call: container_post - post_action: exec - cmd: sieve - task: print
+ def container_post__exec__sieve__print(self, container_id):
+ if 'username' in request.json and 'script_name' in request.json:
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request.json['username'].replace("'", "'\\''") + "' '" + request.json['script_name'].replace("'", "'\\''") + "'"]
+ sieve_return = container.exec_run(cmd)
+ return exec_run_handler('utf8_text_only', sieve_return)
+
+
+ # api call: container_post - post_action: exec - cmd: maildir - task: cleanup
+ # Moves the maildir into /var/vmail/_garbage/<timestamp>_<sanitized name>.
+ def container_post__exec__maildir__cleanup(self, container_id):
+ if 'maildir' in request.json:
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ sane_name = re.sub(r'\W+', '', request.json['maildir'])
+ cmd = ["/bin/bash", "-c", "if [[ -d '/var/vmail/" + request.json['maildir'].replace("'", "'\\''") + "' ]]; then /bin/mv '/var/vmail/" + request.json['maildir'].replace("'", "'\\''") + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "'; fi"]
+ maildir_cleanup = container.exec_run(cmd, user='vmail')
+ return exec_run_handler('generic', maildir_cleanup)
+
+
+
+ # api call: container_post - post_action: exec - cmd: rspamd - task: worker_password
+ # Hashes the raw password with rspamadm, writes it to the controller
+ # override file, verifies it round-trips, then restarts the container.
+ def container_post__exec__rspamd__worker_password(self, container_id):
+ if 'raw' in request.json:
+ for container in docker_client.containers.list(filters={"id": container_id}):
+ cmd = "/usr/bin/rspamadm pw -e -p '" + request.json['raw'].replace("'", "'\\''") + "' 2> /dev/null"
+ cmd_response = exec_cmd_container(container, cmd, user="_rspamd")
+ matched = False
+ for line in cmd_response.split("\n"):
+ if '$2$' in line:
+ hash = line.strip()
+ hash_out = re.search('\$2\$.+$', hash).group(0)
+ rspamd_passphrase_hash = re.sub('[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
+
+ rspamd_password_filename = "/etc/rspamd/override.d/worker-controller-password.inc"
+ cmd = '''/bin/echo 'enable_password = "%s";' > %s && cat %s''' % (rspamd_passphrase_hash, rspamd_password_filename, rspamd_password_filename)
+ cmd_response = exec_cmd_container(container, cmd, user="_rspamd")
+
+ if rspamd_passphrase_hash.startswith("$2$") and rspamd_passphrase_hash in cmd_response:
+ container.restart()
+ matched = True
+
+ if matched:
+ return jsonify(type='success', msg='command completed successfully')
+ else:
+ return jsonify(type='danger', msg='command did not complete')
+
+# Run a command through an interactive shell inside the container by writing
+# to the exec socket and reading the response with a soft timeout. Returns the
+# collected stdout text, or None if the exec/socket setup fails.
+def exec_cmd_container(container, cmd, user, timeout=2, shell_cmd="/bin/bash"):
+
+ # Drain the non-blocking socket; stop after `timeout` seconds of data or
+ # 2*timeout seconds total with nothing received.
+ def recv_socket_data(c_socket, timeout):
+ c_socket.setblocking(0)
+ total_data=[];
+ data='';
+ begin=time.time()
+ while True:
+ if total_data and time.time()-begin > timeout:
+ break
+ elif time.time()-begin > timeout*2:
+ break
+ try:
+ data = c_socket.recv(8192)
+ if data:
+ total_data.append(data.decode('utf-8'))
+ # change the beginning time for measurement
+ begin=time.time()
+ else:
+ # sleep for some time to indicate a gap
+ time.sleep(0.1)
+ break
+ except:
+ # EWOULDBLOCK on a non-blocking socket is expected; keep polling.
+ pass
+ return ''.join(total_data)
+
+ try :
+ # _sock is a private attribute of the SDK's socket wrapper.
+ socket = container.exec_run([shell_cmd], stdin=True, socket=True, user=user).output._sock
+ if not cmd.endswith("\n"):
+ cmd = cmd + "\n"
+ socket.send(cmd.encode('utf-8'))
+ data = recv_socket_data(socket, timeout)
+ socket.close()
+ return data
+
+ except Exception as e:
+ print("error - exec_cmd_container: %s" % str(e))
+ traceback.print_exc(file=sys.stdout)
+
+# Convert an exec_run result into an HTTP response.
+# 'generic': JSON success/danger based on the exit code.
+# 'utf8_text_only': raw decoded output as text/plain.
+def exec_run_handler(type, output):
+ if type == 'generic':
+ if output.exit_code == 0:
+ return jsonify(type='success', msg='command completed successfully')
+ else:
+ return jsonify(type='danger', msg='command failed: ' + output.output.decode('utf-8'))
+ if type == 'utf8_text_only':
+ r = Response(response=output.output.decode('utf-8'), status=200, mimetype="text/plain")
+ r.headers["Content-Type"] = "text/plain; charset=utf-8"
+ return r
+
+# Flips kill_now when SIGINT/SIGTERM arrives so the main loop can exit cleanly.
+class GracefulKiller:
+ kill_now = False
+ def __init__(self):
+ signal.signal(signal.SIGINT, self.exit_gracefully)
+ signal.signal(signal.SIGTERM, self.exit_gracefully)
+
+ def exit_gracefully(self, signum, frame):
+ self.kill_now = True
+
+# Generate a 10-year self-signed RSA cert/key pair for the API endpoint
+# (CN=dockerapi, SAN DNS:dockerapi) via the openssl CLI; blocks until done.
+def create_self_signed_cert():
+ process = subprocess.Popen(
+ "openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes -keyout /app/dockerapi_key.pem -out /app/dockerapi_cert.pem -subj /CN=dockerapi/O=mailcow -addext subjectAltName=DNS:dockerapi".split(),
+ stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell=False
+ )
+ process.wait()
+
+# Create the TLS context and serve the API on :443.
+# NOTE(review): the except branch prints and sleeps but does not loop, and
+# `ctx` is only assigned inside try — on failure app.run() would raise
+# NameError instead of retrying; confirm intended behavior.
+def startFlaskAPI():
+ create_self_signed_cert()
+ try:
+ ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ ctx.check_hostname = False
+ ctx.load_cert_chain(certfile='/app/dockerapi_cert.pem', keyfile='/app/dockerapi_key.pem')
+ except:
+ print ("Cannot initialize TLS, retrying in 5s...")
+ time.sleep(5)
+ app.run(debug=False, host='0.0.0.0', port=443, threaded=True, ssl_context=ctx)
+
+# Route registration for the three resources defined above.
+api.add_resource(containers_get, '/containers/json')
+api.add_resource(container_get, '/containers/<string:container_id>/json')
+api.add_resource(container_post, '/containers/<string:container_id>/<string:post_action>')
+
+# Serve the API from a daemon thread; the main thread just waits for a
+# SIGINT/SIGTERM flagged by GracefulKiller, then exits.
+if __name__ == '__main__':
+ api_thread = Thread(target=startFlaskAPI)
+ api_thread.daemon = True
+ api_thread.start()
+ killer = GracefulKiller()
+ while True:
+ time.sleep(1)
+ if killer.kill_now:
+ break
+ print ("Stopping dockerapi-mailcow")
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/Dockerfile
new file mode 100644
index 0000000..691f9a7
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/Dockerfile
@@ -0,0 +1,126 @@
+FROM debian:buster-slim
+# NOTE(review): legacy space-separated "LABEL maintainer" form, kept as-is.
+LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
+
+ARG DEBIAN_FRONTEND=noninteractive
+# Dovecot CE version; also selects the dovecot.org apt repo path below.
+ARG DOVECOT=2.3.11.3
+ENV LC_ALL C
+
+# Add groups and users before installing Dovecot to not break compatibility
+RUN groupadd -g 5000 vmail \
+ && groupadd -g 401 dovecot \
+ && groupadd -g 402 dovenull \
+ && groupadd -g 999 sogo \
+ && usermod -a -G sogo nobody \
+ && useradd -g vmail -u 5000 vmail -d /var/vmail \
+ && useradd -c "Dovecot unprivileged user" -d /dev/null -u 401 -g dovecot -s /bin/false dovecot \
+ && useradd -c "Dovecot login user" -d /dev/null -u 402 -g dovenull -s /bin/false dovenull \
+ && touch /etc/default/locale \
+ && apt-get update \
+ && apt-get -y --no-install-recommends install \
+ apt-transport-https \
+ ca-certificates \
+ cpanminus \
+ cron \
+ curl \
+ dnsutils \
+ dirmngr \
+ gettext \
+ gnupg2 \
+ jq \
+ libauthen-ntlm-perl \
+ libcgi-pm-perl \
+ libcrypt-openssl-rsa-perl \
+ libcrypt-ssleay-perl \
+ libdata-uniqid-perl \
+ libdbd-mysql-perl \
+ libdbi-perl \
+ libdigest-hmac-perl \
+ libdist-checkconflicts-perl \
+ libencode-imaputf7-perl \
+ libfile-copy-recursive-perl \
+ libfile-tail-perl \
+ libhtml-parser-perl \
+ libio-compress-perl \
+ libio-socket-inet6-perl \
+ libio-socket-ssl-perl \
+ libio-tee-perl \
+ libipc-run-perl \
+ libjson-webtoken-perl \
+ liblockfile-simple-perl \
+ libmail-imapclient-perl \
+ libmodule-implementation-perl \
+ libmodule-scandeps-perl \
+ libnet-ssleay-perl \
+ libpackage-stash-perl \
+ libpackage-stash-xs-perl \
+ libpar-packer-perl \
+ libparse-recdescent-perl \
+ libproc-processtable-perl \
+ libreadonly-perl \
+ libregexp-common-perl \
+ libsys-meminfo-perl \
+ libterm-readkey-perl \
+ libtest-deep-perl \
+ libtest-fatal-perl \
+ libtest-mock-guard-perl \
+ libtest-mockobject-perl \
+ libtest-nowarnings-perl \
+ libtest-pod-perl \
+ libtest-requires-perl \
+ libtest-simple-perl \
+ libtest-warn-perl \
+ libtry-tiny-perl \
+ libunicode-string-perl \
+ liburi-perl \
+ libwww-perl \
+ lua-sql-mysql \
+ mariadb-client \
+ procps \
+ python3-pip \
+ redis-server \
+ supervisor \
+ syslog-ng \
+ syslog-ng-core \
+ syslog-ng-mod-redis \
+ && apt-key adv --fetch-keys https://repo.dovecot.org/DOVECOT-REPO-GPG \
+ && echo "deb https://repo.dovecot.org/ce-${DOVECOT}/debian/buster buster main" > /etc/apt/sources.list.d/dovecot.list \
+ && apt-get update \
+ && apt-get -y --no-install-recommends install \
+ dovecot-lua \
+ dovecot-managesieved \
+ dovecot-sieve \
+ dovecot-lmtpd \
+ dovecot-ldap \
+ dovecot-mysql \
+ dovecot-core \
+ dovecot-pop3d \
+ dovecot-imapd \
+ dovecot-solr \
+ && pip3 install mysql-connector-python html2text jinja2 redis \
+ && apt-get autoremove --purge -y \
+ && apt-get autoclean \
+ && rm -rf /var/lib/apt/lists/* \
+ && rm -rf /tmp/* /var/tmp/* /etc/cron.daily/* /root/.cache/
+
+# Helper scripts, sieve pipes and supervisor configuration used at runtime.
+COPY trim_logs.sh /usr/local/bin/trim_logs.sh
+COPY clean_q_aged.sh /usr/local/bin/clean_q_aged.sh
+COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
+COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
+COPY imapsync /usr/local/bin/imapsync
+COPY postlogin.sh /usr/local/bin/postlogin.sh
+COPY imapsync_cron.pl /usr/local/bin/imapsync_cron.pl
+COPY report-spam.sieve /usr/lib/dovecot/sieve/report-spam.sieve
+COPY report-ham.sieve /usr/lib/dovecot/sieve/report-ham.sieve
+COPY rspamd-pipe-ham /usr/lib/dovecot/sieve/rspamd-pipe-ham
+COPY rspamd-pipe-spam /usr/lib/dovecot/sieve/rspamd-pipe-spam
+COPY sa-rules.sh /usr/local/bin/sa-rules.sh
+COPY maildir_gc.sh /usr/local/bin/maildir_gc.sh
+COPY docker-entrypoint.sh /
+COPY supervisord.conf /etc/supervisor/supervisord.conf
+COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
+COPY quarantine_notify.py /usr/local/bin/quarantine_notify.py
+COPY quota_notify.py /usr/local/bin/quota_notify.py
+COPY repl_health.sh /usr/local/bin/repl_health.sh
+
+# Shell-form CMD is intentional here; the entrypoint execs supervisord.
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/clean_q_aged.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/clean_q_aged.sh
new file mode 100755
index 0000000..ef6b61f
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/clean_q_aged.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Purge quarantine rows older than the Redis-configured Q_MAX_AGE (days).
+source /source_env.sh
+
+MAX_AGE=$(redis-cli --raw -h redis-mailcow GET Q_MAX_AGE)
+
+if [[ -z ${MAX_AGE} ]]; then
+ echo "Max age for quarantine items not defined"
+ exit 1
+fi
+
+# Only accept a purely numeric value.
+NUM_REGEXP='^[0-9]+$'
+if ! [[ ${MAX_AGE} =~ ${NUM_REGEXP} ]] ; then
+ echo "Max age for quarantine items invalid"
+ exit 1
+fi
+
+# ${MAX_AGE//[!0-9]/} strips any non-digits as a belt-and-braces guard before
+# interpolating into SQL; count first, then delete.
+TO_DELETE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT COUNT(id) FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY" -BN)
+mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DELETE FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY"
+echo "Deleted ${TO_DELETE} items from quarantine table (max age is ${MAX_AGE//[!0-9]/} days)"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/docker-entrypoint.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/docker-entrypoint.sh
new file mode 100755
index 0000000..9c626fa
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/docker-entrypoint.sh
@@ -0,0 +1,396 @@
+#!/bin/bash
+# Entrypoint for the mailcow Dovecot container: waits for its dependencies
+# (MySQL, DNS, Redis), generates Dovecot SQL/Lua/sieve configuration from
+# environment variables, installs cronjobs and finally execs the container CMD.
+set -e
+
+# Wait for MySQL to warm-up
+while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+ echo "Waiting for database to come up..."
+ sleep 2
+done
+
+# Wait until the unbound resolver container answers DNS queries
+until dig +short mailcow.email @unbound > /dev/null; do
+ echo "Waiting for DNS..."
+ sleep 1
+done
+
+# Do not attempt to write to slave
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+ REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+else
+ REDIS_CMDLINE="redis-cli -h redis -p 6379"
+fi
+
+until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
+ echo "Waiting for Redis..."
+ sleep 2
+done
+
+# Seed the replication health flag; the repl_health.sh cronjob maintains it afterwards
+${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null
+
+# Create missing directories
+[[ ! -d /etc/dovecot/sql/ ]] && mkdir -p /etc/dovecot/sql/
+[[ ! -d /etc/dovecot/lua/ ]] && mkdir -p /etc/dovecot/lua/
+[[ ! -d /var/vmail/_garbage ]] && mkdir -p /var/vmail/_garbage
+[[ ! -d /var/vmail/sieve ]] && mkdir -p /var/vmail/sieve
+[[ ! -d /etc/sogo ]] && mkdir -p /etc/sogo
+[[ ! -d /var/volatile ]] && mkdir -p /var/volatile
+
+# Set Dovecot sql config parameters, escape " in db password
+DBPASS=$(echo ${DBPASS} | sed 's/"/\\"/g')
+
+# Create quota dict for Dovecot
+# The master instance tracks quota in quota2; a non-master uses quota2replica
+if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ QUOTA_TABLE=quota2
+else
+ QUOTA_TABLE=quota2replica
+fi
+cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-quota.conf
+# Autogenerated by mailcow
+connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
+map {
+ pattern = priv/quota/storage
+ table = ${QUOTA_TABLE}
+ username_field = username
+ value_field = bytes
+}
+map {
+ pattern = priv/quota/messages
+ table = ${QUOTA_TABLE}
+ username_field = username
+ value_field = messages
+}
+EOF
+
+# Write last logins to Redis
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+ cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
+ echo -n "redis:host=${REDIS_SLAVEOF_IP}:port=${REDIS_SLAVEOF_PORT}" > /etc/dovecot/last_login
+else
+ echo -n "redis:host=${IPV4_NETWORK}.249:port=6379" > /etc/dovecot/last_login
+fi
+
+# Create dict used for sieve pre and postfilters
+cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-sieve_before.conf
+# Autogenerated by mailcow
+connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
+map {
+ pattern = priv/sieve/name/\$script_name
+ table = sieve_before
+ username_field = username
+ value_field = id
+ fields {
+ script_name = \$script_name
+ }
+}
+map {
+ pattern = priv/sieve/data/\$id
+ table = sieve_before
+ username_field = username
+ value_field = script_data
+ fields {
+ id = \$id
+ }
+}
+EOF
+
+cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-sieve_after.conf
+# Autogenerated by mailcow
+connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
+map {
+ pattern = priv/sieve/name/\$script_name
+ table = sieve_after
+ username_field = username
+ value_field = id
+ fields {
+ script_name = \$script_name
+ }
+}
+map {
+ pattern = priv/sieve/data/\$id
+ table = sieve_after
+ username_field = username
+ value_field = script_data
+ fields {
+ id = \$id
+ }
+}
+EOF
+
+# Persist the ACL_ANYONE setting (value written verbatim for Dovecot to pick up)
+echo -n ${ACL_ANYONE} > /etc/dovecot/acl_anyone
+
+# Select mail plugin lists; the Solr FTS plugins (fts, fts_solr) are omitted when SKIP_SOLR=y
+if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+echo -n 'quota acl zlib listescape mail_crypt mail_crypt_acl mail_log notify replication last_login' > /etc/dovecot/mail_plugins
+echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve listescape mail_crypt mail_crypt_acl notify replication mail_log last_login' > /etc/dovecot/mail_plugins_imap
+echo -n 'quota sieve acl zlib listescape mail_crypt mail_crypt_acl notify replication' > /etc/dovecot/mail_plugins_lmtp
+else
+echo -n 'quota acl zlib listescape mail_crypt mail_crypt_acl mail_log notify fts fts_solr replication last_login' > /etc/dovecot/mail_plugins
+echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve listescape mail_crypt mail_crypt_acl notify mail_log fts fts_solr replication last_login' > /etc/dovecot/mail_plugins_imap
+echo -n 'quota sieve acl zlib listescape mail_crypt mail_crypt_acl fts fts_solr notify replication' > /etc/dovecot/mail_plugins_lmtp
+fi
+chmod 644 /etc/dovecot/mail_plugins /etc/dovecot/mail_plugins_imap /etc/dovecot/mail_plugins_lmtp /templates/quarantine.tpl
+
+# Create userdb dict for Dovecot (MySQL-backed user/iterate lookups)
+cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-userdb.conf
+# Autogenerated by mailcow
+driver = mysql
+connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
+user_query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%n/${MAILDIR_SUB}:VOLATILEDIR=/var/volatile/%u:INDEX=/var/vmail_index/%u') AS mail, '%s' AS protocol, 5000 AS uid, 5000 AS gid, concat('*:bytes=', quota) AS quota_rule FROM mailbox WHERE username = '%u' AND (active = '1' OR active = '2')
+iterate_query = SELECT username FROM mailbox WHERE active = '1' OR active = '2';
+EOF
+
+# Create pass dict for Dovecot
+cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-passdb.conf
+# Autogenerated by mailcow
+driver = mysql
+connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
+default_pass_scheme = ${MAILCOW_PASS_SCHEME}
+password_query = SELECT password FROM mailbox WHERE active = '1' AND username = '%u' AND domain IN (SELECT domain FROM domain WHERE domain='%d' AND active='1') AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.force_pw_update')) != '1' AND (JSON_UNQUOTE(JSON_VALUE(attributes, '$.%s_access')) = '1' OR ('%s' != 'imap' AND '%s' != 'pop3'))
+EOF
+
+# Lua passdb verifying app-specific passwords from the app_passwd table.
+# The __DBNAME__/__DBUSER__/__DBPASS__ placeholders are substituted by sed
+# later in this script (cron does not pass environment variables).
+cat <<EOF > /etc/dovecot/lua/app-passdb.lua
+function auth_password_verify(req, pass)
+ if req.domain == nil then
+ return dovecot.auth.PASSDB_RESULT_USER_UNKNOWN, "No such user"
+ end
+ if cur == nil then
+ script_init()
+ end
+ local cur,errorString = con:execute(string.format([[SELECT mailbox, password FROM app_passwd
+ WHERE mailbox = '%s'
+ AND active = '1'
+ AND domain IN (SELECT domain FROM domain WHERE domain='%s' AND active='1')]], con:escape(req.user), con:escape(req.domain)))
+ local row = cur:fetch ({}, "a")
+ while row do
+ if req.password_verify(req, row.password, pass) == 1 then
+ cur:close()
+ return dovecot.auth.PASSDB_RESULT_OK, "password=" .. pass
+ end
+ row = cur:fetch (row, "a")
+ end
+ return dovecot.auth.PASSDB_RESULT_USER_UNKNOWN, "No such user"
+end
+
+function auth_passdb_lookup(req)
+ return dovecot.auth.PASSDB_RESULT_USER_UNKNOWN, ""
+end
+
+function script_init()
+ mysql = require "luasql.mysql"
+ env = mysql.mysql()
+ con = env:connect("__DBNAME__","__DBUSER__","__DBPASS__","localhost")
+ return 0
+end
+
+function script_deinit()
+ con:close()
+ env:close()
+end
+EOF
+
+# Migrate old sieve_after file
+[[ -f /etc/dovecot/sieve_after ]] && mv /etc/dovecot/sieve_after /etc/dovecot/global_sieve_after
+# Create global sieve scripts
+cat /etc/dovecot/global_sieve_after > /var/vmail/sieve/global_sieve_after.sieve
+cat /etc/dovecot/global_sieve_before > /var/vmail/sieve/global_sieve_before.sieve
+
+# Check permissions of vmail/index/garbage directories.
+# Do not do this every start-up, it may take a very long time. So we use a stat check here.
+if [[ $(stat -c %U /var/vmail/) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail ; fi
+if [[ $(stat -c %U /var/vmail/_garbage) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail/_garbage ; fi
+if [[ $(stat -c %U /var/vmail_index) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail_index ; fi
+
+# Cleanup random user maildirs
+rm -rf /var/vmail/mailcow.local/*
+
+# create sni configuration
+# One local_name block per domain listed in each cert dir's "domains" file;
+# dirs missing domains/cert.pem/key.pem are skipped.
+echo "" > /etc/dovecot/sni.conf
+for cert_dir in /etc/ssl/mail/*/ ; do
+ if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
+ continue
+ fi
+ domains=($(cat ${cert_dir}domains))
+ for domain in ${domains[@]}; do
+ echo 'local_name '${domain}' {' >> /etc/dovecot/sni.conf;
+ echo ' ssl_cert = <'${cert_dir}'cert.pem' >> /etc/dovecot/sni.conf;
+ echo ' ssl_key = <'${cert_dir}'key.pem' >> /etc/dovecot/sni.conf;
+ echo '}' >> /etc/dovecot/sni.conf;
+ done
+done
+
+# Create random master for SOGo sieve features
+RAND_USER=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 16 | head -n 1)
+RAND_PASS=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 24 | head -n 1)
+
+# Operator-provided master credentials override the random ones
+if [[ ! -z ${DOVECOT_MASTER_USER} ]] && [[ ! -z ${DOVECOT_MASTER_PASS} ]]; then
+ RAND_USER=${DOVECOT_MASTER_USER}
+ RAND_PASS=${DOVECOT_MASTER_PASS}
+fi
+echo ${RAND_USER}@mailcow.local:{SHA1}$(echo -n ${RAND_PASS} | sha1sum | awk '{print $1}'):::::: > /etc/dovecot/dovecot-master.passwd
+echo ${RAND_USER}@mailcow.local::5000:5000:::: > /etc/dovecot/dovecot-master.userdb
+echo ${RAND_USER}@mailcow.local:${RAND_PASS} > /etc/sogo/sieve.creds
+
+# Optional maildir subdirectory prefix for the shared namespace location
+if [[ -z ${MAILDIR_SUB} ]]; then
+ MAILDIR_SUB_SHARED=
+else
+ MAILDIR_SUB_SHARED=/${MAILDIR_SUB}
+fi
+cat <<EOF > /etc/dovecot/shared_namespace.conf
+# Autogenerated by mailcow
+namespace {
+ type = shared
+ separator = /
+ prefix = Shared/%%u/
+ location = maildir:%%h${MAILDIR_SUB_SHARED}:INDEX=~${MAILDIR_SUB_SHARED}/Shared/%%u
+ subscriptions = no
+ list = children
+}
+EOF
+
+# Allow plaintext auth from the SOGo container (.248)
+cat <<EOF > /etc/dovecot/sogo_trusted_ip.conf
+# Autogenerated by mailcow
+remote ${IPV4_NETWORK}.248 {
+ disable_plaintext_auth = no
+}
+EOF
+
+if [[ "${ALLOW_ADMIN_EMAIL_LOGIN}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ # Create random master Password for SOGo 'login as user' via proxy auth
+ RAND_PASS=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 32 | head -n 1)
+ echo -n ${RAND_PASS} > /etc/phpfpm/sogo-sso.pass
+ cat <<EOF > /etc/dovecot/sogo-sso.conf
+# Autogenerated by mailcow
+passdb {
+ driver = static
+ args = allow_real_nets=${IPV4_NETWORK}.248/32 password={plain}${RAND_PASS}
+}
+EOF
+else
+ # NOTE(review): the pass file above is written to /etc/phpfpm/sogo-sso.pass,
+ # but this removes /etc/dovecot/sogo-sso.pass — confirm intended path (old location?)
+ rm -f /etc/dovecot/sogo-sso.pass
+ rm -f /etc/dovecot/sogo-sso.conf
+fi
+
+# Hard-code env vars to scripts due to cron not passing them to the scripts
+sed -i "s/__DBUSER__/${DBUSER}/g" /usr/local/bin/imapsync_cron.pl /usr/local/bin/quarantine_notify.py /usr/local/bin/clean_q_aged.sh /etc/dovecot/lua/app-passdb.lua
+sed -i "s/__DBPASS__/${DBPASS}/g" /usr/local/bin/imapsync_cron.pl /usr/local/bin/quarantine_notify.py /usr/local/bin/clean_q_aged.sh /etc/dovecot/lua/app-passdb.lua
+sed -i "s/__DBNAME__/${DBNAME}/g" /usr/local/bin/imapsync_cron.pl /usr/local/bin/quarantine_notify.py /usr/local/bin/clean_q_aged.sh /etc/dovecot/lua/app-passdb.lua
+sed -i "s/__MAILCOW_HOSTNAME__/${MAILCOW_HOSTNAME}/g" /usr/local/bin/quarantine_notify.py
+sed -i "s/__LOG_LINES__/${LOG_LINES}/g" /usr/local/bin/trim_logs.sh
+# On a non-master node, replace quota_notify.py with a no-op stub
+if [[ "${MASTER}" =~ ^([nN][oO]|[nN])+$ ]]; then
+# Toggling MASTER will result in a rebuild of containers, so the quota script will be recreated
+cat <<'EOF' > /usr/local/bin/quota_notify.py
+#!/usr/bin/python3
+import sys
+sys.exit()
+EOF
+fi
+
+# 401 is user dovecot
+# Generate the mail_crypt EC keypair once; on later starts only fix ownership
+if [[ ! -s /mail_crypt/ecprivkey.pem || ! -s /mail_crypt/ecpubkey.pem ]]; then
+ openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem
+ openssl pkey -in /mail_crypt/ecprivkey.pem -pubout -out /mail_crypt/ecpubkey.pem
+ chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
+else
+ chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
+fi
+
+# Compile sieve scripts
+sievec /var/vmail/sieve/global_sieve_before.sieve
+sievec /var/vmail/sieve/global_sieve_after.sieve
+sievec /usr/lib/dovecot/sieve/report-spam.sieve
+sievec /usr/lib/dovecot/sieve/report-ham.sieve
+
+# Fix permissions
+chown root:root /etc/dovecot/sql/*.conf
+chown root:dovecot /etc/dovecot/sql/dovecot-dict-sql-sieve* /etc/dovecot/sql/dovecot-dict-sql-quota* /etc/dovecot/lua/app-passdb.lua
+chmod 640 /etc/dovecot/sql/*.conf /etc/dovecot/lua/app-passdb.lua
+chown -R vmail:vmail /var/vmail/sieve
+chown -R vmail:vmail /var/volatile
+chown -R vmail:vmail /var/vmail_index
+# Let vmail write to the console device so cronjobs can log there
+adduser vmail tty
+chmod g+rw /dev/console
+chown root:tty /dev/console
+# Ensure all helper scripts are executable
+chmod +x /usr/lib/dovecot/sieve/rspamd-pipe-ham \
+ /usr/lib/dovecot/sieve/rspamd-pipe-spam \
+ /usr/local/bin/imapsync_cron.pl \
+ /usr/local/bin/postlogin.sh \
+ /usr/local/bin/imapsync \
+ /usr/local/bin/trim_logs.sh \
+ /usr/local/bin/sa-rules.sh \
+ /usr/local/bin/clean_q_aged.sh \
+ /usr/local/bin/maildir_gc.sh \
+ /usr/local/sbin/stop-supervisor.sh \
+ /usr/local/bin/quota_notify.py \
+ /usr/local/bin/quota_notify.py \
+ /usr/local/bin/repl_health.sh
+
+# Master nodes run the full cron set (imapsync, log trim, quarantine, aged cleanup);
+# non-masters only run garbage collection, SA rules, Solr optimize and replication health
+if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+# Setup cronjobs
+echo '* * * * * nobody /usr/local/bin/imapsync_cron.pl 2>&1 | /usr/bin/logger' > /etc/cron.d/imapsync
+#echo '30 3 * * * vmail /usr/local/bin/doveadm quota recalc -A' > /etc/cron.d/dovecot-sync
+echo '* * * * * vmail /usr/local/bin/trim_logs.sh >> /dev/console 2>&1' > /etc/cron.d/trim_logs
+echo '25 * * * * vmail /usr/local/bin/maildir_gc.sh >> /dev/console 2>&1' > /etc/cron.d/maildir_gc
+echo '30 1 * * * root /usr/local/bin/sa-rules.sh >> /dev/console 2>&1' > /etc/cron.d/sa-rules
+echo '0 2 * * * root /usr/bin/curl http://solr:8983/solr/dovecot-fts/update?optimize=true >> /dev/console 2>&1' > /etc/cron.d/solr-optimize
+echo '*/20 * * * * vmail /usr/local/bin/quarantine_notify.py >> /dev/console 2>&1' > /etc/cron.d/quarantine_notify
+echo '15 4 * * * vmail /usr/local/bin/clean_q_aged.sh >> /dev/console 2>&1' > /etc/cron.d/clean_q_aged
+echo '*/5 * * * * vmail /usr/local/bin/repl_health.sh >> /dev/console 2>&1' > /etc/cron.d/repl_health
+else
+echo '25 * * * * vmail /usr/local/bin/maildir_gc.sh >> /dev/console 2>&1' > /etc/cron.d/maildir_gc
+echo '30 1 * * * root /usr/local/bin/sa-rules.sh >> /dev/console 2>&1' > /etc/cron.d/sa-rules
+echo '0 2 * * * root /usr/bin/curl http://solr:8983/solr/dovecot-fts/update?optimize=true >> /dev/console 2>&1' > /etc/cron.d/solr-optimize
+echo '*/5 * * * * vmail /usr/local/bin/repl_health.sh >> /dev/console 2>&1' > /etc/cron.d/repl_health
+fi
+
+# Fix more than 1 hardlink issue
+touch /etc/crontab /etc/cron.*/*
+
+# Prepare environment file for cronjobs
+printenv | sed 's/^\(.*\)$/export \1/g' > /source_env.sh
+
+# Clean old PID if any
+[[ -f /var/run/dovecot/master.pid ]] && rm /var/run/dovecot/master.pid
+
+# Clean stopped imapsync jobs
+rm -f /tmp/imapsync_busy.lock
+IMAPSYNC_TABLE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'imapsync'" -Bs)
+[[ ! -z ${IMAPSYNC_TABLE} ]] && mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "UPDATE imapsync SET is_running='0'"
+
+# Envsubst maildir_gc
+# Expand environment variables into the script once, since cron will not pass them
+echo "$(envsubst < /usr/local/bin/maildir_gc.sh)" > /usr/local/bin/maildir_gc.sh
+
+# GUID generation
+# Wait for the versions table to exist, then store a GUID derived from
+# MAILCOW_HOSTNAME + the mail_crypt public key (64-char sha256, else "INVALID")
+while [[ ${VERSIONS_OK} != 'OK' ]]; do
+ if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = \"${DBNAME}\" AND TABLE_NAME = 'versions'") ]]; then
+ VERSIONS_OK=OK
+ else
+ echo "Waiting for versions table to be created..."
+ sleep 3
+ fi
+done
+PUBKEY_MCRYPT=$(doveconf -P 2> /dev/null | grep -i mail_crypt_global_public_key | cut -d '<' -f2)
+if [ -f ${PUBKEY_MCRYPT} ]; then
+ GUID=$(cat <(echo ${MAILCOW_HOSTNAME}) /mail_crypt/ecpubkey.pem | sha256sum | cut -d ' ' -f1 | tr -cd "[a-fA-F0-9.:/] ")
+ if [ ${#GUID} -eq 64 ]; then
+ mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+REPLACE INTO versions (application, version) VALUES ("GUID", "${GUID}");
+EOF
+ else
+ mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+REPLACE INTO versions (application, version) VALUES ("GUID", "INVALID");
+EOF
+ fi
+fi
+
+# Collect SA rules once now
+/usr/local/bin/sa-rules.sh
+
+# Run hooks
+for file in /hooks/*; do
+ if [ -x "${file}" ]; then
+ echo "Running hook ${file}"
+ "${file}"
+ fi
+done
+
+# For some strange, unknown and stupid reason, Dovecot may run into a race condition, when this file is not touched before it is read by dovecot/auth
+# May be related to something inside Docker, I seriously don't know
+touch /etc/dovecot/lua/app-passdb.lua
+
+# Hand off to the container CMD (supervisord), replacing this shell
+exec "$@"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync
new file mode 100755
index 0000000..4c941f4
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync
@@ -0,0 +1,17261 @@
+#!/usr/bin/env perl
+
+# $Id: imapsync,v 1.977 2019/12/23 20:18:02 gilles Exp gilles $
+# structure
+# pod documentation
+# use pragmas
+# main program
+# global variables initialization
+# get_options( ) ;
+# default values
+# folder loop
+# subroutines
+# sub usage
+
+
+# pod documentation
+
+=pod
+
+=head1 NAME
+
+imapsync - Email IMAP tool for syncing, copying, migrating
+and archiving email mailboxes between two imap servers, one way,
+and without duplicates.
+
+=head1 VERSION
+
+This documentation refers to Imapsync $Revision: 1.977 $
+
+=head1 USAGE
+
+ To synchronize the source imap account
+ "test1" on server "test1.lamiral.info" with password "secret1"
+ to the destination imap account
+ "test2" on server "test2.lamiral.info" with password "secret2"
+ do:
+
+ imapsync \
+ --host1 test1.lamiral.info --user1 test1 --password1 secret1 \
+ --host2 test2.lamiral.info --user2 test2 --password2 secret2
+
+=head1 DESCRIPTION
+
+We sometimes need to transfer mailboxes from one imap server to
+one another.
+
+Imapsync command is a tool allowing incremental and
+recursive imap transfers from one mailbox to another.
+If you don't understand the previous sentence, it's normal,
+it's pedantic computer oriented jargon.
+
+All folders are transferred, recursively, meaning
+the whole folder hierarchy is taken, all messages in them,
+and all messages flags (\Seen \Answered \Flagged etc.)
+are synced too.
+
+Imapsync reduces the amount of data transferred by not transferring
+a given message if it already resides on the destination side.
+Messages that are on the destination side but not on the
+source side stay as they are (see the --delete2
+option to have a strict sync).
+
+How does imapsync know a message is already on both sides?
+It compares specific headers, and the transfer is done only once.
+By default, the identification headers are
+"Message-Id:" and "Received:" lines
+but this choice can be changed with the --useheader option.
+
+All flags are preserved, unread messages will stay unread,
+read ones will stay read, deleted will stay deleted.
+
+You can abort the transfer at any time and restart it later,
+imapsync works well with bad connections and interruptions,
+by design. On a terminal hit Ctrl-c twice within two seconds
+in order to abort the program. Hitting Ctrl-c just once makes
+imapsync reconnect to both imap servers.
+
+A classical scenario is synchronizing a mailbox B from another mailbox A
+where you just want to keep a strict copy of A in B. Strict meaning
+all messages in A will be in B but no more.
+
+For this, option --delete2 has to be used, it deletes messages in host2
+folder B that are not in host1 folder A. If you also need to destroy
+host2 folders that are not in host1 then use --delete2folders. See also
+--delete2foldersonly and --delete2foldersbutnot to set up exceptions
+on folders to destroy. INBOX will never be destroyed, it's a mandatory
+folder in IMAP.
+
+A different scenario is to delete the messages from the source mailbox
+after a successful transfer, it can be a good feature when migrating
+mailboxes since messages will be only on one side. The source account
+will only have messages that are not on the destination yet, ie,
+messages that arrived after a sync or that failed to be copied.
+
+In that case, use the --delete1 option. Option --delete1 implies also
+option --expunge1 so all messages marked deleted on host1 will be really
+deleted. In IMAP protocol deleting a message does not really delete it,
+it marks it with the flag \Deleted, allowing an undelete. Expunging
+a folder removes, definitively, all the messages marked as \Deleted
+in this folder.
+
+You can also decide to remove empty folders once all of their messages
+have been transferred. Add --delete1emptyfolders to obtain this
+behavior.
+
+
+Imapsync is not adequate for maintaining two active imap accounts
+in synchronization when the user plays independently on both sides.
+Use offlineimap (written by John Goerzen) or mbsync (written by
+Michael R. Elkins) for a 2 ways synchronization.
+
+
+=head1 OPTIONS
+
+ usage: imapsync [options]
+
+The standard options are the six values forming the credentials.
+Three values on each side are needed in order to log in into the IMAP
+servers. These six values are a host, a username, and a password, two times.
+
+Conventions used in the following descriptions of the options:
+
+ str means string
+ int means integer
+ reg means regular expression
+ cmd means command
+
+ --dry : Makes imapsync doing nothing for real, just print what
+ would be done without --dry.
+
+=head2 OPTIONS/credentials
+
+
+ --host1 str : Source or "from" imap server.
+ --port1 int : Port to connect on host1.
+ Optional since default ports are the
+ well known ports imap/143 or imaps/993.
+ --user1 str : User to login on host1.
+ --password1 str : Password for the user1.
+
+ --host2 str : "destination" imap server.
+ --port2 int : Port to connect on host2. Optional
+ --user2 str : User to login on host2.
+ --password2 str : Password for the user2.
+
+ --showpasswords : Shows passwords on output instead of "MASKED".
+ Useful to restart a complete run by just reading
+ the command line used in the log,
+ or to debug passwords.
+ It's not a secure practice at all.
+
+ --passfile1 str : Password file for the user1. It must contain the
+ password on the first line. This option avoids showing
+ the password on the command line like --password1 does.
+ --passfile2 str : Password file for the user2.
+
+You can also pass the passwords in the environment variables
+IMAPSYNC_PASSWORD1 and IMAPSYNC_PASSWORD2
+
+=head2 OPTIONS/encryption
+
+ --nossl1 : Do not use a SSL connection on host1.
+ --ssl1 : Use a SSL connection on host1. On by default if possible.
+
+ --nossl2 : Do not use a SSL connection on host2.
+ --ssl2 : Use a SSL connection on host2. On by default if possible.
+
+ --notls1 : Do not use a TLS connection on host1.
+ --tls1 : Use a TLS connection on host1. On by default if possible.
+
+ --notls2 : Do not use a TLS connection on host2.
+ --tls2 : Use a TLS connection on host2. On by default if possible.
+
+ --debugssl int : SSL debug mode from 0 to 4.
+
+ --sslargs1 str : Pass any ssl parameter for host1 ssl or tls connection. Example:
+ --sslargs1 SSL_verify_mode=1 --sslargs1 SSL_version=SSLv3
+ See all possibilities in the new() method of IO::Socket::SSL
+ http://search.cpan.org/perldoc?IO::Socket::SSL#Description_Of_Methods
+ --sslargs2 str : Pass any ssl parameter for host2 ssl or tls connection.
+ See --sslargs1
+
+ --timeout1 int : Connection timeout in seconds for host1.
+ Default is 120 and 0 means no timeout at all.
+ --timeout2 int : Connection timeout in seconds for host2.
+ Default is 120 and 0 means no timeout at all.
+
+
+=head2 OPTIONS/authentication
+
+ --authmech1 str : Auth mechanism to use with host1:
+ PLAIN, LOGIN, CRAM-MD5 etc. Use UPPERCASE.
+ --authmech2 str : Auth mechanism to use with host2. See --authmech1
+
+ --authuser1 str : User to auth with on host1 (admin user).
+ Avoid using --authmech1 SOMETHING with --authuser1.
+ --authuser2 str : User to auth with on host2 (admin user).
+ --proxyauth1 : Use proxyauth on host1. Requires --authuser1.
+ Required by Sun/iPlanet/Netscape IMAP servers to
+ be able to use an administrative user.
+ --proxyauth2 : Use proxyauth on host2. Requires --authuser2.
+
+ --authmd51 : Use MD5 authentication for host1.
+ --authmd52 : Use MD5 authentication for host2.
+ --domain1 str : Domain on host1 (NTLM authentication).
+ --domain2 str : Domain on host2 (NTLM authentication).
+
+
+=head2 OPTIONS/folders
+
+
+ --folder str : Sync this folder.
+ --folder str : and this one, etc.
+ --folderrec str : Sync this folder recursively.
+ --folderrec str : and this one, etc.
+
+ --folderfirst str : Sync this folder first. Ex. --folderfirst "INBOX"
+ --folderfirst str : then this one, etc.
+ --folderlast str : Sync this folder last. --folderlast "[Gmail]/All Mail"
+ --folderlast str : then this one, etc.
+
+ --nomixfolders : Do not merge folders when host1 is case-sensitive
+ while host2 is not (like Exchange). Only the first
+ similar folder is synced (example: with folders
+ "Sent", "SENT" and "sent"
+ on host1 only "Sent" will be synced to host2).
+
+ --skipemptyfolders : Empty host1 folders are not created on host2.
+
+ --include reg : Sync folders matching this regular expression
+ --include reg : or this one, etc.
+ If both --include --exclude options are used, then
+ include is done before.
+ --exclude reg : Skips folders matching this regular expression
+ Several folders to avoid:
+ --exclude 'fold1|fold2|f3' skips fold1, fold2 and f3.
+ --exclude reg : or this one, etc.
+
+ --automap : guesses folders mapping, for folders well known as
+ "Sent", "Junk", "Drafts", "All", "Archive", "Flagged".
+
+ --f1f2 str1=str2 : Force folder str1 to be synced to str2,
+ --f1f2 overrides --automap and --regextrans2.
+
+ --subfolder2 str : Syncs the whole host1 folders hierarchy under the
+ host2 folder named str.
+ It does it internally by adding three
+ --regextrans2 options before all others.
+ Add --debug to see what's really going on.
+
+ --subfolder1 str : Syncs the host1 folders hierarchy which is under folder
+ str to the root hierarchy of host2.
+ It's the counterpart of a sync done by --subfolder2
+ when doing it in the reverse order.
+ Backup/Restore scenario:
+ Use --subfolder2 str for a backup to the folder str
+ on host2. Then use --subfolder1 str for restoring
+ from the folder str, after inverting
+ host1/host2 user1/user2 values.
+
+
+ --subscribed : Transfers subscribed folders.
+ --subscribe : Subscribe to the folders transferred on the
+ host2 that are subscribed on host1. On by default.
+ --subscribeall : Subscribe to the folders transferred on the
+ host2 even if they are not subscribed on host1.
+
+ --prefix1 str : Remove prefix str to all destination folders,
+ usually "INBOX." or "INBOX/" or an empty string "".
+ imapsync guesses the prefix if host1 imap server
+ does not have NAMESPACE capability. So this option
+ should not be used most of the time.
+ --prefix2 str : Add prefix to all host2 folders. See --prefix1
+
+ --sep1 str : Host1 separator. This option should not be used
+ most of the time.
+ Imapsync gets the separator from the server itself,
+ by using NAMESPACE, or it tries to guess it
+ from the folders listing (it counts
+ characters / . \\ \ in folder names and chooses the
+ most frequent, or finally / if nothing is found).
+ --sep2 str : Host2 separator. See --sep1
+
+ --regextrans2 reg : Apply the whole regex to each destination folders.
+ --regextrans2 reg : and this one. etc.
+ When you play with the --regextrans2 option, first
+ add also the safe options --dry --justfolders
+ Then, when happy, remove --dry for a run, then
+ remove --justfolders for the next ones.
+ Have in mind that --regextrans2 is applied after
+ the automatic prefix and separator inversion.
+ For examples see:
+ https://imapsync.lamiral.info/FAQ.d/FAQ.Folders_Mapping.txt
+
+=head2 OPTIONS/folders sizes
+
+ --nofoldersizes : Do not calculate the size of each folder at the
+ beginning of the sync. Default is to calculate them.
+ --nofoldersizesatend: Do not calculate the size of each folder at the
+ end of the sync. Default is to calculate them.
+ --justfoldersizes : Exit after having printed the initial folder sizes.
+
+
+=head2 OPTIONS/tmp
+
+
+ --tmpdir str : Where to store temporary files and subdirectories.
+ Will be created if it doesn't exist.
+ Default is system specific, Unix is /tmp but
+ /tmp is often too small and deleted at reboot.
+ --tmpdir /var/tmp should be better.
+ --pidfile str : The file where imapsync pid is written,
+ it can be dirname/filename.
+ Default name is imapsync.pid in tmpdir.
+ --pidfilelocking : Abort if pidfile already exists. Useful to avoid
+ concurrent transfers on the same mailbox.
+
+
+=head2 OPTIONS/log
+
+ --nolog : Turn off logging on file
+ --logfile str : Change the default log filename (can be dirname/filename).
+ --logdir str : Change the default log directory. Default is LOG_imapsync/
+
+The default logfile name is for example
+
+ LOG_imapsync/2019_12_22_23_57_59_532_user1_user2.txt
+
+where:
+
+ 2019_12_22_23_57_59_532 is nearly the date of the start
+ YYYY_MM_DD_HH_MM_SS_mmm
+ year_month_day_hour_minute_second_millisecond
+
+and user1 user2 are the --user1 --user2 values.
+
+=head2 OPTIONS/messages
+
+ --skipmess reg : Skips messages matching the regex.
+ Example: 'm/[\x80-ff]/' # to avoid 8bits messages.
+ --skipmess is applied before --regexmess
+ --skipmess reg : or this one, etc.
+
+ --skipcrossduplicates : Avoid copying messages that are already copied
+ in another folder, good from Gmail to X when
+ X is not also Gmail.
+ Activated with --gmail1 unless --noskipcrossduplicates
+
+ --debugcrossduplicates : Prints which messages (UIDs) are skipped with
+ --skipcrossduplicates (and in what other folders
+ they are).
+
+ --pipemess cmd : Apply this cmd command to each message content
+ before the copy.
+ --pipemess cmd : and this one, etc.
+ With several --pipemess, the output of each cmd
+ command (STDOUT) is given to the input (STDIN)
+ of the next command.
+ For example,
+ --pipemess cmd1 --pipemess cmd2 --pipemess cmd3
+ is like a Unix pipe:
+ "cat message | cmd1 | cmd2 | cmd3"
+
+ --disarmreadreceipts : Disarms read receipts (host2 Exchange issue)
+
+ --regexmess reg : Apply the whole regex to each message before transfer.
+ Example: 's/\000/ /g' # to replace null by space.
+ --regexmess reg : and this one, etc.
+
+=head2 OPTIONS/labels
+
+Gmail present labels as folders in imap. Imapsync can accelerate the sync
+by syncing X-GM-LABELS, it will avoid to transfer messages when they are
+already on host2.
+
+
+ --synclabels : Syncs also Gmail labels when a message is copied to host2.
+ Activated by default with --gmail1 --gmail2 unless
+ --nosynclabels is added.
+
+ --resynclabels : Resyncs Gmail labels when a message is already on host2.
+ Activated by default with --gmail1 --gmail2 unless
+ --noresynclabels is added.
+
+For Gmail syncs, see also:
+https://imapsync.lamiral.info/FAQ.d/FAQ.Gmail.txt
+
+=head2 OPTIONS/flags
+
+ If you encounter flag problems see also:
+ https://imapsync.lamiral.info/FAQ.d/FAQ.Flags.txt
+
+ --regexflag reg : Apply the whole regex to each flags list.
+ Example: 's/"Junk"//g' # to remove "Junk" flag.
+ --regexflag reg : then this one, etc.
+
+ --resyncflags : Resync flags for already transferred messages.
+ On by default.
+ --noresyncflags : Do not resync flags for already transferred messages.
+ May be useful when a user has already started to play
+ with its host2 account.
+
+=head2 OPTIONS/deletions
+
+ --delete1 : Deletes messages on host1 server after a successful
+ transfer. Option --delete1 has the following behavior:
+ it marks messages as deleted with the IMAP flag
+ \Deleted, then messages are really deleted with an
+ EXPUNGE IMAP command. If expunging after each message
+ slows down too much the sync then use
+ --noexpungeaftereach to speed up, expunging will then be
+ done only twice per folder, one at the beginning and
+ one at the end of a folder sync.
+
+ --expunge1 : Expunge messages on host1 just before syncing a folder.
+ Expunge is done per folder.
+ Expunge's aim is to really delete messages marked deleted.
+ An expunge is also done after each message copied
+ if option --delete1 is set (unless --noexpungeaftereach).
+
+ --noexpunge1 : Do not expunge messages on host1.
+
+ --delete1emptyfolders : Deletes empty folders on host1, INBOX excepted.
+ Useful with --delete1 since what remains on host1
+ is only what failed to be synced.
+
+ --delete2 : Delete messages in host2 that are not in
+ host1 server. Useful for backup or pre-sync.
+ --delete2 implies --uidexpunge2
+
+ --delete2duplicates : Delete messages in host2 that are duplicates.
+ Works only without --useuid since duplicates are
+ detected with a header part of each message.
+
+ --delete2folders : Delete folders in host2 that are not in host1 server.
+ For safety, first try it like this (it is safe):
+ --delete2folders --dry --justfolders --nofoldersizes
+ and see what folders will be deleted.
+
+ --delete2foldersonly reg : Delete only folders matching the regex reg.
+ Example: --delete2foldersonly "/^Junk$|^INBOX.Junk$/"
+ This option activates --delete2folders
+
+ --delete2foldersbutnot reg : Do not delete folders matching the regex reg.
+ Example: --delete2foldersbutnot "/Tasks$|Contacts$|Foo$/"
+ This option activates --delete2folders
+
+ --noexpunge2 : Do not expunge messages on host2.
+ --nouidexpunge2 : Do not uidexpunge messages on the host2 account
+ that are not on the host1 account.
+
+
+=head2 OPTIONS/dates
+
+ If you encounter problems with dates, see also:
+ https://imapsync.lamiral.info/FAQ.d/FAQ.Dates.txt
+
+ --syncinternaldates : Sets the internal dates on host2 same as host1.
+ Turned on by default. Internal date is the date
+ a message arrived on a host (Unix mtime).
+ --idatefromheader : Sets the internal dates on host2 same as the
+ ones in "Date:" headers.
+
+
+
+=head2 OPTIONS/message selection
+
+ --maxsize int : Skip messages larger (or equal) than int bytes
+ --minsize int : Skip messages smaller (or equal) than int bytes
+ --maxage int : Skip messages older than int days.
+ final stats (skipped) don't count older messages
+ see also --minage
+ --minage int : Skip messages newer than int days.
+ final stats (skipped) don't count newer messages
+ You can do (+ zone are the messages selected):
+ past|----maxage+++++++++++++++>now
+ past|+++++++++++++++minage---->now
+ past|----maxage+++++minage---->now (intersection)
+ past|++++minage-----maxage++++>now (union)
+
+ --search str : Selects only messages returned by this IMAP SEARCH
+ command. Applied on both sides.
+ For a complete set of what can be searched see
+ https://imapsync.lamiral.info/FAQ.d/FAQ.Messages_Selection.txt
+
+ --search1 str : Same as --search but for selecting host1 messages only.
+ --search2 str : Same as --search but for selecting host2 messages only.
+ So --search CRIT equals --search1 CRIT --search2 CRIT
+
+ --maxlinelength int : skip messages with a line length longer than int bytes.
+ RFC 2822 says it must be no more than 1000 bytes but
+ real life servers and email clients do more.
+
+
+ --useheader str : Use this header to compare messages on both sides.
+ Ex: Message-ID or Subject or Date.
+ --useheader str and this one, etc.
+
+ --usecache : Use cache to speed up next syncs. Not set by default.
+ --nousecache : Do not use cache. Caveat: --useuid --nousecache creates
+ duplicates on multiple runs.
+ --useuid : Use UIDs instead of headers as a criterion to recognize
+ messages. Option --usecache is then implied unless
+ --nousecache is used.
+
+
+=head2 OPTIONS/miscellaneous
+
+ --syncacls : Synchronizes acls (Access Control Lists).
+ Acls in IMAP are not standardized, be careful
+ since one acl code on one side may signify something
+ else on the other one.
+ --nosyncacls : Does not synchronize acls. This is the default.
+
+ --addheader : When a message has no headers to be identified,
+ --addheader adds a "Message-Id" header,
+ like "Message-Id: 12345@imapsync", where 12345
+ is the imap UID of the message on the host1 folder.
+
+
+=head2 OPTIONS/debugging
+
+ --debug : Debug mode.
+ --debugfolders : Debug mode for the folders part only.
+ --debugcontent : Debug content of the messages transferred. Huge output.
+ --debugflags : Debug mode for flags.
+ --debugimap1 : IMAP debug mode for host1. Very verbose.
+ --debugimap2 : IMAP debug mode for host2. Very verbose.
+ --debugimap : IMAP debug mode for host1 and host2. Twice very verbose.
+ --debugmemory : Debug mode showing memory consumption after each copy.
+
+ --errorsmax int : Exit when int number of errors is reached. Default is 50.
+
+ --tests : Run local non-regression tests. Exit code 0 means all ok.
+ --testslive : Run a live test with test1.lamiral.info imap server.
+ Useful to check the basics. Needs internet connection.
+ --testslive6 : Run a live test with ks2ipv6.lamiral.info imap server.
+ Useful to check the ipv6 connectivity. Needs internet.
+
+
+=head2 OPTIONS/specific
+
+ --gmail1 : sets --host1 to Gmail and other options. See FAQ.Gmail.txt
+ --gmail2 : sets --host2 to Gmail and other options. See FAQ.Gmail.txt
+
+ --office1 : sets --host1 to Office365 and other options. See FAQ.Exchange.txt
+ --office2 : sets --host2 to Office365 and other options. See FAQ.Exchange.txt
+
+ --exchange1 : sets options for Exchange. See FAQ.Exchange.txt
+ --exchange2 : sets options for Exchange. See FAQ.Exchange.txt
+
+ --domino1 : sets options for Domino. See FAQ.Domino.txt
+ --domino2 : sets options for Domino. See FAQ.Domino.txt
+
+
+=head2 OPTIONS/behavior
+
+ --maxmessagespersecond int : limits the number of messages transferred per second.
+
+ --maxbytespersecond int : limits the average transfer rate per second.
+ --maxbytesafter int : starts --maxbytespersecond limitation only after
+ --maxbytesafter amount of data transferred.
+
+ --maxsleep int : do not sleep more than int seconds.
+ On by default, 2 seconds max, like --maxsleep 2
+
+ --abort : terminates a previous call still running.
+ It uses the pidfile to know what process to abort.
+
+ --exitwhenover int : Stop syncing and exits when int total bytes
+ transferred is reached.
+
+ --version : Print only software version.
+ --noreleasecheck : Do not check for any new imapsync release.
+ --releasecheck : Check for new imapsync release.
+ it's an http request to
+ http://imapsync.lamiral.info/prj/imapsync/VERSION
+
+ --noid : Do not send/receive ID command to imap servers.
+
+ --justconnect : Just connect to both servers and print useful
+ information. Need only --host1 and --host2 options.
+ Obsolete since "imapsync --host1 imaphost" alone
+ implies --justconnect
+
+ --justlogin : Just login to both host1 and host2 with users
+ credentials, then exit.
+
+ --justfolders : Do only things about folders (ignore messages).
+
+ --help : print this help.
+
+ Example: to synchronize imap account "test1" on "test1.lamiral.info"
+ to imap account "test2" on "test2.lamiral.info"
+ with test1 password "secret1"
+ and test2 password "secret2"
+
+ imapsync \
+ --host1 test1.lamiral.info --user1 test1 --password1 secret1 \
+ --host2 test2.lamiral.info --user2 test2 --password2 secret2
+
+
+=cut
+# comment
+
+=pod
+
+
+
+=head1 SECURITY
+
+You can use --passfile1 instead of --password1 to give the
+password since it is safer. With --password1 option, on Linux,
+any user on your host can see the password by using the 'ps auxwwww'
+command. Using a variable (like IMAPSYNC_PASSWORD1) is also
+dangerous because of the 'ps auxwwwwe' command. So, saving
+the password in a well protected file (600 or rw-------) is
+the best solution.
+
+Imapsync activates ssl or tls encryption by default, if possible.
+
+What detailed behavior is under this "if possible"?
+
+Imapsync activates ssl if the well known port imaps port (993) is open
+on the imap servers. If the imaps port is closed then it opens a
+normal (clear) connection on port 143 but it looks for TLS support
+in the CAPABILITY list of the servers. If TLS is supported
+then imapsync goes to encryption.
+
+If the automatic ssl and the tls detections fail then imapsync will
+not protect against sniffing activities on the network, especially
+for passwords.
+
+If you want to force ssl or tls just use --ssl1 --ssl2 or --tls1 --tls2
+
+See also the document FAQ.Security.txt in the FAQ.d/ directory
+or at https://imapsync.lamiral.info/FAQ.d/FAQ.Security.txt
+
+=head1 EXIT STATUS
+
+Imapsync will exit with a 0 status (return code) if everything went good.
+Otherwise, it exits with a non-zero status. That's classical Unix behavior.
+Here is the list of the exit code values (an integer between 0 and 255).
+The names reflect their meaning:
+
+=for comment
+egrep '^Readonly my.*\$EX' imapsync | egrep -o 'EX.*' | sed 's_^_ _'
+
+
+ EX_OK => 0 ; #/* successful termination */
+ EX_USAGE => 64 ; #/* command line usage error */
+ EX_NOINPUT => 66 ; #/* cannot open input */
+ EX_UNAVAILABLE => 69 ; #/* service unavailable */
+ EX_SOFTWARE => 70 ; #/* internal software error */
+ EXIT_CATCH_ALL => 1 ; # Any other error
+ EXIT_BY_SIGNAL => 6 ; # Should be 128+n where n is the sig_num
+ EXIT_PID_FILE_ERROR => 8 ;
+ EXIT_CONNECTION_FAILURE => 10 ;
+ EXIT_TLS_FAILURE => 12 ;
+ EXIT_AUTHENTICATION_FAILURE => 16 ;
+ EXIT_SUBFOLDER1_NO_EXISTS => 21 ;
+ EXIT_WITH_ERRORS => 111 ;
+ EXIT_WITH_ERRORS_MAX => 112 ;
+ EXIT_TESTS_FAILED => 254 ; # Like Test::More API
+
+
+
+=head1 LICENSE AND COPYRIGHT
+
+Imapsync is free, open, public but not always gratis software
+covered by the NOLIMIT Public License, now called NLPL.
+See the LICENSE file included in the distribution or just read this
+simple sentence as it IS the licence text:
+
+ "No limits to do anything with this work and this license."
+
+In case it is not long enough, I repeat:
+
+ "No limits to do anything with this work and this license."
+
+Look at https://imapsync.lamiral.info/LICENSE
+
+=head1 AUTHOR
+
+Gilles LAMIRAL <gilles@lamiral.info>
+
+Good feedback is always welcome.
+Bad feedback is very often welcome.
+
+Gilles LAMIRAL earns his living by writing, installing,
+configuring and teaching free, open and often gratis
+software. Imapsync used to be "always gratis" but now it is
+only "often gratis" because imapsync is sold by its author,
+a good way to maintain and support free open public
+software over decades.
+
+=head1 BUGS AND LIMITATIONS
+
+See https://imapsync.lamiral.info/FAQ.d/FAQ.Reporting_Bugs.txt
+
+=head1 IMAP SERVERS supported
+
+See https://imapsync.lamiral.info/S/imapservers.shtml
+
+=head1 HUGE MIGRATION
+
+If you have many mailboxes to migrate think about a little
+shell program. Write a file called file.txt (for example)
+containing users and passwords.
+The separator used in this example is ';'
+
+The file.txt file contains:
+
+user001_1;password001_1;user001_2;password001_2
+user002_1;password002_1;user002_2;password002_2
+user003_1;password003_1;user003_2;password003_2
+user004_1;password004_1;user004_2;password004_2
+user005_1;password005_1;user005_2;password005_2
+...
+
+On Unix the shell program can be:
+
+ { while IFS=';' read u1 p1 u2 p2; do
+ imapsync --host1 imap.side1.org --user1 "$u1" --password1 "$p1" \
+ --host2 imap.side2.org --user2 "$u2" --password2 "$p2" ...
+ done ; } < file.txt
+
+On Windows the batch program can be:
+
+ FOR /F "tokens=1,2,3,4 delims=; eol=#" %%G IN (file.txt) DO imapsync ^
+ --host1 imap.side1.org --user1 %%G --password1 %%H ^
+ --host2 imap.side2.org --user2 %%I --password2 %%J ...
+
+The ... have to be replaced by nothing or any imapsync option.
+Welcome in shell or batch programming !
+
+You will find already written scripts at
+https://imapsync.lamiral.info/examples/
+
+=head1 INSTALL
+
+ Imapsync works under any Unix with Perl.
+
+ Imapsync works under most Windows (2000, XP, Vista, Seven, Eight, Ten
+ and all Server releases 2000, 2003, 2008 and R2, 2012 and R2, 2016)
+ as a standalone binary software called imapsync.exe,
+ usually launched from a batch file in order to avoid always typing
+ the options. There is also a 64bit binary called imapsync_64bit.exe
+
+ Imapsync works under OS X as a standalone binary
+ software called imapsync_bin_Darwin
+
+ Purchase latest imapsync at
+ https://imapsync.lamiral.info/
+
+ You'll receive a link to a compressed tarball called imapsync-x.xx.tgz
+ where x.xx is the version number. Untar the tarball where
+ you want (on Unix):
+
+ tar xzvf imapsync-x.xx.tgz
+
+ Go into the directory imapsync-x.xx and read the INSTALL file.
+ As mentioned at https://imapsync.lamiral.info/#install
+ the INSTALL file can also be found at
+ https://imapsync.lamiral.info/INSTALL.d/INSTALL.ANY.txt
+ It is now split in several files for each system
+ https://imapsync.lamiral.info/INSTALL.d/
+
+=head1 CONFIGURATION
+
+There is no specific configuration file for imapsync,
+everything is specified by the command line parameters
+and the default behavior.
+
+
+=head1 HACKING
+
+Feel free to hack imapsync as the NOLIMIT license permits it.
+
+
+=head1 SIMILAR SOFTWARE
+
+ See also https://imapsync.lamiral.info/S/external.shtml
+ for a better up to date list.
+
+Last updated and verified on Sun Dec 8, 2019.
+
+
+ imapsync: https://github.com/imapsync/imapsync (this is an imapsync copy, sometimes delayed, with --noreleasecheck by default since release 1.592, 2014/05/22)
+ imap_tools: https://web.archive.org/web/20161228145952/http://www.athensfbc.com/imap_tools/. The imap_tools code is now at https://github.com/andrewnimmo/rick-sanders-imap-tools
+ imaputils: https://github.com/mtsatsenko/imaputils (very old imap_tools fork)
+ Doveadm-Sync: https://wiki2.dovecot.org/Tools/Doveadm/Sync ( Dovecot sync tool )
+ davmail: http://davmail.sourceforge.net/
+ offlineimap: http://offlineimap.org/
+ mbsync: http://isync.sourceforge.net/
+ mailsync: http://mailsync.sourceforge.net/
+ mailutil: https://www.washington.edu/imap/ part of the UW IMAP toolkit. (well, seems abandoned now)
+ imaprepl: https://bl0rg.net/software/ http://freecode.com/projects/imap-repl/
+ imapcopy (Pascal): http://www.ardiehl.de/imapcopy/
+ imapcopy (Java): https://code.google.com/archive/p/imapcopy/
+ imapsize: http://www.broobles.com/imapsize/
+ migrationtool: http://sourceforge.net/projects/migrationtool/
+ imapmigrate: http://sourceforge.net/projects/cyrus-utils/
+ larch: https://github.com/rgrove/larch (derived from wonko_imapsync, good at Gmail)
+ wonko_imapsync: http://wonko.com/article/554 (superseded by larch)
+ pop2imap: http://www.linux-france.org/prj/pop2imap/ (I wrote that too)
+ exchange-away: http://exchange-away.sourceforge.net/
+ SyncBackPro: http://www.2brightsparks.com/syncback/sbpro.html
+ ImapSyncClient: https://github.com/ridaamirini/ImapSyncClient
+ MailStore: https://www.mailstore.com/en/products/mailstore-home/
+ mnIMAPSync: https://github.com/manusa/mnIMAPSync
+ imap-upload: http://imap-upload.sourceforge.net/ (A tool for uploading a local mbox file to IMAP4 server)
+ imapbackup: https://github.com/rcarmo/imapbackup (A Python script for incremental backups of IMAP mailboxes)
+ BitRecover email-backup 99 USD, 299 USD https://www.bitrecover.com/email-backup/.
+ ImportExportTools: https://addons.thunderbird.net/en-us/thunderbird/addon/importexporttools/ ImportExportTools for Mozilla Thunderbird by Paolo Kaosmos. ImportExportTools does not do IMAP.
+
+
+
+=head1 HISTORY
+
+I initially wrote imapsync in July 2001 because an enterprise,
+called BaSystemes, paid me to install a new imap server
+without losing huge old mailboxes located in a far
+away remote imap server, accessible by an
+often broken low-bandwidth ISDN link.
+
+I had to verify every mailbox was well transferred, all folders, all messages,
+without wasting bandwidth or creating duplicates upon resyncs. The imapsync
+design was made with the beautiful rsync command in mind.
+
+Imapsync started its life as a patch of the copy_folder.pl
+script. The script copy_folder.pl comes from the Mail-IMAPClient-2.1.3 perl
+module tarball source (more precisely in the examples/ directory of the
+Mail-IMAPClient tarball).
+
+So much has happened since then that I wonder
+whether any lines of the original
+copy_folder.pl remain in the imapsync source code.
+
+
+=cut
+
+
+# use pragmas
+#
+
+use strict ;
+use warnings ;
+use Carp ;
+use Data::Dumper ;
+use Digest::HMAC_SHA1 qw( hmac_sha1 hmac_sha1_hex ) ;
+use Digest::MD5 qw( md5 md5_hex md5_base64 ) ;
+use English qw( -no_match_vars ) ;
+use Errno qw(EAGAIN EPIPE ECONNRESET) ;
+use Fcntl ;
+use File::Basename ;
+use File::Copy::Recursive ;
+use File::Glob qw( :glob ) ;
+use File::Path qw( mkpath rmtree ) ;
+use File::Spec ;
+use File::stat ;
+use Getopt::Long ( ) ;
+use IO::File ;
+use IO::Socket qw( :crlf SOL_SOCKET SO_KEEPALIVE ) ;
+use IO::Socket::INET6 ;
+use IO::Socket::SSL ;
+use IO::Tee ;
+use IPC::Open3 'open3' ;
+use Mail::IMAPClient 3.30 ;
+use MIME::Base64 ;
+use Pod::Usage qw(pod2usage) ;
+use POSIX qw(uname SIGALRM :sys_wait_h) ;
+use Sys::Hostname ;
+use Term::ReadKey ;
+use Test::More ;
+use Time::HiRes qw( time sleep ) ;
+use Time::Local ;
+use Unicode::String ;
+use Cwd ;
+use Readonly ;
+use Sys::MemInfo ;
+use Regexp::Common ;
+use Text::ParseWords ; # for quotewords()
+use File::Tail ;
+
+use Encode ;
+use Encode::IMAPUTF7 ;
+
+
+local $OUTPUT_AUTOFLUSH = 1 ;
+
+# constants
+
+# Let us do like sysexits.h
+# /usr/include/sysexits.h
+# and https://www.tldp.org/LDP/abs/html/exitcodes.html
+
+# Should avoid 2 126 127 128..128+64=192 255
+# Should use 0 1 3..125 193..254
+
+Readonly my $EX_OK => 0 ; #/* successful termination */
+Readonly my $EX_USAGE => 64 ; #/* command line usage error */
+#Readonly my $EX_DATAERR => 65 ; #/* data format error */
+Readonly my $EX_NOINPUT => 66 ; #/* cannot open input */
+#Readonly my $EX_NOUSER => 67 ; #/* addressee unknown */
+#Readonly my $EX_NOHOST => 68 ; #/* host name unknown */
+Readonly my $EX_UNAVAILABLE => 69 ; #/* service unavailable */
+Readonly my $EX_SOFTWARE => 70 ; #/* internal software error */
+#Readonly my $EX_OSERR => 71 ; #/* system error (e.g., can't fork) */
+#Readonly my $EX_OSFILE => 72 ; #/* critical OS file missing */
+#Readonly my $EX_CANTCREAT => 73 ; #/* can't create (user) output file */
+#Readonly my $EX_IOERR => 74 ; #/* input/output error */
+#Readonly my $EX_TEMPFAIL => 75 ; #/* temp failure; user is invited to retry */
+#Readonly my $EX_PROTOCOL => 76 ; #/* remote error in protocol */
+#Readonly my $EX_NOPERM => 77 ; #/* permission denied */
+#Readonly my $EX_CONFIG => 78 ; #/* configuration error */
+
+# Mine
+Readonly my $EXIT_CATCH_ALL => 1 ; # Any other error
+Readonly my $EXIT_BY_SIGNAL => 6 ; # Should be 128+n where n is the sig_num
+Readonly my $EXIT_PID_FILE_ERROR => 8 ;
+Readonly my $EXIT_CONNECTION_FAILURE => 10 ;
+Readonly my $EXIT_TLS_FAILURE => 12 ;
+Readonly my $EXIT_AUTHENTICATION_FAILURE => 16 ;
+Readonly my $EXIT_SUBFOLDER1_NO_EXISTS => 21 ;
+Readonly my $EXIT_WITH_ERRORS => 111 ;
+Readonly my $EXIT_WITH_ERRORS_MAX => 112 ;
+
+
+Readonly my $EXIT_TESTS_FAILED => 254 ; # Like Test::More API
+
+
+Readonly my %EXIT_TXT => (
+ $EX_OK => 'EX_OK: successful termination',
+ $EX_USAGE => 'EX_USAGE: command line usage error',
+ $EX_NOINPUT => 'EX_NOINPUT: cannot open input',
+ $EX_UNAVAILABLE => 'EX_UNAVAILABLE: service unavailable',
+ $EX_SOFTWARE => 'EX_SOFTWARE: internal software error',
+
+ $EXIT_CATCH_ALL => 'EXIT_CATCH_ALL',
+ $EXIT_BY_SIGNAL => 'EXIT_BY_SIGNAL',
+ $EXIT_PID_FILE_ERROR => 'EXIT_PID_FILE_ERROR' ,
+ $EXIT_CONNECTION_FAILURE => 'EXIT_CONNECTION_FAILURE',
+ $EXIT_TLS_FAILURE => 'EXIT_TLS_FAILURE',
+ $EXIT_AUTHENTICATION_FAILURE => 'EXIT_AUTHENTICATION_FAILURE',
+ $EXIT_SUBFOLDER1_NO_EXISTS => 'EXIT_SUBFOLDER1_NO_EXISTS',
+ $EXIT_WITH_ERRORS => 'EXIT_WITH_ERRORS',
+ $EXIT_WITH_ERRORS_MAX => 'EXIT_WITH_ERRORS_MAX',
+ $EXIT_TESTS_FAILED => 'EXIT_TESTS_FAILED',
+) ;
+
+
+Readonly my $DEFAULT_LOGDIR => 'LOG_imapsync' ;
+
+Readonly my $ERRORS_MAX => 50 ; # exit after 50 errors.
+Readonly my $ERRORS_MAX_CGI => 20 ; # exit after 20 errors in CGI context.
+
+
+
+Readonly my $INTERVAL_TO_EXIT => 2 ; # interval max to exit instead of reconnect
+
+Readonly my $SPLIT => 100 ; # By default, 100 at a time, not more.
+Readonly my $SPLIT_FACTOR => 10 ; # init_imap() calls Maxcommandlength( $SPLIT_FACTOR * $split )
+ # which means default Maxcommandlength is 10*100 = 1000 characters ;
+
+Readonly my $IMAP_PORT => 143 ; # Well know port for IMAP
+Readonly my $IMAP_SSL_PORT => 993 ; # Well know port for IMAP over SSL
+
+Readonly my $LAST => -1 ;
+Readonly my $MINUS_ONE => -1 ;
+Readonly my $MINUS_TWO => -2 ;
+
+Readonly my $RELEASE_NUMBER_EXAMPLE_1 => '1.351' ;
+Readonly my $RELEASE_NUMBER_EXAMPLE_2 => 42.4242 ;
+
+Readonly my $TCP_PING_TIMEOUT => 5 ;
+Readonly my $DEFAULT_TIMEOUT => 120 ;
+Readonly my $DEFAULT_NB_RECONNECT_PER_IMAP_COMMAND => 3 ;
+Readonly my $DEFAULT_UIDNEXT => 999_999 ;
+Readonly my $DEFAULT_BUFFER_SIZE => 4096 ;
+
+Readonly my $MAX_SLEEP => 2 ; # 2 seconds max for limiting too long sleeps from --maxbytespersecond and --maxmessagespersecond
+
+Readonly my $DEFAULT_EXPIRATION_TIME_OAUTH2_PK12 => 3600 ;
+
+Readonly my $PERMISSION_FILTER => 7777 ;
+
+Readonly my $KIBI => 1024 ;
+
+Readonly my $NUMBER_10 => 10 ;
+Readonly my $NUMBER_42 => 42 ;
+Readonly my $NUMBER_100 => 100 ;
+Readonly my $NUMBER_200 => 200 ;
+Readonly my $NUMBER_300 => 300 ;
+Readonly my $NUMBER_123456 => 123_456 ;
+Readonly my $NUMBER_654321 => 654_321 ;
+
+Readonly my $NUMBER_20_000 => 20_000 ;
+
+Readonly my $QUOTA_PERCENT_LIMIT => 90 ;
+
+Readonly my $NUMBER_104_857_600 => 104_857_600 ;
+
+Readonly my $SIZE_MAX_STR => 64 ;
+
+Readonly my $NB_SECONDS_IN_A_DAY => 86_400 ;
+
+Readonly my $STD_CHAR_PER_LINE => 80 ;
+
+Readonly my $TRUE => 1 ;
+Readonly my $FALSE => 0 ;
+
+Readonly my $LAST_RESSORT_SEPARATOR => q{/} ;
+
+Readonly my $CGI_TMPDIR_TOP => '/var/tmp/imapsync_cgi' ;
+Readonly my $CGI_HASHFILE => '/var/tmp/imapsync_hash' ;
+Readonly my $UMASK_PARANO => '0077' ;
+
+Readonly my $STR_use_releasecheck => q{Check if a new imapsync release is available by adding --releasecheck} ;
+
+Readonly my $GMAIL_MAXSIZE => 35_651_584 ;
+
+Readonly my $FORCE => 1 ;
+
+# if ( 'MSWin32' eq $OSNAME )
+# if ( 'darwin' eq $OSNAME )
+# if ( 'linux' eq $OSNAME )
+
+
+
+# global variables
+# Currently working to finish with only $sync
+# Not finished yet...
+
+my(
+ $sync,
+ $timestart_str,
+ $debugimap, $debugimap1, $debugimap2, $debugcontent, $debugflags,
+ $debuglist, $debugdev, $debugmaxlinelength, $debugcgi,
+ $domain1, $domain2,
+
+ @include, @exclude, @folderrec,
+ @folderfirst, @folderlast,
+ @h1_folders_all, %h1_folders_all,
+ @h2_folders_all, %h2_folders_all,
+ @h2_folders_from_1_wanted, %h2_folders_from_1_all,
+ %requested_folder,
+ $h1_folders_wanted_nb, $h1_folders_wanted_ct,
+ @h2_folders_not_in_1,
+ %h1_subscribed_folder, %h2_subscribed_folder,
+ %h2_folders_from_1_wanted,
+ %h2_folders_from_1_several,
+
+ $prefix1, $prefix2,
+ @regexmess, @regexflag, @skipmess, @pipemess, $pipemesscheck,
+ $flagscase, $filterflags, $syncflagsaftercopy,
+ $syncinternaldates,
+ $idatefromheader,
+ $syncacls,
+ $fastio1, $fastio2,
+ $minsize, $maxage, $minage,
+ $search,
+ $skipheader, @useheader, %useheader,
+ $skipsize, $allowsizemismatch, $buffersize,
+
+
+ $authmd5, $authmd51, $authmd52,
+ $subscribed, $subscribe, $subscribeall,
+ $help,
+ $justbanner,
+ $fast,
+
+ $nb_msg_skipped_dry_mode,
+ $h1_nb_msg_duplicate,
+ $h2_nb_msg_duplicate,
+ $h2_nb_msg_noheader,
+
+ $h2_nb_msg_deleted,
+
+ $h1_bytes_processed,
+
+ $h1_nb_msg_end, $h1_bytes_end,
+ $h2_nb_msg_end, $h2_bytes_end,
+
+ $timeout,
+ $timestart_int,
+
+ $uid1, $uid2,
+ $authuser1, $authuser2,
+ $proxyauth1, $proxyauth2,
+ $authmech1, $authmech2,
+ $split1, $split2,
+ $reconnectretry1, $reconnectretry2,
+ $max_msg_size_in_bytes,
+ $modulesversion,
+ $delete2folders, $delete2foldersonly, $delete2foldersbutnot,
+ $usecache, $debugcache, $cacheaftercopy,
+ $wholeheaderifneeded, %h1_msgs_copy_by_uid, $useuid, $h2_uidguess,
+ $checkmessageexists,
+ $messageidnodomain,
+ $fixInboxINBOX,
+ $maxlinelength, $maxlinelengthcmd,
+ $minmaxlinelength,
+ $uidnext_default,
+ $fixcolonbug,
+ $create_folder_old,
+ $skipcrossduplicates, $debugcrossduplicates,
+ $disarmreadreceipts,
+ $mixfolders,
+ $fetch_hash_set,
+ $cgidir,
+ %month_abrev,
+ $SSL_VERIFY_POLICY,
+ $warn_release,
+) ;
+
+single_sync( ) ;
+
+sub single_sync
+{
+
+# main program
+# global variables initialization
+
+# I'm currently removing all global variables except $sync
+# passing each of them under $sync->{variable_name}
+
+$sync->{timestart} = time ; # Is a float because of use Time::HiRres
+
+$sync->{rcs} = q{$Id: imapsync,v 1.977 2019/12/23 20:18:02 gilles Exp gilles $} ;
+
+$sync->{ memory_consumption_at_start } = memory_consumption( ) || 0 ;
+
+
+my @loadavg = loadavg( ) ;
+
+$sync->{cpu_number} = cpu_number( ) ;
+$sync->{loaddelay} = load_and_delay( $sync->{cpu_number}, @loadavg ) ;
+$sync->{loadavg} = join( q{ }, $loadavg[ 0 ] )
+ . " on $sync->{cpu_number} cores and "
+ . ram_memory_info( ) ;
+
+
+
+$sync->{ total_bytes_transferred } = 0 ;
+$sync->{ total_bytes_skipped } = 0 ;
+$sync->{ nb_msg_transferred } = 0 ;
+$sync->{ nb_msg_skipped } = $nb_msg_skipped_dry_mode = 0 ;
+$sync->{ h1_nb_msg_deleted } = 0 ;
+$h2_nb_msg_deleted = 0 ;
+$h1_nb_msg_duplicate = 0 ;
+$h2_nb_msg_duplicate = 0 ;
+$sync->{ h1_nb_msg_noheader } = 0 ;
+$h2_nb_msg_noheader = 0 ;
+
+
+$sync->{ h1_nb_msg_start } = 0 ;
+$sync->{ h1_bytes_start } = 0 ;
+$sync->{ h2_nb_msg_start } = 0 ;
+$sync->{ h2_bytes_start } = 0 ;
+$sync->{ h1_nb_msg_processed } = $h1_bytes_processed = 0 ;
+
+$sync->{ h2_nb_msg_crossdup } = 0 ;
+
+#$h1_nb_msg_end = $h1_bytes_end = 0 ;
+#$h2_nb_msg_end = $h2_bytes_end = 0 ;
+
+$sync->{nb_errors} = 0;
+$max_msg_size_in_bytes = 0;
+
+%month_abrev = (
+ Jan => '00',
+ Feb => '01',
+ Mar => '02',
+ Apr => '03',
+ May => '04',
+ Jun => '05',
+ Jul => '06',
+ Aug => '07',
+ Sep => '08',
+ Oct => '09',
+ Nov => '10',
+ Dec => '11',
+);
+
+
+
+# Just create a CGI object if under cgi context only.
+# Needed for the get_options() call
+cgibegin( $sync ) ;
+
+# In cgi context, printing must start by the header so we delay other prints by using output() storage
+my $options_good = get_options( $sync, @ARGV ) ;
+# Is it the first myprint?
+docker_context( $sync ) ;
+cgibuildheader( $sync ) ;
+
+myprint( output( $sync ) ) ;
+output_reset_with( $sync ) ;
+
+# Old place for cgiload( $sync ) ;
+
+# don't go on if options are not all known.
+if ( ! defined $options_good ) { exit $EX_USAGE ; }
+
+# If you want releasecheck not to be done by default (like the github maintainer),
+# then just uncomment the first "$sync->{releasecheck} =" line, the line ending with "0 ;",
+# the second line (ending with "1 ;") can then stay active or be commented,
+# the result will be the same: no releasecheck by default (because 0 is then the defined value).
+
+#$sync->{releasecheck} = defined $sync->{releasecheck} ? $sync->{releasecheck} : 0 ;
+$sync->{releasecheck} = defined $sync->{releasecheck} ? $sync->{releasecheck} : 1 ;
+
+# just the version
+if ( $sync->{ version } ) {
+ myprint( imapsync_version( $sync ), "\n" ) ;
+ exit 0 ;
+}
+
+#$sync->{debugenv} = 1 ;
+$sync->{debugenv} and printenv( $sync ) ; # if option --debugenv
+load_modules( ) ;
+
+# after_get_options call usage and exit if --help or options were not well got
+after_get_options( $sync, $options_good ) ;
+
+
+# Under CGI environment, fix caveat emptor potential issues
+cgisetcontext( $sync ) ;
+
+# --gmail --gmail --exchange --office etc.
+easyany( $sync ) ;
+
+$sync->{ sanitize } = defined $sync->{ sanitize } ? $sync->{ sanitize } : 1 ;
+sanitize( $sync ) ;
+
+$sync->{ tmpdir } ||= File::Spec->tmpdir( ) ;
+
+# Unit tests
+testsexit( $sync ) ;
+
+# init live variables
+testslive_init( $sync ) if ( $sync->{testslive} ) ;
+testslive6_init( $sync ) if ( $sync->{testslive6} ) ;
+
+#
+
+pidfile( $sync ) ;
+
+# old abort place
+
+install_signals( $sync ) ;
+
+$sync->{log} = defined $sync->{log} ? $sync->{log} : 1 ;
+$sync->{errorsdump} = defined $sync->{errorsdump} ? $sync->{errorsdump} : 1 ;
+$sync->{errorsmax} = defined $sync->{errorsmax} ? $sync->{errorsmax} : $ERRORS_MAX ;
+
+# log and output
+binmode STDOUT, ":encoding(UTF-8)" ;
+
+if ( $sync->{log} ) {
+ setlogfile( $sync ) ;
+ teelaunch( $sync ) ;
+ # now $sync->{tee} is a filehandle to STDOUT and the logfile
+}
+
+#binmode STDERR, ":encoding(UTF-8)" ;
+# STDERR goes to the same place: LOG and STDOUT (if logging is on)
+# Useful only for --debugssl
+$sync->{tee} and local *STDERR = *${$sync->{tee}}{IO} ;
+
+
+
+$timestart_int = int( $sync->{timestart} ) ;
+$sync->{timebefore} = $sync->{timestart} ;
+
+
+$timestart_str = localtime( $sync->{timestart} ) ;
+
+# The prints in the log starts here
+
+myprint( localhost_info( $sync ), "\n" ) ;
+myprint( "Transfer started at $timestart_str\n" ) ;
+myprint( "PID is $PROCESS_ID my PPID is ", mygetppid( ), "\n" ) ;
+myprint( "Log file is $sync->{logfile} ( to change it, use --logfile path ; or use --nolog to turn off logging )\n" ) if ( $sync->{log} ) ;
+myprint( "Load is " . ( join( q{ }, loadavg( ) ) || 'unknown' ), " on $sync->{cpu_number} cores\n" ) ;
+#myprintf( "Memory consumption so far: %.1f MiB\n", memory_consumption( ) / $KIBI / $KIBI ) ;
+myprint( 'Current directory is ' . getcwd( ) . "\n" ) ;
+myprint( 'Real user id is ' . getpwuid_any_os( $REAL_USER_ID ) . " (uid $REAL_USER_ID)\n" ) ;
+myprint( 'Effective user id is ' . getpwuid_any_os( $EFFECTIVE_USER_ID ). " (euid $EFFECTIVE_USER_ID)\n" ) ;
+
+$modulesversion = defined $modulesversion ? $modulesversion : 1 ;
+
+$warn_release = ( $sync->{releasecheck} ) ? check_last_release( ) : $STR_use_releasecheck ;
+
+
+$wholeheaderifneeded = defined $wholeheaderifneeded ? $wholeheaderifneeded : 1;
+
+# Activate --usecache if --useuid is set and no --nousecache
+$usecache = 1 if ( $useuid and ( ! defined $usecache ) ) ;
+$cacheaftercopy = 1 if ( $usecache and ( ! defined $cacheaftercopy ) ) ;
+
+$sync->{ checkselectable } = defined $sync->{ checkselectable } ? $sync->{ checkselectable } : 1 ;
+$sync->{ checkfoldersexist } = defined $sync->{ checkfoldersexist } ? $sync->{ checkfoldersexist } : 1 ;
+$checkmessageexists = defined $checkmessageexists ? $checkmessageexists : 0 ;
+$sync->{ expungeaftereach } = defined $sync->{ expungeaftereach } ? $sync->{ expungeaftereach } : 1 ;
+
+# abletosearch is on by default
+$sync->{abletosearch} = defined $sync->{abletosearch} ? $sync->{abletosearch} : 1 ;
+$sync->{abletosearch1} = defined $sync->{abletosearch1} ? $sync->{abletosearch1} : $sync->{abletosearch} ;
+$sync->{abletosearch2} = defined $sync->{abletosearch2} ? $sync->{abletosearch2} : $sync->{abletosearch} ;
+$checkmessageexists = 0 if ( not $sync->{abletosearch1} ) ;
+
+
+$sync->{showpasswords} = defined $sync->{showpasswords} ? $sync->{showpasswords} : 0 ;
+$sync->{ fixslash2 } = defined $sync->{ fixslash2 } ? $sync->{ fixslash2 } : 1 ;
+$fixInboxINBOX = defined $fixInboxINBOX ? $fixInboxINBOX : 1 ;
+$create_folder_old = defined $create_folder_old ? $create_folder_old : 0 ;
+$mixfolders = defined $mixfolders ? $mixfolders : 1 ;
+$sync->{automap} = defined $sync->{automap} ? $sync->{automap} : 0 ;
+
+$sync->{ delete2duplicates } = 1 if ( $sync->{ delete2 } and ( ! defined $sync->{ delete2duplicates } ) ) ;
+
+$sync->{maxmessagespersecond} = defined $sync->{maxmessagespersecond} ? $sync->{maxmessagespersecond} : 0 ;
+$sync->{maxbytespersecond} = defined $sync->{maxbytespersecond} ? $sync->{maxbytespersecond} : 0 ;
+
+$sync->{sslcheck} = defined $sync->{sslcheck} ? $sync->{sslcheck} : 1 ;
+
+myprint( banner_imapsync( $sync, @ARGV ) ) ;
+
+myprint( "Temp directory is $sync->{ tmpdir } ( to change it use --tmpdir dirpath )\n" ) ;
+
+myprint( output( $sync ) ) ;
+output_reset_with( $sync ) ;
+
+do_valid_directory( $sync->{ tmpdir } ) || croak "Error creating tmpdir $sync->{ tmpdir } : $OS_ERROR" ;
+
+remove_pidfile_not_running( $sync->{pidfile} ) ;
+
+# if another imapsync is running then tail -f its logfile and exit
+# useful in cgi context
+if ( $sync->{ tail } and tail( $sync ) )
+{
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EX_OK, "Tail -f finished. Now finishing myself processus $PROCESS_ID\n" ) ;
+ exit $EX_OK ;
+}
+
+if ( ! write_pidfile( $sync ) ) {
+ myprint( "Exiting with return value $EXIT_PID_FILE_ERROR ($EXIT_TXT{$EXIT_PID_FILE_ERROR}) $sync->{nb_errors}/$sync->{errorsmax} nb_errors/max_errors\n" ) ;
+ exit $EXIT_PID_FILE_ERROR ;
+}
+
+
+# New place for abort
+# abort before simulong in order to be able to abort a simulong sync
+if ( $sync->{ abort } )
+{
+ abort( $sync ) ;
+}
+
+# simulong is just a loop printing some lines for xx seconds with option "--simulong xx".
+if ( $sync->{ simulong } )
+{
+ simulong( $sync->{ simulong } ) ;
+}
+
+
+# New place for cgiload 2019_03_03
+# because I want to log it
+# Can break here if load is too heavy
+cgiload( $sync ) ;
+
+
+$fixcolonbug = defined $fixcolonbug ? $fixcolonbug : 1 ;
+
+if ( $usecache and $fixcolonbug ) { tmpdir_fix_colon_bug( $sync ) } ;
+
+$modulesversion and myprint( "Modules version list:\n", modulesversion(), "( use --no-modulesversion to turn off printing this Perl modules list )\n" ) ;
+
+
+check_lib_version( $sync ) or
+ croak "imapsync needs perl lib Mail::IMAPClient release 3.30 or superior.\n";
+
+
+exit_clean( $sync, $EX_OK ) if ( $justbanner ) ;
+
+# turn on RFC standard flags correction like \SEEN -> \Seen
+$flagscase = defined $flagscase ? $flagscase : 1 ;
+
+# Use PERMANENTFLAGS if available
+$filterflags = defined $filterflags ? $filterflags : 1 ;
+
+# sync flags just after an APPEND, some servers ignore the flags given in the APPEND
+# like MailEnable IMAP server.
+# Off by default since it takes time.
+$syncflagsaftercopy = defined $syncflagsaftercopy ? $syncflagsaftercopy : 0 ;
+
+# update flags on host2 for already transferred messages
+$sync->{resyncflags} = defined $sync->{resyncflags} ? $sync->{resyncflags} : 1 ;
+if ( $sync->{resyncflags} ) {
+ myprint( "Info: will resync flags for already transferred messages. Use --noresyncflags to not resync flags.\n" ) ;
+}else{
+ myprint( "Info: will not resync flags for already transferred messages. Use --resyncflags to resync flags.\n" ) ;
+}
+
+
+sslcheck( $sync ) ;
+#print Data::Dumper->Dump( [ \$sync ] ) ;
+
+$split1 ||= $SPLIT ;
+$split2 ||= $SPLIT ;
+
+#$sync->{host1} || missing_option( $sync, '--host1' ) ;
+$sync->{port1} ||= ( $sync->{ssl1} ) ? $IMAP_SSL_PORT : $IMAP_PORT ;
+
+#$sync->{host2} || missing_option( $sync, '--host2' ) ;
+$sync->{port2} ||= ( $sync->{ssl2} ) ? $IMAP_SSL_PORT : $IMAP_PORT ;
+
+$debugimap1 = $debugimap2 = 1 if ( $debugimap ) ;
+$sync->{ debug } = 1 if ( $debugimap1 or $debugimap2 ) ;
+
+# By default, don't take size to compare
+$skipsize = (defined $skipsize) ? $skipsize : 1;
+
+$uid1 = defined $uid1 ? $uid1 : 1;
+$uid2 = defined $uid2 ? $uid2 : 1;
+
+$subscribe = defined $subscribe ? $subscribe : 1;
+
+# Allow size mismatch by default
+$allowsizemismatch = defined $allowsizemismatch ? $allowsizemismatch : 1;
+
+
+if ( defined $delete2foldersbutnot or defined $delete2foldersonly ) {
+ $delete2folders = 1 ;
+}
+
+
+my %SSL_VERIFY_STR ;
+
+Readonly $SSL_VERIFY_POLICY => IO::Socket::SSL::SSL_VERIFY_NONE( ) ;
+Readonly %SSL_VERIFY_STR => (
+ IO::Socket::SSL::SSL_VERIFY_NONE( ) => 'SSL_VERIFY_NONE, ie, do not check the certificate server.' ,
+ IO::Socket::SSL::SSL_VERIFY_PEER( ) => 'SSL_VERIFY_PEER, ie, check the certificate server' ,
+) ;
+
+$IO::Socket::SSL::DEBUG = defined( $sync->{debugssl} ) ? $sync->{debugssl} : 1 ;
+
+
+if ( $sync->{ssl1} or $sync->{ssl2} or $sync->{tls1} or $sync->{tls2}) {
+ myprint( "SSL debug mode level is --debugssl $IO::Socket::SSL::DEBUG (can be set from 0 meaning no debug to 4 meaning max debug)\n" ) ;
+}
+
+if ( $sync->{ssl1} ) {
+ myprint( qq{Host1: SSL default mode is like --sslargs1 "SSL_verify_mode=$SSL_VERIFY_POLICY", meaning for host1 $SSL_VERIFY_STR{$SSL_VERIFY_POLICY}\n} ) ;
+ myprint( 'Host1: Use --sslargs1 SSL_verify_mode=' . IO::Socket::SSL::SSL_VERIFY_PEER( ) . " to have $SSL_VERIFY_STR{IO::Socket::SSL::SSL_VERIFY_PEER( )} of host1\n" ) ;
+}
+
+if ( $sync->{ssl2} ) {
+ myprint( qq{Host2: SSL default mode is like --sslargs2 "SSL_verify_mode=$SSL_VERIFY_POLICY", meaning for host2 $SSL_VERIFY_STR{$SSL_VERIFY_POLICY}\n} ) ;
+ myprint( 'Host2: Use --sslargs2 SSL_verify_mode=' . IO::Socket::SSL::SSL_VERIFY_PEER( ) . " to have $SSL_VERIFY_STR{IO::Socket::SSL::SSL_VERIFY_PEER( )} of host2\n" ) ;
+}
+
+# ID on by default since 1.832
+$sync->{id} = defined $sync->{id} ? $sync->{id} : 1 ;
+
+if ( $sync->{justconnect}
+ or not $sync->{user1}
+ or not $sync->{user2}
+ or not $sync->{host1}
+ or not $sync->{host2}
+ )
+{
+ my $justconnect = justconnect( $sync ) ;
+
+ myprint( debugmemory( $sync, " after justconnect() call" ) ) ;
+ exit_clean( $sync, $EX_OK,
+ "Exiting after a justconnect on host(s): $justconnect\n"
+ ) ;
+}
+
+
+#$sync->{user1} || missing_option( $sync, '--user1' ) ;
+#$sync->{user2} || missing_option( $sync, '--user2' ) ;
+
+$syncinternaldates = defined $syncinternaldates ? $syncinternaldates : 1;
+
+# Turn on expunge if there is not explicit option --noexpunge1 and option
+# --delete1 is given.
+# Done because --delete1 --noexpunge1 is very dangerous on the second run:
+# the Deleted flag is then synced to all previously transferred messages.
+# So --delete1 implies --expunge1 is a better usability default behavior.
+if ( $sync->{ delete1 } ) {
+ if ( ! defined $sync->{ expunge1 } ) {
+ myprint( "Info: turning on --expunge1 because --delete1 --noexpunge1 is very dangerous on the second run.\n" ) ;
+ $sync->{ expunge1 } = 1 ;
+ }
+ myprint( "Info: if expunging after each message slows down too much the sync then use --noexpungeaftereach to speed up\n" ) ;
+}
+
+if ( $sync->{ uidexpunge2 } and not Mail::IMAPClient->can( 'uidexpunge' ) ) {
+ myprint( "Failure: uidexpunge not supported (IMAPClient release < 3.17), use nothing or --expunge2 instead\n" ) ;
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EX_SOFTWARE ) ;
+}
+
+if ( ( $sync->{ delete2 } or $sync->{ delete2duplicates } ) and not defined $sync->{ uidexpunge2 } ) {
+ if ( Mail::IMAPClient->can( 'uidexpunge' ) ) {
+ myprint( "Info: will act as --uidexpunge2\n" ) ;
+ $sync->{ uidexpunge2 } = 1 ;
+ }elsif ( not defined $sync->{ expunge2 } ) {
+ myprint( "Info: will act as --expunge2 (no uidexpunge support)\n" ) ;
+ $sync->{ expunge2 } = 1 ;
+ }
+}
+
+if ( $sync->{ delete1 } and $sync->{ delete2 } ) {
+ myprint( "Warning: using --delete1 and --delete2 together is almost always a bad idea, exiting imapsync\n" ) ;
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EX_USAGE ) ;
+}
+
+if ( $idatefromheader ) {
+ myprint( 'Turned ON idatefromheader, ',
+ "will set the internal dates on host2 from the 'Date:' header line.\n" ) ;
+ $syncinternaldates = 0 ;
+}
+
+if ( $syncinternaldates ) {
+ myprint( 'Info: turned ON syncinternaldates, ',
+ "will set the internal dates (arrival dates) on host2 same as host1.\n" ) ;
+}else{
+ myprint( "Info: turned OFF syncinternaldates\n" ) ;
+}
+
+if ( defined $authmd5 and $authmd5 ) {
+ $authmd51 = 1 ;
+ $authmd52 = 1 ;
+}
+
+if ( defined $authmd51 and $authmd51 ) {
+ $authmech1 ||= 'CRAM-MD5';
+}
+else{
+ $authmech1 ||= $authuser1 ? 'PLAIN' : 'LOGIN';
+}
+
+if ( defined $authmd52 and $authmd52 ) {
+ $authmech2 ||= 'CRAM-MD5';
+}
+else{
+ $authmech2 ||= $authuser2 ? 'PLAIN' : 'LOGIN';
+}
+
+$authmech1 = uc $authmech1;
+$authmech2 = uc $authmech2;
+
+if (defined $proxyauth1 && !$authuser1) {
+ missing_option( $sync, 'With --proxyauth1, --authuser1' ) ;
+}
+
+if (defined $proxyauth2 && !$authuser2) {
+ missing_option( $sync, 'With --proxyauth2, --authuser2' ) ;
+}
+
+#$authuser1 ||= $sync->{user1};
+#$authuser2 ||= $sync->{user2};
+
+myprint( "Host1: will try to use $authmech1 authentication on host1\n") ;
+myprint( "Host2: will try to use $authmech2 authentication on host2\n") ;
+
+$timeout = defined $timeout ? $timeout : $DEFAULT_TIMEOUT ;
+
+$sync->{h1}->{timeout} = defined $sync->{h1}->{timeout} ? $sync->{h1}->{timeout} : $timeout ;
+myprint( "Host1: imap connection timeout is $sync->{h1}->{timeout} seconds\n") ;
+$sync->{h2}->{timeout} = defined $sync->{h2}->{timeout} ? $sync->{h2}->{timeout} : $timeout ;
+myprint( "Host2: imap connection timeout is $sync->{h2}->{timeout} seconds\n" ) ;
+
+$syncacls = defined $syncacls ? $syncacls : 0 ;
+
+# No folders sizes if --justfolders, unless really wanted.
+if (
+ $sync->{ justfolders }
+ and not defined $sync->{ foldersizes }
+ and not $sync->{ justfoldersizes } )
+{
+ $sync->{ foldersizes } = 0 ;
+ $sync->{ foldersizesatend } = 1 ;
+}
+
+$sync->{ foldersizes } = ( defined $sync->{ foldersizes } ) ? $sync->{ foldersizes } : 1 ;
+$sync->{ foldersizesatend } = ( defined $sync->{ foldersizesatend } ) ? $sync->{ foldersizesatend } : $sync->{ foldersizes } ;
+
+
+$fastio1 = defined $fastio1 ? $fastio1 : 0 ;
+$fastio2 = defined $fastio2 ? $fastio2 : 0 ;
+
+$reconnectretry1 = defined $reconnectretry1 ? $reconnectretry1 : $DEFAULT_NB_RECONNECT_PER_IMAP_COMMAND ;
+$reconnectretry2 = defined $reconnectretry2 ? $reconnectretry2 : $DEFAULT_NB_RECONNECT_PER_IMAP_COMMAND ;
+
+# Since select_msgs() returns no messages when uidnext does not return something,
+# $uidnext_default should be unneeded; NOTE(review): it is still used as a fallback below.
+$uidnext_default = $DEFAULT_UIDNEXT ;
+
+if ( ! @useheader ) { @useheader = qw( Message-Id Received ) ; }
+
+# Make a hash %useheader of each --useheader 'key' in uppercase
+for ( @useheader ) { $useheader{ uc $_ } = undef } ;
+
+#myprint( Data::Dumper->Dump( [ \%useheader ] ) ) ;
+#exit ;
+
+myprint( "Host1: IMAP server [$sync->{host1}] port [$sync->{port1}] user [$sync->{user1}]\n" ) ;
+myprint( "Host2: IMAP server [$sync->{host2}] port [$sync->{port2}] user [$sync->{user2}]\n" ) ;
+
+get_password1( $sync ) ;
+get_password2( $sync ) ;
+
+
+$sync->{dry_message} = q{} ;
+if( $sync->{dry} ) {
+ $sync->{dry_message} = "\t(not really since --dry mode)" ;
+}
+
+$sync->{ search1 } ||= $search if ( $search ) ;
+$sync->{ search2 } ||= $search if ( $search ) ;
+
+if ( $disarmreadreceipts ) {
+ push @regexmess, q{s{\A((?:[^\n]+\r\n)+|)(^Disposition-Notification-To:[^\n]*\n)(\r?\n|.*\n\r?\n)}{$1X-$2$3}ims} ;
+}
+
+$pipemesscheck = ( defined $pipemesscheck ) ? $pipemesscheck : 1 ;
+
+if ( @pipemess and $pipemesscheck ) {
+ myprint( 'Checking each --pipemess command, '
+ . join( q{, }, @pipemess )
+ . ", with an space string. ( Can avoid this check with --nopipemesscheck )\n" ) ;
+ my $string = pipemess( q{ }, @pipemess ) ;
+ # string undef means something was bad.
+ if ( not ( defined $string ) ) {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EX_USAGE,
+ "Error: one of --pipemess command is bad, check it\n"
+ ) ;
+ }
+ myprint( "Ok with each --pipemess @pipemess\n" ) ;
+}
+
+if ( $maxlinelengthcmd ) {
+ myprint( "Checking --maxlinelengthcmd command,
+ $maxlinelengthcmd, with an space string.\n"
+ ) ;
+ my $string = pipemess( q{ }, $maxlinelengthcmd ) ;
+ # string undef means something was bad.
+ if ( not ( defined $string ) ) {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EX_USAGE,
+ "Error: --maxlinelengthcmd command is bad, check it\n"
+ ) ;
+ }
+ myprint( "Ok with --maxlinelengthcmd $maxlinelengthcmd\n" ) ;
+}
+
+if ( @regexmess ) {
+ my $string = regexmess( q{ } ) ;
+ myprint( "Checking each --regexmess command with an space string.\n" ) ;
+ # string undef means one of the eval regex was bad.
+ if ( not ( defined $string ) ) {
+ #errors_incr( $sync, 'Warning: one of --regexmess option may be bad, check them' ) ;
+ exit_clean( $sync, $EX_USAGE,
+ "Error: one of --regexmess option is bad, check it\n"
+ ) ;
+ }
+ myprint( "Ok with each --regexmess\n" ) ;
+}
+
+if ( @skipmess ) {
+ myprint( "Checking each --skipmess command with an space string.\n" ) ;
+ my $match = skipmess( q{ } ) ;
+ # match undef means one of the eval regex was bad.
+ if ( not ( defined $match ) ) {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EX_USAGE,
+ "Error: one of --skipmess option is bad, check it\n"
+ ) ;
+ }
+ myprint( "Ok with each --skipmess\n" ) ;
+}
+
+if ( @regexflag ) {
+ myprint( "Checking each --regexflag command with an space string.\n" ) ;
+ my $string = flags_regex( q{ } ) ;
+ # string undef means one of the eval regex was bad.
+ if ( not ( defined $string ) ) {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EX_USAGE,
+ "Error: one of --regexflag option is bad, check it\n"
+ ) ;
+ }
+ myprint( "Ok with each --regexflag\n" ) ;
+}
+
+$sync->{imap1} = login_imap( $sync->{host1}, $sync->{port1}, $sync->{user1}, $domain1, $sync->{password1},
+ $debugimap1, $sync->{h1}->{timeout}, $fastio1, $sync->{ssl1}, $sync->{tls1},
+ $authmech1, $authuser1, $reconnectretry1,
+ $proxyauth1, $uid1, $split1, 'Host1', $sync->{h1}, $sync ) ;
+
+$sync->{imap2} = login_imap( $sync->{host2}, $sync->{port2}, $sync->{user2}, $domain2, $sync->{password2},
+ $debugimap2, $sync->{h2}->{timeout}, $fastio2, $sync->{ssl2}, $sync->{tls2},
+ $authmech2, $authuser2, $reconnectretry2,
+ $proxyauth2, $uid2, $split2, 'Host2', $sync->{h2}, $sync ) ;
+
+
+$sync->{ debug } and myprint( 'Host1 Buffer I/O: ', $sync->{imap1}->Buffer(), "\n" ) ;
+$sync->{ debug } and myprint( 'Host2 Buffer I/O: ', $sync->{imap2}->Buffer(), "\n" ) ;
+
+
+if ( ! $sync->{imap1}->IsAuthenticated( ) )
+{
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_AUTHENTICATION_FAILURE, "Not authenticated on host1\n" ) ;
+}
+myprint( "Host1: state Authenticated\n" ) ;
+
+if ( ! $sync->{imap2}->IsAuthenticated( ) )
+{
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_AUTHENTICATION_FAILURE, "Not authenticated on host2\n" ) ;
+}
+myprint( "Host2: state Authenticated\n" ) ;
+
+myprint( 'Host1 capability once authenticated: ', join(q{ }, @{ $sync->{imap1}->capability() || [] }), "\n" ) ;
+
+#myprint( Data::Dumper->Dump( [ $sync->{imap1} ] ) ) ;
+#myprint( "imap4rev1: " . $sync->{imap1}->imap4rev1() . "\n" ) ;
+
+myprint( 'Host2 capability once authenticated: ', join(q{ }, @{ $sync->{imap2}->capability() || [] }), "\n" ) ;
+
+imap_id_stuff( $sync ) ;
+
+#quota( $sync, $sync->{imap1}, 'h1' ) ; # quota on host1 is useless and pollute host2 output.
+quota( $sync, $sync->{imap2}, 'h2' ) ;
+
+maxsize_setting( $sync ) ;
+
+if ( $sync->{ justlogin } ) {
+ $sync->{imap1}->logout( ) ;
+ $sync->{imap2}->logout( ) ;
+ exit_clean( $sync, $EX_OK, "Exiting because of --justlogin\n" ) ;
+}
+
+
+#
+# Folder stuff
+#
+
+$h1_folders_wanted_nb = 0 ; # counter of folders to be done.
+$h1_folders_wanted_ct = 0 ; # counter of folders done.
+
+# All folders on host1 and host2
+
+@h1_folders_all = sort $sync->{imap1}->folders( ) ;
+@h2_folders_all = sort $sync->{imap2}->folders( ) ;
+
+myprint( 'Host1: found ', scalar @h1_folders_all , " folders.\n" ) ;
+myprint( 'Host2: found ', scalar @h2_folders_all , " folders.\n" ) ;
+
+foreach my $f ( @h1_folders_all )
+{
+ $h1_folders_all{ $f } = 1
+}
+
+foreach my $f ( @h2_folders_all )
+{
+ $h2_folders_all{ $f } = 1 ;
+ $sync->{h2_folders_all_UPPER}{ uc $f } = 1 ;
+}
+
+$sync->{h1_folders_all} = \%h1_folders_all ;
+$sync->{h2_folders_all} = \%h2_folders_all ;
+
+
+private_folders_separators_and_prefixes( ) ;
+
+
+# Make a hash of subscribed folders in both servers.
+
+for ( $sync->{imap1}->subscribed( ) ) { $h1_subscribed_folder{ $_ } = 1 } ;
+for ( $sync->{imap2}->subscribed( ) ) { $h2_subscribed_folder{ $_ } = 1 } ;
+
+
+if ( defined $sync->{ subfolder1 } ) {
+ subfolder1( $sync ) ;
+}
+
+
+
+
+if ( defined $sync->{ subfolder2 } ) {
+ subfolder2( $sync ) ;
+}
+
+if ( $fixInboxINBOX and ( my $reg = fix_Inbox_INBOX_mapping( \%h1_folders_all, \%h2_folders_all ) ) ) {
+ push @{ $sync->{ regextrans2 } }, $reg ;
+}
+
+
+
+if ( ( $sync->{ folder } and scalar @{ $sync->{ folder } } )
+ or $subscribed
+ or scalar @folderrec )
+{
+ # folders given by option --folder
+ if ( $sync->{ folder } and scalar @{ $sync->{ folder } } ) {
+ add_to_requested_folders( @{ $sync->{ folder } } ) ;
+ }
+
+ # option --subscribed
+ if ( $subscribed ) {
+ add_to_requested_folders( keys %h1_subscribed_folder ) ;
+ }
+
+ # option --folderrec
+ if ( scalar @folderrec ) {
+ foreach my $folderrec ( @folderrec ) {
+ add_to_requested_folders( $sync->{imap1}->folders( $folderrec ) ) ;
+ }
+ }
+}
+else
+{
+ # no include, no folder/subscribed/folderrec options => all folders
+ if ( not scalar @include ) {
+ myprint( "Including all folders found by default. Use --subscribed or --folder or --folderrec or --include to select specific folders. Use --exclude to unselect specific folders.\n" ) ;
+ add_to_requested_folders( @h1_folders_all ) ;
+ }
+}
+
+
+# consider (optional) includes and excludes
+if ( scalar @include ) {
+ foreach my $include ( @include ) {
+ # No, do not add /x after the regex, never.
+ # Users would kill you!
+ my @included_folders = grep { /$include/ } @h1_folders_all ;
+ add_to_requested_folders( @included_folders ) ;
+ myprint( "Including folders matching pattern $include\n" . jux_utf8_list( @included_folders ) . "\n" ) ;
+ }
+}
+
+if ( scalar @exclude ) {
+ foreach my $exclude ( @exclude ) {
+ my @requested_folder = sort keys %requested_folder ;
+ # No, do not add /x after the regex, never.
+ # Users would kill you!
+ my @excluded_folders = grep { /$exclude/ } @requested_folder ;
+ remove_from_requested_folders( @excluded_folders ) ;
+ myprint( "Excluding folders matching pattern $exclude\n" . jux_utf8_list( @excluded_folders ) . "\n" ) ;
+ }
+}
+
+
+# sort before is not very powerful
+# it adds --folderfirst and --folderlast even if they don't exist on host1
+#@h1_folders_wanted = sort_requested_folders( ) ;
+$sync->{h1_folders_wanted} = [ sort_requested_folders( ) ] ;
+
+# Remove non-selectable folders
+
+
+if ( $sync->{ checkfoldersexist } ) {
+ my @h1_folders_wanted_exist ;
+ myprint( "Host1: Checking wanted folders exist. Use --nocheckfoldersexist to avoid this check (shared of public namespace targeted).\n" ) ;
+ foreach my $folder ( @{ $sync->{h1_folders_wanted} } ) {
+ ( $sync->{ debug } or $sync->{debugfolders} ) and myprint( "Checking $folder exists on host1\n" ) ;
+ if ( ! exists $h1_folders_all{ $folder } ) {
+ myprint( "Host1: warning! ignoring folder $folder because it is not in host1 whole folders list.\n" ) ;
+ next ;
+ }else{
+ push @h1_folders_wanted_exist, $folder ;
+ }
+ }
+ @{ $sync->{h1_folders_wanted} } = @h1_folders_wanted_exist ;
+}else{
+ myprint( "Host1: Not checking that wanted folders exist. Remove --nocheckfoldersexist to get this check.\n" ) ;
+}
+
+
+if ( $sync->{ checkselectable } ) {
+ my @h1_folders_wanted_selectable ;
+ myprint( "Host1: Checking wanted folders are selectable. Use --nocheckselectable to avoid this check.\n" ) ;
+ foreach my $folder ( @{ $sync->{h1_folders_wanted} } ) {
+ ( $sync->{ debug } or $sync->{debugfolders} ) and myprint( "Checking $folder is selectable on host1\n" ) ;
+ # It does an imap command LIST "" $folder and then search for no \Noselect
+ if ( ! $sync->{imap1}->selectable( $folder ) ) {
+ myprint( "Host1: warning! ignoring folder $folder because it is not selectable\n" ) ;
+ }else{
+ push @h1_folders_wanted_selectable, $folder ;
+ }
+ }
+ @{ $sync->{h1_folders_wanted} } = @h1_folders_wanted_selectable ;
+ ( $sync->{ debug } or $sync->{debugfolders} ) and myprint( 'Host1: checking folders took ', timenext( $sync ), " s\n" ) ;
+}else{
+ myprint( "Host1: Not checking that wanted folders are selectable. Remove --nocheckselectable to get this check.\n" ) ;
+}
+
+
+
+# Old place of private_folders_separators_and_prefixes( ) call.
+#private_folders_separators_and_prefixes( ) ;
+
+
+# This hack exists because LWP's post() does not pass a hash well in the $form parameter,
+# but it does pass an array well.
+%{ $sync->{f1f2h} } = split_around_equal( @{ $sync->{f1f2} } ) ;
+
+automap( $sync ) ;
+
+
+foreach my $h1_fold ( @{ $sync->{h1_folders_wanted} } ) {
+ my $h2_fold ;
+ $h2_fold = imap2_folder_name( $sync, $h1_fold ) ;
+ $h2_folders_from_1_wanted{ $h2_fold }++ ;
+ if ( 1 < $h2_folders_from_1_wanted{ $h2_fold } ) {
+ $h2_folders_from_1_several{ $h2_fold }++ ;
+ }
+}
+
+@h2_folders_from_1_wanted = sort keys %h2_folders_from_1_wanted;
+
+
+foreach my $h1_fold ( @h1_folders_all ) {
+ my $h2_fold ;
+ $h2_fold = imap2_folder_name( $sync, $h1_fold ) ;
+ $h2_folders_from_1_all{ $h2_fold }++ ;
+ # Follows a fix to avoid deleting folder $sync->{ subfolder2 }
+ # because it usually does not exist on host1.
+ if ( $sync->{ subfolder2 } )
+ {
+ $h2_folders_from_1_all{ $sync->{ h2_prefix } . $sync->{ subfolder2 } }++ ;
+ $h2_folders_from_1_all{ $sync->{ subfolder2 } }++ ;
+ }
+}
+
+
+
+myprint( << 'END_LISTING' ) ;
+
+++++ Listing folders
+All foldernames are presented between brackets like [X] where X is the foldername.
+When a foldername contains non-ASCII characters it is presented in the form
+[X] = [Y] where
+X is the imap foldername you have to use in command line options and
+Y is the utf8 output just printed for convenience, to recognize it.
+
+END_LISTING
+
+myprint(
+ "Host1: folders list (first the raw imap format then the [X] = [Y]):\n",
+ $sync->{imap1}->list( ),
+ "\n",
+ jux_utf8_list( @h1_folders_all ),
+ "\n",
+ "Host2: folders list (first the raw imap format then the [X] = [Y]):\n",
+ $sync->{imap2}->list( ),
+ "\n",
+ jux_utf8_list( @h2_folders_all ),
+ "\n",
+ q{}
+) ;
+
+if ( $subscribed ) {
+ myprint(
+ 'Host1 subscribed folders list: ',
+ jux_utf8_list( sort keys %h1_subscribed_folder ), "\n",
+ ) ;
+}
+
+
+
+@h2_folders_not_in_1 = list_folders_in_2_not_in_1( ) ;
+
+if ( @h2_folders_not_in_1 ) {
+ myprint( "Folders in host2 not in host1:\n",
+ jux_utf8_list( @h2_folders_not_in_1 ), "\n" ) ;
+}
+
+
+if ( keys %{ $sync->{f1f2auto} } ) {
+ myprint( "Folders mapping from --automap feature (use --f1f2 to override any mapping):\n" ) ;
+ foreach my $h1_fold ( keys %{ $sync->{f1f2auto} } ) {
+ my $h2_fold = $sync->{f1f2auto}{$h1_fold} ;
+ myprintf( "%-40s -> %-40s\n",
+ jux_utf8( $h1_fold ), jux_utf8( $h2_fold ) ) ;
+ }
+ myprint( "\n" ) ;
+}
+
+if ( keys %{ $sync->{f1f2h} } ) {
+ myprint( "Folders mapping from --f1f2 options, it overrides --automap:\n" ) ;
+ foreach my $h1_fold ( keys %{ $sync->{f1f2h} } ) {
+ my $h2_fold = $sync->{f1f2h}{$h1_fold} ;
+ my $warn = q{} ;
+ if ( not exists $h1_folders_all{ $h1_fold } ) {
+ $warn = "BUT $h1_fold does NOT exist on host1!" ;
+ }
+ myprintf( "%-40s -> %-40s %s\n",
+ jux_utf8( $h1_fold ), jux_utf8( $h2_fold ), $warn ) ;
+ }
+ myprint( "\n" ) ;
+}
+
+exit_clean( $sync, $EX_OK, "Exiting because of --justfolderlists\n" ) if ( $sync->{ justfolderlists } ) ;
+exit_clean( $sync, $EX_OK, "Exiting because of --justautomap\n" ) if ( $sync->{ justautomap } ) ;
+
+debugsleep( $sync ) ;
+
+if ( $sync->{ skipemptyfolders } )
+{
+ myprint( "Host1: will not syncing empty folders on host1. Use --noskipemptyfolders to create them anyway on host2\n") ;
+}
+
+
+if ( $sync->{ foldersizes } ) {
+
+ foldersizes_at_the_beggining( $sync ) ;
+ #foldersizes_at_the_beggining_old( $sync ) ;
+}
+
+
+
+if ( $sync->{ justfoldersizes } )
+{
+ exit_clean( $sync, $EX_OK, "Exiting because of --justfoldersizes\n" ) ;
+}
+
+$sync->{stats} = 1 ;
+
+if ( $sync->{ delete1emptyfolders } ) {
+ delete1emptyfolders( $sync ) ;
+}
+
+delete_folders_in_2_not_in_1( ) if $delete2folders ;
+
+# folder loop
+$h1_folders_wanted_nb = scalar @{ $sync->{h1_folders_wanted} } ;
+
+myprint( "++++ Looping on each one of $h1_folders_wanted_nb folders to sync\n" ) ;
+
+$sync->{begin_transfer_time} = time ;
+
+my %uid_candidate_for_deletion ;
+my %uid_candidate_no_deletion ;
+
+$sync->{ h2_folders_of_md5 } = { } ;
+
+
+FOLDER: foreach my $h1_fold ( @{ $sync->{h1_folders_wanted} } )
+{
+ $sync->{ h1_current_folder } = $h1_fold ;
+ eta_print( $sync ) ;
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ my $h2_fold = imap2_folder_name( $sync, $h1_fold ) ;
+ $sync->{ h2_current_folder } = $h2_fold ;
+
+ $h1_folders_wanted_ct++ ;
+ myprintf( "Folder %7s %-35s -> %-35s\n", "$h1_folders_wanted_ct/$h1_folders_wanted_nb",
+ jux_utf8( $h1_fold ), jux_utf8( $h2_fold ) ) ;
+ myprint( debugmemory( $sync, " at folder loop" ) ) ;
+
+ # host1 cannot be opened read-only; SELECT is needed because of expunge.
+ select_folder( $sync, $sync->{imap1}, $h1_fold, 'Host1' ) or next FOLDER ;
+
+ debugsleep( $sync ) ;
+
+ my $h1_fold_nb_messages = count_from_select( $sync->{imap1}->History ) ;
+ myprint( "Host1: folder [$h1_fold] has $h1_fold_nb_messages messages in total (mentioned by SELECT)\n" ) ;
+
+ if ( $sync->{ skipemptyfolders } and 0 == $h1_fold_nb_messages ) {
+ myprint( "Host1: skipping empty host1 folder [$h1_fold]\n" ) ;
+ next FOLDER ;
+ }
+
+ # Code added from https://github.com/imapsync/imapsync/issues/95
+ # Thanks jh1995
+ # Goal: do not create the folder if --search or --max/minage returns 0 messages,
+ # even if there are messages by SELECT (not really empty, but empty from the user's point of view).
+ if ( $sync->{ skipemptyfolders } )
+ {
+ my $h1_msgs_all_hash_ref_tmp = { } ;
+ my @h1_msgs_tmp = select_msgs( $sync->{imap1}, $h1_msgs_all_hash_ref_tmp, $sync->{ search1 }, $h1_fold ) ;
+ my $h1_fold_nb_messages_tmp = scalar( @h1_msgs_tmp ) ;
+ if ( 0 == $h1_fold_nb_messages_tmp ) {
+ myprint( "Host1: skipping empty host1 folder [$h1_fold] (0 message found by SEARCH)\n" ) ;
+ next FOLDER ;
+ }
+ }
+
+ if ( ! exists $h2_folders_all{ $h2_fold } ) {
+ create_folder( $sync, $sync->{imap2}, $h2_fold, $h1_fold ) or next FOLDER ;
+ }
+
+ acls_sync( $h1_fold, $h2_fold ) ;
+
+ # Sometimes the folder on host2 is listed (it exists) but is
+ # not selectable but becomes selectable by a create (Gmail)
+ select_folder( $sync, $sync->{imap2}, $h2_fold, 'Host2' )
+ or ( create_folder( $sync, $sync->{imap2}, $h2_fold, $h1_fold )
+ and select_folder( $sync, $sync->{imap2}, $h2_fold, 'Host2' ) )
+ or next FOLDER ;
+ my @select_results = $sync->{imap2}->Results( ) ;
+
+ my $h2_fold_nb_messages = count_from_select( @select_results ) ;
+ myprint( "Host2: folder [$h2_fold] has $h2_fold_nb_messages messages in total (mentioned by SELECT)\n" ) ;
+
+ my $permanentflags2 = permanentflags( @select_results ) ;
+ myprint( "Host2: folder [$h2_fold] permanentflags: $permanentflags2\n" ) ;
+
+ if ( $sync->{ expunge1 } )
+ {
+ myprint( "Host1: Expunging $h1_fold $sync->{dry_message}\n" ) ;
+ if ( ! $sync->{dry} )
+ {
+ $sync->{imap1}->expunge( ) ;
+ }
+ }
+
+ if ( ( ( $subscribe and exists $h1_subscribed_folder{ $h1_fold } ) or $subscribeall )
+ and not exists $h2_subscribed_folder{ $h2_fold } )
+ {
+ myprint( "Host2: Subscribing to folder $h2_fold\n" ) ;
+ if ( ! $sync->{dry} ) { $sync->{imap2}->subscribe( $h2_fold ) } ;
+ }
+
+ next FOLDER if ( $sync->{ justfolders } ) ;
+
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ my $h1_msgs_all_hash_ref = { } ;
+ my @h1_msgs = select_msgs( $sync->{imap1}, $h1_msgs_all_hash_ref, $sync->{ search1 }, $sync->{abletosearch1}, $h1_fold );
+
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ my $h1_msgs_nb = scalar @h1_msgs ;
+
+ myprint( "Host1: folder [$h1_fold] considering $h1_msgs_nb messages\n" ) ;
+ ( $sync->{ debug } or $debuglist ) and myprint( "Host1: folder [$h1_fold] considering $h1_msgs_nb messages, LIST gives: @h1_msgs\n" ) ;
+ $sync->{ debug } and myprint( "Host1: selecting messages of folder [$h1_fold] took ", timenext( $sync ), " s\n" ) ;
+
+ my $h2_msgs_all_hash_ref = { } ;
+ my @h2_msgs = select_msgs( $sync->{imap2}, $h2_msgs_all_hash_ref, $sync->{ search2 }, $sync->{abletosearch2}, $h2_fold ) ;
+
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ my $h2_msgs_nb = scalar @h2_msgs ;
+
+ myprint( "Host2: folder [$h2_fold] considering $h2_msgs_nb messages\n" ) ;
+ ( $sync->{ debug } or $debuglist ) and myprint( "Host2: folder [$h2_fold] considering $h2_msgs_nb messages, LIST gives: @h2_msgs\n" ) ;
+ $sync->{ debug } and myprint( "Host2: selecting messages of folder [$h2_fold] took ", timenext( $sync ), " s\n" ) ;
+
+ my $cache_base = "$sync->{ tmpdir }/imapsync_cache/" ;
+ my $cache_dir = cache_folder( $cache_base,
+ "$sync->{host1}/$sync->{user1}/$sync->{host2}/$sync->{user2}", $h1_fold, $h2_fold ) ;
+ my ( $cache_1_2_ref, $cache_2_1_ref ) = ( {}, {} ) ;
+
+ my $h1_uidvalidity = $sync->{imap1}->uidvalidity( ) || q{} ;
+ my $h2_uidvalidity = $sync->{imap2}->uidvalidity( ) || q{} ;
+
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ if ( $usecache ) {
+ myprint( "Local cache directory: $cache_dir ( " . length( $cache_dir ) . " characters long )\n" ) ;
+ mkpath( "$cache_dir" ) ;
+ ( $cache_1_2_ref, $cache_2_1_ref )
+ = get_cache( $cache_dir, \@h1_msgs, \@h2_msgs, $h1_msgs_all_hash_ref, $h2_msgs_all_hash_ref ) ;
+ myprint( 'CACHE h1 h2: ', scalar keys %{ $cache_1_2_ref } , " files\n" ) ;
+ $sync->{ debug } and myprint( '[',
+ map ( { "$_->$cache_1_2_ref->{$_} " } keys %{ $cache_1_2_ref } ), " ]\n" ) ;
+ }
+
+ my %h1_hash = ( ) ;
+ my %h2_hash = ( ) ;
+
+ my ( %h1_msgs, %h2_msgs ) ;
+ @h1_msgs{ @h1_msgs } = ( ) ;
+ @h2_msgs{ @h2_msgs } = ( ) ;
+
+ my @h1_msgs_in_cache = sort { $a <=> $b } keys %{ $cache_1_2_ref } ;
+ my @h2_msgs_in_cache = keys %{ $cache_2_1_ref } ;
+
+ my ( %h1_msgs_not_in_cache, %h2_msgs_not_in_cache ) ;
+ %h1_msgs_not_in_cache = %h1_msgs ;
+ %h2_msgs_not_in_cache = %h2_msgs ;
+ delete @h1_msgs_not_in_cache{ @h1_msgs_in_cache } ;
+ delete @h2_msgs_not_in_cache{ @h2_msgs_in_cache } ;
+
+ my @h1_msgs_not_in_cache = keys %h1_msgs_not_in_cache ;
+ #myprint( "h1_msgs_not_in_cache: [@h1_msgs_not_in_cache]\n" ) ;
+ my @h2_msgs_not_in_cache = keys %h2_msgs_not_in_cache ;
+
+ my @h2_msgs_delete2_not_in_cache = () ;
+ %h1_msgs_copy_by_uid = ( ) ;
+
+ if ( $useuid ) {
+ # use uid so we have to avoid getting header
+ @h1_msgs_copy_by_uid{ @h1_msgs_not_in_cache } = ( ) ;
+ @h2_msgs_delete2_not_in_cache = @h2_msgs_not_in_cache if $usecache ;
+ @h1_msgs_not_in_cache = ( ) ;
+ @h2_msgs_not_in_cache = ( ) ;
+
+ #myprint( "delete2: @h2_msgs_delete2_not_in_cache\n" ) ;
+ }
+
+ $sync->{ debug } and myprint( "Host1: parsing headers of folder [$h1_fold]\n" ) ;
+
+ my ($h1_heads_ref, $h1_fir_ref) = ({}, {});
+ $h1_heads_ref = $sync->{imap1}->parse_headers([@h1_msgs_not_in_cache], @useheader) if (@h1_msgs_not_in_cache);
+ $sync->{ debug } and myprint( "Host1: parsing headers of folder [$h1_fold] took ", timenext( $sync ), " s\n" ) ;
+
+ @{ $h1_fir_ref }{@h1_msgs} = ( undef ) ;
+
+ $sync->{ debug } and myprint( "Host1: getting flags idate and sizes of folder [$h1_fold]\n" ) ;
+
+ my @h1_common_fetch_param = ( 'FLAGS', 'INTERNALDATE', 'RFC822.SIZE' ) ;
+ if ( $sync->{ synclabels } or $sync->{ resynclabels } ) { push @h1_common_fetch_param, 'X-GM-LABELS' ; }
+
+ if ( $sync->{abletosearch1} )
+ {
+ $h1_fir_ref = $sync->{imap1}->fetch_hash( \@h1_msgs, @h1_common_fetch_param, $h1_fir_ref )
+ if ( @h1_msgs ) ;
+ }
+ else
+ {
+ my $uidnext = $sync->{imap1}->uidnext( $h1_fold ) || $uidnext_default ;
+ my $fetch_hash_uids = $fetch_hash_set || "1:$uidnext" ;
+ $h1_fir_ref = $sync->{imap1}->fetch_hash( $fetch_hash_uids, @h1_common_fetch_param, $h1_fir_ref )
+ if ( @h1_msgs ) ;
+ }
+
+ $sync->{ debug } and myprint( "Host1: getting flags idate and sizes of folder [$h1_fold] took ", timenext( $sync ), " s\n" ) ;
+ if ( ! $h1_fir_ref )
+ {
+ my $error = join( q{}, "Host1: folder $h1_fold : Could not fetch_hash ",
+ scalar @h1_msgs, ' msgs: ', $sync->{imap1}->LastError || q{}, "\n" ) ;
+ errors_incr( $sync, $error ) ;
+ next FOLDER ;
+ }
+
+ my @h1_msgs_duplicate;
+ foreach my $m ( @h1_msgs_not_in_cache )
+ {
+ my $rc = parse_header_msg( $sync, $sync->{imap1}, $m, $h1_heads_ref, $h1_fir_ref, 'Host1', \%h1_hash ) ;
+ if ( ! defined $rc )
+ {
+ my $h1_size = $h1_fir_ref->{$m}->{'RFC822.SIZE'} || 0;
+ myprint( "Host1: $h1_fold/$m size $h1_size ignored (no wanted headers so we ignore this message. To solve this: use --addheader)\n" ) ;
+ $sync->{ total_bytes_skipped } += $h1_size ;
+ $sync->{ nb_msg_skipped } += 1 ;
+ $sync->{ h1_nb_msg_noheader } +=1 ;
+ $sync->{ h1_nb_msg_processed } +=1 ;
+ } elsif(0 == $rc)
+ {
+ # duplicate
+ push @h1_msgs_duplicate, $m;
+ # duplicate, same id same size?
+ my $h1_size = $h1_fir_ref->{$m}->{'RFC822.SIZE'} || 0;
+ $sync->{ nb_msg_skipped } += 1;
+ $h1_nb_msg_duplicate += 1;
+ $sync->{ h1_nb_msg_processed } +=1 ;
+ }
+ }
+ my $h1_msgs_duplicate_nb = scalar @h1_msgs_duplicate ;
+
+ myprint( "Host1: folder [$h1_fold] selected $h1_msgs_nb messages, duplicates $h1_msgs_duplicate_nb\n" ) ;
+
+ $sync->{ debug } and myprint( 'Host1: whole time parsing headers took ', timenext( $sync ), " s\n" ) ;
+ # Getting headers and metadata can take so long that host2 might be disconnected here
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+
+ $sync->{ debug } and myprint( "Host2: parsing headers of folder [$h2_fold]\n" ) ;
+
+ my ($h2_heads_ref, $h2_fir_ref) = ( {}, {} );
+ $h2_heads_ref = $sync->{imap2}->parse_headers([@h2_msgs_not_in_cache], @useheader) if (@h2_msgs_not_in_cache);
+ $sync->{ debug } and myprint( "Host2: parsing headers of folder [$h2_fold] took ", timenext( $sync ), " s\n" ) ;
+
+ $sync->{ debug } and myprint( "Host2: getting flags idate and sizes of folder [$h2_fold]\n" ) ;
+ @{ $h2_fir_ref }{@h2_msgs} = ( ); # fetch_hash can select by uid with last arg as ref
+
+
+ my @h2_common_fetch_param = ( 'FLAGS', 'INTERNALDATE', 'RFC822.SIZE' ) ;
+ if ( $sync->{ synclabels } or $sync->{ resynclabels } ) { push @h2_common_fetch_param, 'X-GM-LABELS' ; }
+
+ if ( $sync->{abletosearch2} and scalar( @h2_msgs ) ) {
+ $h2_fir_ref = $sync->{imap2}->fetch_hash( \@h2_msgs, @h2_common_fetch_param, $h2_fir_ref) ;
+ }else{
+ my $uidnext = $sync->{imap2}->uidnext( $h2_fold ) || $uidnext_default ;
+ my $fetch_hash_uids = $fetch_hash_set || "1:$uidnext" ;
+ $h2_fir_ref = $sync->{imap2}->fetch_hash( $fetch_hash_uids, @h2_common_fetch_param, $h2_fir_ref )
+ if ( @h2_msgs ) ;
+ }
+
+ $sync->{ debug } and myprint( "Host2: getting flags idate and sizes of folder [$h2_fold] took ", timenext( $sync ), " s\n" ) ;
+
+ my @h2_msgs_duplicate;
+ foreach my $m (@h2_msgs_not_in_cache) {
+ my $rc = parse_header_msg( $sync, $sync->{imap2}, $m, $h2_heads_ref, $h2_fir_ref, 'Host2', \%h2_hash ) ;
+ my $h2_size = $h2_fir_ref->{$m}->{'RFC822.SIZE'} || 0 ;
+ if (! defined $rc ) {
+ myprint( "Host2: $h2_fold/$m size $h2_size ignored (no wanted headers so we ignore this message)\n" ) ;
+ $h2_nb_msg_noheader += 1 ;
+ } elsif( 0 == $rc ) {
+ # duplicate
+ $h2_nb_msg_duplicate += 1 ;
+ push @h2_msgs_duplicate, $m ;
+ }
+ }
+
+ # %h2_folders_of_md5
+ foreach my $md5 ( keys %h2_hash ) {
+ $sync->{ h2_folders_of_md5 }->{ $md5 }->{ $h2_fold } ++ ;
+ }
+ # %h1_folders_of_md5
+ foreach my $md5 ( keys %h1_hash ) {
+ $sync->{ h1_folders_of_md5 }->{ $md5 }->{ $h2_fold } ++ ;
+ }
+
+
+ my $h2_msgs_duplicate_nb = scalar @h2_msgs_duplicate ;
+
+ myprint( "Host2: folder [$h2_fold] selected $h2_msgs_nb messages, duplicates $h2_msgs_duplicate_nb\n" ) ;
+
+ $sync->{ debug } and myprint( 'Host2 whole time parsing headers took ', timenext( $sync ), " s\n" ) ;
+
+ $sync->{ debug } and myprint( "++++ Verifying [$h1_fold] -> [$h2_fold]\n" ) ;
+ # messages in host1 that are not in host2
+
+ my @h1_hash_keys_sorted_by_uid
+ = sort {$h1_hash{$a}{'m'} <=> $h1_hash{$b}{'m'}} keys %h1_hash;
+
+ #myprint( map { $h1_hash{$_}{'m'} . q{ }} @h1_hash_keys_sorted_by_uid ) ;
+
+ my @h2_hash_keys_sorted_by_uid
+ = sort {$h2_hash{$a}{'m'} <=> $h2_hash{$b}{'m'}} keys %h2_hash;
+
+ # Deletions on account2.
+
+ if( $sync->{ delete2duplicates } and not exists $h2_folders_from_1_several{ $h2_fold } ) {
+ my @h2_expunge ;
+
+ foreach my $h2_msg ( @h2_msgs_duplicate ) {
+ myprint( "Host2: msg $h2_fold/$h2_msg marked \\Deleted [duplicate] on host2 $sync->{dry_message}\n" ) ;
+ push @h2_expunge, $h2_msg if $sync->{ uidexpunge2 } ;
+ if ( ! $sync->{dry} ) {
+ $sync->{imap2}->delete_message( $h2_msg ) ;
+ $h2_nb_msg_deleted += 1 ;
+ }
+ }
+ my $cnt = scalar @h2_expunge ;
+ if( @h2_expunge and not $sync->{ expunge2 } ) {
+ myprint( "Host2: UidExpunging $cnt message(s) in folder $h2_fold $sync->{dry_message}\n" ) ;
+ $sync->{imap2}->uidexpunge( \@h2_expunge ) if ! $sync->{dry} ;
+ }
+ if ( $sync->{ expunge2 } ){
+ myprint( "Host2: Expunging folder $h2_fold $sync->{dry_message}\n" ) ;
+ $sync->{imap2}->expunge( ) if ! $sync->{dry} ;
+ }
+ }
+
+ if( $sync->{ delete2 } and not exists $h2_folders_from_1_several{ $h2_fold } ) {
+ # No host1 folders f1a f1b ... going all to same f2 (via --regextrans2)
+ my @h2_expunge;
+ foreach my $m_id (@h2_hash_keys_sorted_by_uid) {
+ #myprint( "$m_id " ) ;
+ if ( ! exists $h1_hash{$m_id} ) {
+ my $h2_msg = $h2_hash{$m_id}{'m'};
+ my $h2_flags = $h2_hash{$m_id}{'F'} || q{};
+ my $isdel = $h2_flags =~ /\B\\Deleted\b/x ? 1 : 0;
+ myprint( "Host2: msg $h2_fold/$h2_msg marked \\Deleted on host2 [$m_id] $sync->{dry_message}\n" )
+ if ! $isdel;
+ push @h2_expunge, $h2_msg if $sync->{ uidexpunge2 };
+ if ( ! ( $sync->{dry} or $isdel ) ) {
+ $sync->{imap2}->delete_message($h2_msg);
+ $h2_nb_msg_deleted += 1;
+ }
+ }
+ }
+ foreach my $h2_msg ( @h2_msgs_delete2_not_in_cache ) {
+ myprint( "Host2: msg $h2_fold/$h2_msg marked \\Deleted [not in cache] on host2 $sync->{dry_message}\n" ) ;
+ push @h2_expunge, $h2_msg if $sync->{ uidexpunge2 };
+ if ( ! $sync->{dry} ) {
+ $sync->{imap2}->delete_message($h2_msg);
+ $h2_nb_msg_deleted += 1;
+ }
+ }
+ my $cnt = scalar @h2_expunge ;
+
+ if( @h2_expunge and not $sync->{ expunge2 } ) {
+ myprint( "Host2: UidExpunging $cnt message(s) in folder $h2_fold $sync->{dry_message}\n" ) ;
+ $sync->{imap2}->uidexpunge( \@h2_expunge ) if ! $sync->{dry} ;
+ }
+ if ( $sync->{ expunge2 } ) {
+ myprint( "Host2: Expunging folder $h2_fold $sync->{dry_message}\n" ) ;
+ $sync->{imap2}->expunge( ) if ! $sync->{dry} ;
+ }
+ }
+
+ if( $sync->{ delete2 } and exists $h2_folders_from_1_several{ $h2_fold } ) {
+ myprint( "Host2: folder $h2_fold $h2_folders_from_1_several{ $h2_fold } folders left to sync there\n" ) ;
+ my @h2_expunge;
+ foreach my $m_id ( @h2_hash_keys_sorted_by_uid ) {
+ my $h2_msg = $h2_hash{ $m_id }{ 'm' } ;
+ if ( ! exists $h1_hash{ $m_id } ) {
+ my $h2_flags = $h2_hash{ $m_id }{ 'F' } || q{} ;
+ my $isdel = $h2_flags =~ /\B\\Deleted\b/x ? 1 : 0 ;
+ if ( ! $isdel ) {
+ $sync->{ debug } and myprint( "Host2: msg $h2_fold/$h2_msg candidate for deletion [$m_id]\n" ) ;
+ $uid_candidate_for_deletion{ $h2_fold }{ $h2_msg }++ ;
+ }
+ }else{
+ $sync->{ debug } and myprint( "Host2: msg $h2_fold/$h2_msg will cancel deletion [$m_id]\n" ) ;
+ $uid_candidate_no_deletion{ $h2_fold }{ $h2_msg }++ ;
+ }
+ }
+ foreach my $h2_msg ( @h2_msgs_delete2_not_in_cache ) {
+ myprint( "Host2: msg $h2_fold/$h2_msg candidate for deletion [not in cache]\n" ) ;
+ $uid_candidate_for_deletion{ $h2_fold }{ $h2_msg }++ ;
+ }
+
+ foreach my $h2_msg ( @h2_msgs_in_cache ) {
+ myprint( "Host2: msg $h2_fold/$h2_msg will cancel deletion [in cache]\n" ) ;
+ $uid_candidate_no_deletion{ $h2_fold }{ $h2_msg }++ ;
+ }
+
+
+ if ( 0 == $h2_folders_from_1_several{ $h2_fold } ) {
+ # last host1 folder going to $h2_fold
+ myprint( "Last host1 folder going to $h2_fold\n" ) ;
+ foreach my $h2_msg ( keys %{ $uid_candidate_for_deletion{ $h2_fold } } ) {
+ $sync->{ debug } and myprint( "Host2: msg $h2_fold/$h2_msg candidate for deletion\n" ) ;
+ if ( exists $uid_candidate_no_deletion{ $h2_fold }{ $h2_msg } ) {
+ $sync->{ debug } and myprint( "Host2: msg $h2_fold/$h2_msg canceled deletion\n" ) ;
+ }else{
+ myprint( "Host2: msg $h2_fold/$h2_msg marked \\Deleted $sync->{dry_message}\n" ) ;
+ push @h2_expunge, $h2_msg if $sync->{ uidexpunge2 } ;
+ if ( ! $sync->{dry} ) {
+ $sync->{imap2}->delete_message( $h2_msg ) ;
+ $h2_nb_msg_deleted += 1 ;
+ }
+ }
+ }
+ }
+
+ my $cnt = scalar @h2_expunge ;
+ if( @h2_expunge and not $sync->{ expunge2 } ) {
+ myprint( "Host2: UidExpunging $cnt message(s) in folder $h2_fold $sync->{dry_message}\n" ) ;
+ $sync->{imap2}->uidexpunge( \@h2_expunge ) if ! $sync->{dry} ;
+ }
+ if ( $sync->{ expunge2 } ) {
+ myprint( "Host2: Expunging host2 folder $h2_fold $sync->{dry_message}\n" ) ;
+ $sync->{imap2}->expunge( ) if ! $sync->{dry} ;
+ }
+
+ $h2_folders_from_1_several{ $h2_fold }-- ;
+ }
+
+ my $h2_uidnext = $sync->{imap2}->uidnext( $h2_fold ) ;
+ $sync->{ debug } and myprint( "Host2: uidnext is $h2_uidnext\n" ) ;
+ $h2_uidguess = $h2_uidnext ;
+
+ # Getting host2 headers, metada and delete2 stuff can be so long that host1 might be disconnected here
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ my @h1_msgs_to_delete ;
+ MESS: foreach my $m_id (@h1_hash_keys_sorted_by_uid) {
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ #myprint( "h1_nb_msg_processed: $sync->{ h1_nb_msg_processed }\n" ) ;
+ my $h1_size = $h1_hash{$m_id}{'s'};
+ my $h1_msg = $h1_hash{$m_id}{'m'};
+ my $h1_idate = $h1_hash{$m_id}{'D'};
+
+ #my $labels = labels( $sync->{imap1}, $h1_msg ) ;
+ #print "LABELS: $labels\n" ;
+
+ if ( ( not exists $h2_hash{ $m_id } )
+ and ( not ( exists $sync->{ h2_folders_of_md5 }->{ $m_id } )
+ or not $skipcrossduplicates ) )
+ {
+ # copy
+ my $h2_msg = copy_message( $sync, $h1_msg, $h1_fold, $h2_fold, $h1_fir_ref, $permanentflags2, $cache_dir ) ;
+ if ( $h2_msg and $sync->{ delete1 } and not $sync->{ expungeaftereach } ) {
+ # not expunged
+ push @h1_msgs_to_delete, $h1_msg ;
+ }
+
+ # A bug here with imapsync 1.920, fixed in 1.921
+ # Added $h2_msg in the condition. Errors of APPEND were not counted as missing messages on host2!
+ if ( $h2_msg and not $sync->{ dry } )
+ {
+ $sync->{ h2_folders_of_md5 }->{ $m_id }->{ $h2_fold } ++ ;
+ }
+
+ #
+ if( $sync->{ delete2 } and ( exists $h2_folders_from_1_several{ $h2_fold } ) and $h2_msg ) {
+ myprint( "Host2: msg $h2_fold/$h2_msg will cancel deletion [fresh copy] on host2\n" ) ;
+ $uid_candidate_no_deletion{ $h2_fold }{ $h2_msg }++ ;
+ }
+
+ if ( total_bytes_max_reached( $sync ) ) {
+ # a bug when using --delete1 --noexpungeaftereach
+ # same thing below on all total_bytes_max_reached!
+ last FOLDER ;
+ }
+ next MESS;
+ }
+ else
+ {
+ # already on host2
+ if ( exists $h2_hash{ $m_id } )
+ {
+ my $h2_msg = $h2_hash{$m_id}{'m'} ;
+ $sync->{ debug } and myprint( "Host1: found that msg $h1_fold/$h1_msg equals Host2 $h2_fold/$h2_msg\n" ) ;
+ if ( $usecache )
+ {
+ $debugcache and myprint( "touch $cache_dir/${h1_msg}_$h2_msg\n" ) ;
+ touch( "$cache_dir/${h1_msg}_$h2_msg" )
+ or croak( "Couldn't touch $cache_dir/${h1_msg}_$h2_msg" ) ;
+ }
+ }
+ elsif( exists $sync->{ h2_folders_of_md5 }->{ $m_id } )
+ {
+ my @folders_dup = keys %{ $sync->{ h2_folders_of_md5 }->{ $m_id } } ;
+ ( $sync->{ debug } or $debugcrossduplicates ) and myprint( "Host1: found that msg $h1_fold/$h1_msg is also in Host2 folders @folders_dup\n" ) ;
+ $sync->{ h2_nb_msg_crossdup } +=1 ;
+ }
+ $sync->{ total_bytes_skipped } += $h1_size ;
+ $sync->{ nb_msg_skipped } += 1 ;
+ $sync->{ h1_nb_msg_processed } +=1 ;
+ }
+
+ if ( exists $h2_hash{ $m_id } ) {
+ #$debug and myprint( "MESSAGE $m_id\n" ) ;
+ my $h2_msg = $h2_hash{$m_id}{'m'};
+ if ( $sync->{resyncflags} ) {
+ sync_flags_fir( $sync, $h1_fold, $h1_msg, $h2_fold, $h2_msg, $permanentflags2, $h1_fir_ref, $h2_fir_ref ) ;
+ }
+ # Good
+ my $h2_size = $h2_hash{$m_id}{'s'};
+ $sync->{ debug } and myprint(
+ "Host1: size msg $h1_fold/$h1_msg = $h1_size <> $h2_size = Host2 $h2_fold/$h2_msg\n" ) ;
+
+ if ( $sync->{ resynclabels } )
+ {
+ resynclabels( $sync, $h1_msg, $h2_msg, $h1_fir_ref, $h2_fir_ref, $h1_fold )
+ }
+ }
+
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ if ( $sync->{ delete1 } ) {
+ push @h1_msgs_to_delete, $h1_msg ;
+ }
+ }
+ # END MESS: loop
+
+ delete_message_on_host1( $sync, $h1_fold, $sync->{ expunge1 }, @h1_msgs_to_delete, @h1_msgs_in_cache ) ;
+
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ # MESS_IN_CACHE:
+ if ( ! $sync->{ delete1 } )
+ {
+ foreach my $h1_msg ( @h1_msgs_in_cache )
+ {
+ my $h2_msg = $cache_1_2_ref->{ $h1_msg } ;
+ $debugcache and myprint( "cache messages update flags $h1_msg->$h2_msg\n" ) ;
+ if ( $sync->{resyncflags} )
+ {
+ sync_flags_fir( $sync, $h1_fold, $h1_msg, $h2_fold, $h2_msg, $permanentflags2, $h1_fir_ref, $h2_fir_ref ) ;
+ }
+ my $h1_size = $h1_fir_ref->{ $h1_msg }->{ 'RFC822.SIZE' } || 0 ;
+ $sync->{ total_bytes_skipped } += $h1_size;
+ $sync->{ nb_msg_skipped } += 1;
+ $sync->{ h1_nb_msg_processed } +=1 ;
+ }
+ }
+
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ @h1_msgs_to_delete = ( ) ;
+ #myprint( "Messages by uid: ", map { "$_ " } keys %h1_msgs_copy_by_uid, "\n" ) ;
+ # MESS_BY_UID:
+ foreach my $h1_msg ( sort { $a <=> $b } keys %h1_msgs_copy_by_uid )
+ {
+ $sync->{ debug } and myprint( "Copy by uid $h1_fold/$h1_msg\n" ) ;
+ if ( ! reconnect_12_if_needed( $sync ) ) { last FOLDER ; }
+
+ my $h2_msg = copy_message( $sync, $h1_msg, $h1_fold, $h2_fold, $h1_fir_ref, $permanentflags2, $cache_dir ) ;
+ if( $sync->{ delete2 } and exists $h2_folders_from_1_several{ $h2_fold } and $h2_msg ) {
+ myprint( "Host2: msg $h2_fold/$h2_msg will cancel deletion [fresh copy] on host2\n" ) ;
+ $uid_candidate_no_deletion{ $h2_fold }{ $h2_msg }++ ;
+ }
+ last FOLDER if total_bytes_max_reached( $sync ) ;
+ }
+
+ if ( $sync->{ expunge1 } ){
+ myprint( "Host1: Expunging folder $h1_fold $sync->{dry_message}\n" ) ;
+ if ( ! $sync->{dry} ) { $sync->{imap1}->expunge( ) } ;
+ }
+ if ( $sync->{ expunge2 } ){
+ myprint( "Host2: Expunging folder $h2_fold $sync->{dry_message}\n" ) ;
+ if ( ! $sync->{dry} ) { $sync->{imap2}->expunge( ) } ;
+ }
+ $sync->{ debug } and myprint( 'Time: ', timenext( $sync ), " s\n" ) ;
+}
+
+eta_print( $sync ) ;
+
+myprint( "++++ End looping on each folder\n" ) ;
+
+if ( $sync->{ delete1 } and $sync->{ delete1emptyfolders } ) {
+ delete1emptyfolders( $sync ) ;
+}
+
+( $sync->{ debug } or $sync->{debugfolders} ) and myprint( 'Time: ', timenext( $sync ), " s\n" ) ;
+
+
+if ( $sync->{ foldersizesatend } ) {
+ myprint( << 'END_SIZE' ) ;
+
+Folders sizes after the synchronization.
+You can remove this foldersizes listing by using "--nofoldersizesatend"
+END_SIZE
+
+ foldersizesatend( $sync ) ;
+}
+
+if ( ! lost_connection( $sync, $sync->{imap1}, "for host1 [$sync->{host1}]" ) ) { $sync->{imap1}->logout( ) ; }
+if ( ! lost_connection( $sync, $sync->{imap2}, "for host2 [$sync->{host2}]" ) ) { $sync->{imap2}->logout( ) ; }
+
+stats( $sync ) ;
+myprint( errorsdump( $sync->{nb_errors}, errors_log( $sync ) ) ) if ( $sync->{errorsdump} ) ;
+tests_live_result( $sync->{nb_errors} ) if ( $sync->{testslive} or $sync->{testslive6} ) ;
+
+
+
+if ( $sync->{nb_errors} )
+{
+ exit_clean( $sync, $EXIT_WITH_ERRORS ) ;
+}
+else
+{
+ exit_clean( $sync, $EX_OK ) ;
+}
+
+return ;
+}
+
+# END of sub single_sync
+
+
+# subroutines
+# myprint: print all arguments to the tee handle stored in the file-level
+# global $sync->{ tee } when set (duplicates output to the logfile),
+# otherwise to STDOUT. Returns nothing.
+sub myprint
+{
+	#print @ARG ;
+	print { $sync->{ tee } || \*STDOUT } @ARG ;
+	return ;
+}
+
+# myprintf: printf-style counterpart of myprint(); formats to the tee
+# handle in $sync->{ tee } when set, else STDOUT. Returns nothing.
+sub myprintf
+{
+	printf { $sync->{ tee } || \*STDOUT } @ARG ;
+	return ;
+}
+
+# mysprintf: thin wrapper around sprintf; first argument is the format,
+# remaining arguments are the values. Returns the formatted string.
+sub mysprintf
+{
+	my $fmt = shift @ARG ;
+	my $str = sprintf( $fmt, @ARG ) ;
+	return $str ;
+}
+
+# output_start: PREPEND the given strings to the accumulated output buffer
+# $mysync->{ output }. Returns the updated buffer, or undef when called
+# without a $mysync.
+sub output_start
+{
+	my $mysync = shift @ARG ;
+
+	return if not $mysync ;
+
+	my $prefix = join q{}, @ARG ;
+	my $previous = $mysync->{ output } || q{} ;
+	$mysync->{ output } = $prefix . $previous ;
+	return $mysync->{ output } ;
+}
+
+
+# tests_output_start: Test::More unit tests for output_start(). The calls
+# are order-dependent: each one prepends to the buffer left by the
+# previous call, hence the growing expected strings.
+sub tests_output_start
+{
+	note( 'Entering tests_output_start()' ) ;
+
+	my $mysync = { } ;
+
+	is( undef, output_start(  ), 'output_start: no args => undef' ) ;
+	is( q{}, output_start( $mysync ), 'output_start: one arg => ""' ) ;
+	is( 'rrrr', output_start( $mysync, 'rrrr' ), 'output_start: rrrr => rrrr' ) ;
+	is( 'aaaarrrr', output_start( $mysync, 'aaaa' ), 'output_start: aaaa => aaaarrrr' ) ;
+	is( "\naaaarrrr", output_start( $mysync, "\n" ), 'output_start: \n => \naaaarrrr' ) ;
+	is( "ABC\naaaarrrr", output_start( $mysync, 'A', 'B', 'C' ), 'output_start: A B C => ABC\naaaarrrr' ) ;
+
+	note( 'Leaving tests_output_start()' ) ;
+	return ;
+}
+
+# tests_output: Test::More unit tests for output(). Order-dependent:
+# each call appends to the buffer built by the previous calls.
+sub tests_output
+{
+	note( 'Entering tests_output()' ) ;
+
+	my $mysync = { } ;
+
+	is( undef, output(  ), 'output: no args => undef' ) ;
+	is( q{}, output( $mysync ), 'output: one arg => ""' ) ;
+	is( 'rrrr', output( $mysync, 'rrrr' ), 'output: rrrr => rrrr' ) ;
+	is( 'rrrraaaa', output( $mysync, 'aaaa' ), 'output: aaaa => rrrraaaa' ) ;
+	is( "rrrraaaa\n", output( $mysync, "\n" ), 'output: \n => rrrraaaa\n' ) ;
+	is( "rrrraaaa\nABC", output( $mysync, 'A', 'B', 'C' ), 'output: A B C => rrrraaaaABC\n' ) ;
+
+	note( 'Leaving tests_output()' ) ;
+	return ;
+}
+
+# output: APPEND the given strings to the accumulated output buffer
+# $mysync->{ output }. Returns the whole buffer, or undef when called
+# without a $mysync.
+sub output
+{
+	my $mysync = shift @ARG ;
+
+	return if not $mysync ;
+
+	my $suffix = join q{}, @ARG ;
+	$mysync->{ output } = ( $mysync->{ output } || q{} ) . $suffix ;
+	return $mysync->{ output } ;
+}
+
+
+
+# tests_output_reset_with: Test::More unit tests for output_reset_with();
+# unlike output()/output_start(), each call replaces the buffer entirely,
+# so the expectations do not accumulate.
+sub tests_output_reset_with
+{
+	note( 'Entering tests_output_reset_with()' ) ;
+
+	my $mysync = { } ;
+
+	is( undef, output_reset_with(  ), 'output_reset_with: no args => undef' ) ;
+	is( q{}, output_reset_with( $mysync ), 'output_reset_with: one arg => ""' ) ;
+	is( 'rrrr', output_reset_with( $mysync, 'rrrr' ), 'output_reset_with: rrrr => rrrr' ) ;
+	is( 'aaaa', output_reset_with( $mysync, 'aaaa' ), 'output_reset_with: aaaa => aaaa' ) ;
+	is( "\n", output_reset_with( $mysync, "\n" ), 'output_reset_with: \n => \n' ) ;
+
+	note( 'Leaving tests_output_reset_with()' ) ;
+	return ;
+}
+
+# output_reset_with: discard any accumulated output and replace
+# $mysync->{ output } with the given strings. Returns the new buffer,
+# or undef when called without a $mysync.
+sub output_reset_with
+{
+	my $mysync = shift @ARG ;
+
+	return if not $mysync ;
+
+	$mysync->{ output } = join q{}, @ARG ;
+	return $mysync->{ output } ;
+}
+
+# pidfile: compute and store the pidfile path in $mysync->{ pidfile }
+# (unless the caller already set one). Under the local webserver
+# (NET_SERVER_SOFTWARE matching Net::Server::HTTP) the basename embeds the
+# filtered host/user pairs so concurrent syncs get distinct pidfiles;
+# otherwise a plain 'imapsync.pid' under $mysync->{ tmpdir } is used.
+# Also defaults pidfilelocking to 0. Returns nothing.
+sub pidfile
+{
+	my $mysync = shift ;
+
+	$mysync->{ pidfilelocking } =  defined  $mysync->{ pidfilelocking }  ?  $mysync->{ pidfilelocking }  : 0 ;
+
+	my $host1 = $mysync->{ host1 } || q{} ;
+	my $user1 = $mysync->{ user1 } || q{} ;
+	my $host2 = $mysync->{ host2 } || q{} ;
+	my $user2 = $mysync->{ user2 } || q{} ;
+
+	# slashes and forbidden characters are replaced so the result is a
+	# safe filename component
+	my $account1_filtered = filter_forbidden_characters( slash_to_underscore( $host1 . '_' . $user1 ) ) || q{} ;
+	my $account2_filtered = filter_forbidden_characters( slash_to_underscore( $host2 . '_' . $user2 ) ) || q{} ;
+
+	my $pidfile_basename ;
+
+	if ( $ENV{ 'NET_SERVER_SOFTWARE' } and ( $ENV{ 'NET_SERVER_SOFTWARE' } =~ /Net::Server::HTTP/ ) )
+	{
+		# under local webserver
+		$pidfile_basename = 'imapsync' . '_' . $account1_filtered . '_' . $account2_filtered . '.pid' ;
+	}
+	else
+	{
+		$pidfile_basename = 'imapsync.pid' ;
+	}
+
+	$mysync->{ pidfile }  =  defined  $mysync->{ pidfile }  ?  $mysync-> { pidfile }  : $mysync->{ tmpdir } . "/$pidfile_basename" ;
+	return ;
+}
+
+
+# tests_kill_zero: Test::More tests for the semantics of kill( 'ZERO', ... )
+# (existence/permission probe, sends no signal): probing self, pid 1
+# (allowed under docker or as root, denied otherwise), and forked children.
+# Skipped entirely on Windows where kill ZERO is unreliable.
+sub tests_kill_zero
+{
+	note( 'Entering tests_kill_zero()' ) ;
+
+
+
+	SKIP: {
+		if ( 'MSWin32' eq $OSNAME ) { skip( 'Tests tests_kill_zero avoided on Windows', 8 ) ; }
+
+
+		is( 1, kill( 'ZERO', $PROCESS_ID ), "kill ZERO : myself $PROCESS_ID => 1" ) ;
+		is( 2, kill( 'ZERO', $PROCESS_ID, $PROCESS_ID ), "kill ZERO : myself $PROCESS_ID $PROCESS_ID => 2" ) ;
+
+		# kill ZERO on pid 1 succeeds when privileged (root) or inside a
+		# docker container (where pid 1 belongs to the same namespace)
+		if ( (-e '/.dockerenv' ) or ( 0 == $EFFECTIVE_USER_ID) )
+		{
+			is( 1, kill( 'ZERO', 1 ), "kill ZERO : pid 1 => 1 (docker context or root)" ) ;
+			is( 2, kill( 'ZERO', $PROCESS_ID, 1 ), "kill ZERO : myself + pid 1, $PROCESS_ID 1 => 2 (docker context or root)" ) ;
+		}
+		else
+		{
+			is( 0, kill( 'ZERO', 1 ), "kill ZERO : pid 1 => 0 (non root)" ) ;
+			is( 1, kill( 'ZERO', $PROCESS_ID, 1 ), "kill ZERO : myself + pid 1, $PROCESS_ID 1 => 1 (one is non root)" ) ;
+
+		}
+
+
+		# first child only sleeps so the other processes can probe it
+		my $pid_1 = fork(  ) ;
+		if ( $pid_1 )
+		{
+			# parent
+		}
+		else
+		{
+			# child
+			sleep 3 ;
+			exit ;
+		}
+
+		my $pid_2 ;
+		$pid_2 = fork(  ) ;
+		if ( $pid_2 )
+		{
+			# I am the parent
+			ok( defined( $pid_2 ), "kill_zero: initial fork ok. I am the parent $PROCESS_ID" ) ;
+			ok( $pid_2 , "kill_zero: initial fork ok, child pid is $pid_2" ) ;
+			is( 3, kill( 'ZERO', $PROCESS_ID, $pid_2, $pid_1 ), "kill ZERO : myself $PROCESS_ID and child $pid_2 and brother $pid_1 => 3" ) ;
+
+			is( $pid_2, waitpid( $pid_2, 0 ), "kill_zero: child $pid_2 no more there => waitpid return $pid_2" ) ;
+		}
+		else
+		{
+			# I am the child
+			note( 'This one fails under Windows, kill ZERO returns 0 instead of 2' ) ;
+			is( 2, kill( 'ZERO', $PROCESS_ID, $pid_1 ), "kill ZERO : myself child $PROCESS_ID brother $pid_1 => 2" ) ;
+			myprint( "I am the child pid $PROCESS_ID, Exiting\n" ) ;
+			exit ;
+		}
+		wait(  ) ;
+
+	# End of SKIP block
+	}
+
+	note( 'Leaving tests_kill_zero()' ) ;
+	return ;
+}
+
+
+
+
+# tests_killpid_by_parent: Test::More tests for killpid() where the parent
+# process kills a forked child. Also checks that killpid() refuses to kill
+# the calling process itself. Skipped on Windows.
+sub tests_killpid_by_parent
+{
+	note( 'Entering tests_killpid_by_parent()' ) ;
+
+	SKIP: {
+		if ( 'MSWin32' eq $OSNAME ) { skip( 'Tests tests_killpid_by_parent avoided on Windows', 7 ) ; }
+
+		is( undef, killpid(  ), 'killpid: no args => undef' ) ;
+		note( "killpid: trying to kill myself pid $PROCESS_ID, hope I will not succeed" ) ;
+		is( undef, killpid( $PROCESS_ID ), 'killpid: myself => undef' ) ;
+
+		# the child exits cleanly on the QUIT that killpid() sends first
+		local $SIG{'QUIT'} = sub { myprint "GOT SIG QUIT! I am PID $PROCESS_ID. Exiting\n" ; exit ; } ;
+
+		my $pid ;
+		$pid = fork(  ) ;
+		if ( $pid )
+		{
+			# I am the parent
+			ok( defined( $pid ), "killpid: initial fork ok. I am the parent $PROCESS_ID" ) ;
+			ok( $pid , "killpid: initial fork ok, child pid is $pid" ) ;
+
+			is( 2, kill( 'ZERO', $PROCESS_ID, $pid ), "kill ZERO : myself $PROCESS_ID and child $pid => 2" ) ;
+			is( 1, killpid( $pid ), "killpid: child $pid killed => 1" ) ;
+			is( -1, waitpid( $pid, 0 ), "killpid: child $pid no more there => waitpid return -1" ) ;
+		}
+		else
+		{
+			# I am the child
+			myprint( "I am the child pid $PROCESS_ID, sleeping 1 + 3 seconds then kill myself\n" ) ;
+			sleep 1 ;
+			myprint( "I am the child pid $PROCESS_ID, slept 1 second, should be killed by my parent now, PPID " . mygetppid(  ) . "\n" ) ;
+			sleep 3 ;
+			# this test should not be run. If it happens => failure.
+			ok( 0 == 1, "killpid: child pid $PROCESS_ID not dead => failure" ) ;
+			myprint( "I am the child pid $PROCESS_ID, killing myself failure... Exiting\n" ) ;
+			exit ;
+		}
+
+	# End of SKIP block
+	}
+	note( 'Leaving tests_killpid_by_parent()' ) ;
+	return ;
+}
+
+# tests_killpid_by_brother: Test::More tests for killpid() where one forked
+# child (the "brother") kills a sibling process rather than its own child.
+# The parent only forks the two children and waits for both. Skipped on
+# Windows.
+sub tests_killpid_by_brother
+{
+	note( 'Entering tests_killpid_by_brother()' ) ;
+
+
+	SKIP: {
+		if ( 'MSWin32' eq $OSNAME ) { skip( 'Tests tests_killpid_by_brother avoided on Windows', 2 ) ; }
+
+		# the victim exits cleanly on the QUIT that killpid() sends first
+		local $SIG{'QUIT'} = sub { myprint "GOT SIG QUIT! I am PID $PROCESS_ID. Exiting\n" ; exit ; } ;
+
+		my $pid_parent = $PROCESS_ID ;
+		myprint( "I am the parent pid $pid_parent\n" ) ;
+		# first child: the victim; sleeps so its brother has time to kill it
+		my $pid_1 = fork(  ) ;
+		if ( $pid_1 )
+		{
+			# parent
+		}
+		else
+		{
+			# child
+			#while ( 1 ) { } ;
+			sleep 2 ;
+			sleep 2 ;
+			# this test should not be run. If it happens => failure.
+			# Well under Windows this always fails, shit!
+			ok( 0 == 1 or ( 'MSWin32' eq $OSNAME ) , "killpid: child pid $PROCESS_ID killing by brother but not dead => failure" ) ;
+			myprint( "I am the child pid $PROCESS_ID, killing by brother failed... Exiting\n" ) ;
+			exit ;
+		}
+
+		# second child: the killer; kills its brother $pid_1
+		my $pid_2 ;
+		$pid_2 = fork(  ) ;
+		if ( $pid_2 )
+		{
+			# parent
+		}
+		else
+		{
+			# I am the child
+			myprint( "I am the child pid $PROCESS_ID, my brother has pid $pid_1\n" ) ;
+			is( 1, killpid( $pid_1 ), "killpid: brother $pid_1 killed => 1" ) ;
+			sleep 2 ;
+			exit ;
+		}
+
+		#sleep 1 ;
+		is( $pid_1, waitpid( $pid_1, 0), "I am the parent $PROCESS_ID waitpid _1( $pid_1 )" ) ;
+		is( $pid_2, waitpid( $pid_2, 0 ), "I am the parent $PROCESS_ID waitpid _2( $pid_2 )" ) ;
+
+
+	# End of SKIP block
+	}
+
+	note( 'Leaving tests_killpid_by_brother()' ) ;
+	return ;
+}
+
+
+# killpid: terminate the process whose PID is given.
+# Strategy: probe with kill ZERO, politely ask with SIGQUIT, wait, then
+# force with SIGKILL, reaping with waitpid(WNOHANG) after each signal.
+# Returns 1 when the target is confirmed gone, undef otherwise (no pid
+# given, refusing to kill the current process, or the target survived).
+sub killpid
+{
+	my $pidtokill = shift ;
+
+	if ( ! $pidtokill ) {
+		myprint( "No process to abort.\n"  ) ;
+		return ;
+	}
+
+	# Never kill the current process through this path.
+	# (typo fix: "Sractch" -> "Scratch")
+	if ( $PROCESS_ID == $pidtokill ) {
+		myprint( "I will not kill myself pid $PROCESS_ID via killpid. Scratch it!\n"  ) ;
+		return ;
+	}
+
+
+	# First ask for suicide
+	# kill ZERO is only an existence/permission probe; it is unreliable on
+	# Windows, hence the MSWin32 escape hatch.
+	if ( kill( 'ZERO', $pidtokill ) or ( 'MSWin32' eq $OSNAME ) ) {
+		myprint( "Sending signal QUIT to PID $pidtokill \n"  ) ;
+		kill 'QUIT', $pidtokill ;
+		sleep 2 ;
+		waitpid( $pidtokill, WNOHANG) ;
+	}else{
+		myprint( "Can not send signal kill ZERO to PID $pidtokill.\n"  ) ;
+		return ;
+	}
+
+	#while ( waitpid( $pidtokill, WNOHANG) > 0 ) { } ;
+
+	# Then murder
+	if ( kill( 'ZERO', $pidtokill ) or ( 'MSWin32' eq $OSNAME ) ) {
+		myprint( "Sending signal KILL to PID $pidtokill \n"  ) ;
+		kill 'KILL', $pidtokill ;
+		sleep 1 ;
+		waitpid( $pidtokill, WNOHANG) ;
+	}else{
+		myprint( "Process PID $pidtokill ended.\n"  ) ;
+		return 1;
+	}
+	# Well ...
+	# NOTE(review): 'xMSWin32' can never equal $OSNAME, so the Windows
+	# escape is disabled in this final check — looks intentional (on
+	# Windows we cannot trust kill ZERO to report success); confirm
+	# before "fixing".
+	if ( kill( 'ZERO', $pidtokill ) or ( 'xMSWin32' eq $OSNAME ) ) {
+		myprint( "Process PID $pidtokill seems still there. Can not do much.\n"  ) ;
+		return ;
+	}else{
+		myprint( "Process PID $pidtokill ended.\n"  ) ;
+		return 1;
+	}
+
+	return ;
+}
+
+# tests_abort: minimal Test::More test for abort(); only the no-argument
+# case is testable here, because abort() with a sync hash exits the
+# process.
+sub tests_abort
+{
+	note( 'Entering tests_abort()' ) ;
+
+	is( undef, abort(  ), 'abort: no args => undef' ) ;
+	note( 'Leaving tests_abort()' ) ;
+	return ;
+}
+
+
+
+
+# abort: read the PID of a running imapsync from $mysync->{pidfile} and try
+# to terminate it via killpid(). Always exits the current process with
+# $EX_OK afterwards (whether or not the other run could be killed);
+# returns undef only when called without a $mysync.
+sub abort
+{
+	my $mysync = shift @ARG ;
+
+	if ( not $mysync ) { return ; }
+
+	if ( ! -r $mysync->{pidfile} ) {
+		myprint( "Can not read pidfile $mysync->{pidfile}. Exiting.\n" ) ;
+		exit $EX_OK ;
+	}
+	my $pidtokill = firstline( $mysync->{pidfile} ) ;
+	if ( ! $pidtokill ) {
+		myprint( "No process to abort. Exiting.\n" ) ;
+		exit $EX_OK ;
+	}
+
+	killpid( $pidtokill ) ;
+
+	# well, the abort job is done anyway, because even when not succeeded
+	# in aborting another run, this run has to end without doing any
+	# thing else
+
+	exit $EX_OK ;
+}
+
+
+# under_docker_context: return 1 when running inside a Docker container
+# (detected via the /.dockerenv marker file), 0 otherwise. The $mysync
+# argument is accepted for call-site symmetry but not used.
+sub under_docker_context
+{
+	my $mysync = shift ;
+
+	return( ( -e '/.dockerenv' ) ? 1 : 0 ) ;
+}
+
+
+# docker_context: adjust runtime defaults when running inside Docker:
+# disable the pidfile and logging, and chdir to /var/tmp/. No-op outside
+# a docker context. Returns nothing.
+sub docker_context
+{
+	my $mysync = shift ;
+
+	#-e '/.dockerenv' || return ;
+
+	if ( ! under_docker_context( $mysync ) )
+	{
+		return ;
+	}
+
+	$mysync->{ debug } and myprint( "Docker context detected with /.dockerenv\n" ) ;
+	# No pidfile
+	$mysync->{pidfile} = q{} ;
+	# No log
+	$mysync->{log} = 0 ;
+	# In case
+	$mysync->{ debug } and myprint( "Changing current directory to /var/tmp/\n" ) ;
+	chdir '/var/tmp/' ;
+
+	return ;
+}
+
+# cgibegin: under CGI context only, lazily load the CGI and CGI::Carp
+# modules (kept out of non-CGI runs) and store a fresh CGI object in
+# $mysync->{cgi}. Returns nothing.
+sub cgibegin
+{
+	my $mysync = shift ;
+	if ( ! under_cgi_context( $mysync ) ) { return ; }
+	require CGI ;
+	CGI->import( qw( -no_debug -utf8 ) ) ;
+	require CGI::Carp ;
+	CGI::Carp->import( qw( fatalsToBrowser ) ) ;
+	$mysync->{cgi} = CGI->new(  ) ;
+	return ;
+}
+
+# tests_under_cgi_context: Test::More tests for under_cgi_context(),
+# toggling $ENV{SERVER_SOFTWARE} (the CGI-context marker) on and off
+# twice; "local" keeps each change scoped to its do-block.
+sub tests_under_cgi_context
+{
+	note( 'Entering tests_under_cgi_context()' ) ;
+
+	# $ENV{SERVER_SOFTWARE} = 'under imapsync' ;
+	do {
+		# Not in cgi context
+		delete local $ENV{SERVER_SOFTWARE} ;
+		is( undef, under_cgi_context(  ), 'under_cgi_context: SERVER_SOFTWARE unset => not in cgi context' ) ;
+	} ;
+	do {
+		# In cgi context
+		local $ENV{SERVER_SOFTWARE} = 'under imapsync' ;
+		is( 1, under_cgi_context(  ), 'under_cgi_context: SERVER_SOFTWARE set => in cgi context' ) ;
+	} ;
+	do {
+		# Not in cgi context
+		delete local $ENV{SERVER_SOFTWARE} ;
+		is( undef, under_cgi_context(  ), 'under_cgi_context: SERVER_SOFTWARE unset => not in cgi context' ) ;
+	} ;
+	do {
+		# In cgi context
+		local $ENV{SERVER_SOFTWARE} = 'under imapsync' ;
+		is( 1, under_cgi_context(  ), 'under_cgi_context: SERVER_SOFTWARE set => in cgi context' ) ;
+	} ;
+	note( 'Leaving tests_under_cgi_context()' ) ;
+	return ;
+}
+
+
+# under_cgi_context: return 1 when the SERVER_SOFTWARE environment
+# variable is set (i.e. we run under a web server), undef otherwise.
+# The $mysync argument is accepted but not used.
+sub under_cgi_context
+{
+	my $mysync = shift ;
+
+	return 1 if $ENV{SERVER_SOFTWARE} ;
+
+	# Not in cgi context
+	return ;
+}
+
+# cgibuildheader: under CGI context, build the HTTP response header and
+# PREPEND it to the output buffer via output_start(). Three cases:
+# abort request => 200 plain text; server overloaded ($mysync->{loaddelay})
+# => 503 Service Unavailable; normal run => 200 with an 'imapsync_runs'
+# counter cookie (incremented each run, 20-year expiry). Returns nothing.
+sub cgibuildheader
+{
+	my $mysync = shift ;
+	if ( ! under_cgi_context( $mysync ) ) { return ; }
+
+	my $imapsync_runs = $mysync->{cgi}->cookie( 'imapsync_runs' ) || 0 ;
+	my $cookie = $mysync->{cgi}->cookie(
+		-name    => 'imapsync_runs',
+		-value   => 1 + $imapsync_runs,
+		-expires => '+20y',
+		-path    => '/cgi-bin/imapsync',
+	) ;
+	my $httpheader ;
+	if ( $mysync->{ abort } ) {
+		$httpheader = $mysync->{cgi}->header(
+			-type => 'text/plain',
+			-status  => '200 OK to abort syncing IMAP boxes' . ". Here is " . hostname(),
+		) ;
+	}elsif( $mysync->{ loaddelay } ) {
+# https://tools.ietf.org/html/rfc2616#section-10.5.4
+# 503 Service Unavailable
+# The server is currently unable to handle the request due to a temporary overloading or maintenance of the server.
+		$httpheader = $mysync->{cgi}->header(
+			-type => 'text/plain',
+			-status  => '503 Service Unavailable' . ". Be back in $mysync->{ loaddelay } min. Load on " . hostname() . " is $mysync->{ loadavg }",
+		) ;
+	}else{
+		$httpheader = $mysync->{cgi}->header(
+			-type => 'text/plain; charset=UTF-8',
+			-status  => '200 OK to sync IMAP boxes' . ". Load on " . hostname() . " is $mysync->{ loadavg }",
+			-cookie => $cookie,
+		) ;
+	}
+	output_start( $mysync, $httpheader ) ;
+
+	return ;
+}
+
+# cgiload: under CGI context, refuse to start a sync when the server is
+# overloaded ($mysync->{ loaddelay } set): counts an error and exits with
+# $EX_UNAVAILABLE. Abort requests are always let through since they free
+# resources. Returns nothing otherwise.
+sub cgiload
+{
+	# Exit on heavy load in CGI context
+	my $mysync = shift ;
+	if ( ! under_cgi_context( $mysync ) ) { return ; }
+	if ( $mysync->{ abort } ) { return ; } # keep going to abort since some ressources will be free soon
+	if ( $mysync->{ loaddelay } )
+	{
+		$mysync->{nb_errors}++ ;
+		exit_clean( $mysync, $EX_UNAVAILABLE,
+			"Server is on heavy load. Be back in $mysync->{ loaddelay } min. Load is $mysync->{ loadavg }\n"
+		) ;
+	}
+	return ;
+}
+
+# tests_set_umask: Test::More test for set_umask(); expects success on
+# Unix and failure (undef) on MSWin32. Saves and restores the process
+# umask around the test.
+sub tests_set_umask
+{
+	note( 'Entering tests_set_umask()' ) ;
+
+	my $save_umask = umask ;
+
+	my $mysync = {} ;
+	if ( 'MSWin32' eq $OSNAME ) {
+		is( undef, set_umask( $mysync ), "set_umask: set failure to $UMASK_PARANO on MSWin32" ) ;
+	}else{
+		is( 1, set_umask( $mysync ), "set_umask: set to $UMASK_PARANO" ) ;
+	}
+
+	umask $save_umask ;
+	note( 'Leaving tests_set_umask()' ) ;
+	return ;
+}
+
+# set_umask: set the process umask to the paranoid value $UMASK_PARANO
+# (file-level constant) and record the change in the output buffer.
+# Returns 1 when the new umask string matches $UMASK_PARANO, undef
+# otherwise (expected on MSWin32, per tests_set_umask).
+sub set_umask
+{
+	my $mysync = shift ;
+	my $previous_umask = umask_str(  ) ;
+	my $new_umask = umask_str( $UMASK_PARANO ) ;
+	output( $mysync, "Umask set with $new_umask (was $previous_umask)\n" ) ;
+	if ( $new_umask eq $UMASK_PARANO ) {
+		return 1 ;
+	}
+	return ;
+}
+
+# tests_umask_str: Test::More tests for umask_str(): idempotence without
+# arguments, normalization of various octal-string spellings, and restore
+# of the saved umask at the end (cross-checked against the raw umask
+# builtin). Some values only succeed on Unix.
+sub tests_umask_str
+{
+	note( 'Entering tests_umask_str()' ) ;
+
+	my $save_umask = umask ;
+
+	is( umask_str(  ), umask_str(  ), 'umask_str: no parameters => idopotent' ) ;
+	is( my $save_umask_str = umask_str(  ), umask_str(  ), 'umask_str: no parameters => idopotent + save' ) ;
+	is( '0000', umask_str( q{ } ),  'umask_str: q{ } => 0000' ) ;
+	is( '0000', umask_str( q{} ),  'umask_str: q{} => 0000' ) ;
+	is( '0000', umask_str( '0000' ), 'umask_str: 0000 => 0000' ) ;
+	is( '0000', umask_str( '0' ),    'umask_str: 0 => 0000' ) ;
+	is( '0200', umask_str( '0200' ), 'umask_str: 0200 => 0200' ) ;
+	is( '0400', umask_str( '0400' ), 'umask_str: 0400 => 0400' ) ;
+	is( '0600', umask_str( '0600' ), 'umask_str: 0600 => 0600' ) ;
+
+	SKIP: {
+		if ( 'MSWin32' eq $OSNAME ) { skip( 'Tests success only for Unix', 6 ) ; }
+		is( '0100', umask_str( '0100' ), 'umask_str: 0100 => 0100' ) ;
+		is( '0001', umask_str( '0001' ), 'umask_str: 0001 => 0001' ) ;
+		is( '0777', umask_str( '0777' ), 'umask_str: 0777 => 0777' ) ;
+		is( '0777', umask_str( '00777' ), 'umask_str: 00777 => 0777' ) ;
+		is( '0777', umask_str( ' 777 ' ), 'umask_str: 777 => 0777' ) ;
+		is( "$UMASK_PARANO", umask_str( $UMASK_PARANO ), "umask_str: UMASK_PARANO $UMASK_PARANO => $UMASK_PARANO" ) ;
+	}
+
+	is( $save_umask_str, umask_str( $save_umask_str ), 'umask_str: restore with str' ) ;
+	is( $save_umask, umask, 'umask_str: umask is restored, controlled by direct umask' ) ;
+	is( $save_umask, umask $save_umask, 'umask_str: umask is restored by direct umask' ) ;
+	is( $save_umask, umask, 'umask_str: umask initial controlled by direct umask' ) ;
+
+	note( 'Leaving tests_umask_str()' ) ;
+	return ;
+}
+
+# umask_str: when given an argument, set the process umask to
+# oct($value); in every case return the current umask formatted as a
+# 4-character octal string such as '0022'.
+sub umask_str
+{
+	my $octal_string = shift ;
+
+	umask oct( $octal_string ) if defined $octal_string ;
+
+	return sprintf '%#04o', umask ;
+}
+
+# tests_umask: Test::More tests for the raw umask builtin itself (returns
+# the PREVIOUS value when setting); saves the initial umask up front and
+# restores it at the end. Setting 0777 is only verifiable on Unix.
+sub tests_umask
+{
+	note( 'Entering tests_umask()' ) ;
+
+	my $save_umask ;
+	is( umask, umask, 'umask: umask is umask' ) ;
+	is( $save_umask = umask, umask, "umask: umask is umask again + save it: $save_umask" ) ;
+	is( $save_umask, umask oct(0000), 'umask: umask 0000' ) ;
+	is( oct(0000), umask, 'umask: umask is now 0000' ) ;
+	is( oct(0000), umask oct(777), 'umask: umask 0777 call, previous 0000' ) ;
+
+	SKIP: {
+		if ( 'MSWin32' eq $OSNAME ) { skip( 'Tests success only for Unix', 2 ) ; }
+		is( oct(777), umask, 'umask: umask is now 0777' ) ;
+		is( oct(777), umask $save_umask, "umask: umask $save_umask restore inital value, previous 0777" ) ;
+	}
+
+	ok( defined umask $save_umask, "umask: umask $save_umask restore inital value, previous defined" ) ;
+	is( $save_umask, umask, 'umask: umask is umask restored' ) ;
+	note( 'Leaving tests_umask()' ) ;
+
+	return ;
+}
+
+# cgisetcontext: harden the run for CGI usage. Clears every option whose
+# value would be eval'ed or executed (regextrans2/regexflag/regexmess/
+# skipmess/pipemess, maxlinelengthcmd, delete2folders*), forces safe
+# defaults (pidfile locking, error cap, no imap debug, no cache, hidden
+# passwords), then creates and chdirs into a per-sync temp/working
+# directory derived from hashsynclocal(). Dies when the directory can not
+# be created or entered. Mutates many file-level globals. Returns nothing.
+sub cgisetcontext
+{
+	my $mysync = shift ;
+	if ( ! under_cgi_context( $mysync ) ) { return ; }
+
+	output( $mysync, "Under cgi context\n" ) ;
+	set_umask( $mysync ) ;
+
+	# Remove all content in unsafe evaled options
+	@{ $mysync->{ regextrans2 } } = (  ) ;
+	@regexflag = (  ) ;
+	@regexmess = (  ) ;
+	@skipmess = (  ) ;
+	@pipemess = (  ) ;
+	$delete2foldersonly   = undef ;
+	$delete2foldersbutnot = undef ;
+	$maxlinelengthcmd     = undef ;
+
+	# Set safe default values (I hope...)
+
+
+	#$mysync->{pidfile}  = 'imapsync.pid' ;
+	$mysync->{pidfilelocking} = 1 ;
+	$mysync->{errorsmax} = $ERRORS_MAX_CGI ;
+	$modulesversion = 0 ;
+	$mysync->{releasecheck} = defined $mysync->{releasecheck} ? $mysync->{releasecheck} : 1 ;
+	$usecache = 0 ;
+	$mysync->{showpasswords} = 0 ;
+	$debugimap1 = $debugimap2 = $debugimap = 0 ;
+	$reconnectretry1 = $reconnectretry2 = $DEFAULT_NB_RECONNECT_PER_IMAP_COMMAND ;
+	$pipemesscheck = 0 ;
+
+	# working directory: hash of the local sync identity keeps different
+	# syncs in different directories
+	$mysync->{hashfile} = $CGI_HASHFILE ;
+	my $hashsynclocal = hashsynclocal( $mysync ) || die "Can not get hashsynclocal. Exiting\n" ;
+
+	if ( $ENV{ 'NET_SERVER_SOFTWARE' } and ( $ENV{ 'NET_SERVER_SOFTWARE' } =~ /Net::Server::HTTP/ ) )
+	{
+		# under local webserver
+		$cgidir = q{.} ;
+	}
+	else
+	{
+		$cgidir = $CGI_TMPDIR_TOP . '/' . $hashsynclocal ;
+	}
+	-d $cgidir or mkpath $cgidir or die "Can not create $cgidir: $OS_ERROR\n" ;
+	$mysync->{ tmpdir } = $cgidir ;
+
+	chdir $cgidir or die "Can not cd to $cgidir: $OS_ERROR\n" ;
+	cgioutputenvcontext( $mysync ) ;
+	$mysync->{ debug } and output( $mysync, 'Current directory is ' . getcwd(  ) . "\n" ) ;
+	$mysync->{ debug } and output( $mysync, 'Real user id is ' . getpwuid_any_os( $REAL_USER_ID ) . " (uid $REAL_USER_ID)\n" ) ;
+	$mysync->{ debug } and output( $mysync, 'Effective user id is ' . getpwuid_any_os( $EFFECTIVE_USER_ID ). " (euid $EFFECTIVE_USER_ID)\n" ) ;
+
+	$mysync->{ skipemptyfolders } = defined $mysync->{ skipemptyfolders } ? $mysync->{ skipemptyfolders } : 1 ;
+
+	# Out of memory with messages over 1 GB ?
+	$mysync->{ maxsize } = defined  $mysync->{ maxsize } ? $mysync->{ maxsize } : 1_000_000_000 ;
+
+	# tail -f behaviour on by default
+	$mysync->{ tail } = defined  $mysync->{ tail } ? $mysync->{ tail } : 1 ;
+
+	# not sure it's for good
+	@useheader = qw( Message-Id ) ;
+
+	# addheader on by default
+	$mysync->{ addheader } = defined $mysync->{ addheader } ? $mysync->{ addheader } : 1 ;
+
+	return ;
+}
+
+# cgioutputenvcontext( $mysync )
+# Log a fixed set of CGI environment variables (remote address/host,
+# referer, user agent, server software/port, cookie) via output(),
+# skipping any that are unset or empty.
+sub cgioutputenvcontext
+{
+ my $mysync = shift @ARG ;
+
+ for my $envvar ( qw( REMOTE_ADDR REMOTE_HOST HTTP_REFERER HTTP_USER_AGENT SERVER_SOFTWARE SERVER_PORT HTTP_COOKIE ) ) {
+
+ my $envval = $ENV{ $envvar } || q{} ;
+ if ( $envval ) { output( $mysync, "$envvar is $envval\n" ) } ;
+ }
+
+ return ;
+}
+
+
+# debugsleep( $mysync )
+# If the --debugsleep option is set, announce and sleep that many
+# seconds; otherwise do nothing.
+sub debugsleep
+{
+ my $mysync = shift @ARG ;
+ if ( defined $mysync->{debugsleep} ) {
+ myprint( "Info: sleeping $mysync->{debugsleep}s\n" ) ;
+ sleep $mysync->{debugsleep} ;
+ }
+ return ;
+}
+
+# Unit test for foldersize(): only checks the no-argument case, which
+# must return undef (the all_defined() guard in foldersize fails).
+sub tests_foldersize
+{
+ note( 'Entering tests_foldersize()' ) ;
+
+ is( undef, foldersize( ), 'foldersize: no args => undef' ) ;
+
+
+ #is_deeply( {}, {}, 'foldersize: a hash is a hash' ) ;
+ #is_deeply( [], [], 'foldersize: an array is an array' ) ;
+ note( 'Leaving tests_foldersize()' ) ;
+ return ;
+}
+
+
+
+# Globals:
+# $uidnext_default
+# $fetch_hash_set
+#
+# foldersize( $mysync, $side, $imap, $search_cmd, $abletosearch, $folder )
+# Size one folder: SELECT it, list its messages, fetch RFC822.SIZE for
+# each, and return the 3-tuple ( total_bytes, nb_messages, biggest_size ).
+# Returns an empty list on missing arguments, SELECT failure (counted via
+# errors_incr), fetch_hash failure, or a lost connection.
+sub foldersize
+{
+ # size of one folder
+ my ( $mysync, $side, $imap, $search_cmd, $abletosearch, $folder ) = @ARG ;
+
+ if ( ! all_defined( $mysync, $side, $imap, $folder ) )
+ {
+ return ;
+ }
+
+ # FTGate is RFC buggy with EXAMINE it does not act as SELECT
+ #if ( ! $imap->examine( $folder ) ) {
+ if ( ! $imap->select( $folder ) ) {
+ my $error = join q{},
+ "$side Folder $folder: Could not select: ",
+ $imap->LastError, "\n" ;
+ errors_incr( $mysync, $error ) ;
+ return ;
+ }
+
+ if ( $imap->IsUnconnected( ) )
+ {
+ return ;
+ }
+
+ my $hash_ref = { } ;
+ my @msgs = select_msgs( $imap, undef, $search_cmd, $abletosearch, $folder ) ;
+ my $nb_msgs = scalar @msgs ;
+ my $biggest_in_folder = 0 ;
+ # Pre-seed the hash with one (undef) slot per message UID.
+ @{ $hash_ref }{ @msgs } = ( undef ) if @msgs ;
+
+ my $stot = 0 ;
+
+ if ( $imap->IsUnconnected( ) )
+ {
+ return ;
+ }
+
+ if ( $nb_msgs > 0 and @msgs ) {
+ if ( $abletosearch ) {
+ if ( ! $imap->fetch_hash( \@msgs, 'RFC822.SIZE', $hash_ref) ) {
+ my $error = "$side failure with fetch_hash: $EVAL_ERROR\n" ;
+ errors_incr( $mysync, $error ) ;
+ return ;
+ }
+ }
+ else
+ {
+ # Server can not SEARCH: fetch sizes over the UID range 1:UIDNEXT
+ # (or the $fetch_hash_set override) instead of an explicit UID list.
+ my $uidnext = $imap->uidnext( $folder ) || $uidnext_default ;
+ my $fetch_hash_uids = $fetch_hash_set || "1:$uidnext" ;
+ if ( ! $imap->fetch_hash( $fetch_hash_uids, 'RFC822.SIZE', $hash_ref ) ) {
+ my $error = "$side failure with fetch_hash: $EVAL_ERROR\n" ;
+ errors_incr( $mysync, $error ) ;
+ return ;
+ }
+ }
+ for ( keys %{ $hash_ref } ) {
+ my $size = $hash_ref->{ $_ }->{ 'RFC822.SIZE' } ;
+ $stot += $size ;
+ $biggest_in_folder = max( $biggest_in_folder, $size ) ;
+ }
+ }
+ return( $stot, $nb_msgs, $biggest_in_folder ) ;
+
+}
+
+
+# The old subroutine that performed just one side at a time.
+# Still here for a while, until confident with sub foldersize_diff_compute()
+# foldersizes( $mysync, $side, $imap, $search_cmd, $abletosearch, @folders )
+# Legacy one-side-at-a-time sizing: loop over @folders, call foldersize()
+# on each existing folder, print per-folder and per-side summaries, and
+# return ( total_nb_messages, total_bytes ).
+# NOTE(review): when foldersize() fails it returns an empty list, which
+# leaves $stot/$nb_msgs undef here before they are printed and summed —
+# presumably producing "uninitialized value" warnings; confirm intended.
+sub foldersizes
+{
+ my ( $mysync, $side, $imap, $search_cmd, $abletosearch, @folders ) = @_ ;
+ my $total_size = 0 ;
+ my $total_nb = 0 ;
+ my $biggest_in_all = 0 ;
+
+ my $nb_folders = scalar @folders ;
+ my $ct_folders = 0 ; # folder counter.
+ myprint( "++++ Calculating sizes of $nb_folders folders on $side\n" ) ;
+ foreach my $folder ( @folders ) {
+ my $stot = 0 ;
+ my $nb_msgs = 0 ;
+ my $biggest_in_folder = 0 ;
+
+ $ct_folders++ ;
+ myprintf( "$side folder %7s %-35s", "$ct_folders/$nb_folders", jux_utf8( $folder ) ) ;
+ # Host2 folder lookup is case-insensitive (UPPER map); Host1 is exact.
+ if ( 'Host2' eq $side and not exists $mysync->{h2_folders_all_UPPER}{ uc $folder } ) {
+ myprint( " does not exist yet\n") ;
+ next ;
+ }
+ if ( 'Host1' eq $side and not exists $h1_folders_all{ $folder } ) {
+ myprint( " does not exist\n" ) ;
+ next ;
+ }
+
+ last if $imap->IsUnconnected( ) ;
+
+ ( $stot, $nb_msgs, $biggest_in_folder ) = foldersize( $mysync, $side, $imap, $search_cmd, $abletosearch, $folder ) ;
+
+ myprintf( ' Size: %9s', $stot ) ;
+ myprintf( ' Messages: %5s', $nb_msgs ) ;
+ myprintf( " Biggest: %9s\n", $biggest_in_folder ) ;
+ $total_size += $stot ;
+ $total_nb += $nb_msgs ;
+ $biggest_in_all = max( $biggest_in_all, $biggest_in_folder ) ;
+ }
+ myprintf( "%s Nb folders: %11s folders\n", $side, $nb_folders ) ;
+ myprintf( "%s Nb messages: %11s messages\n", $side, $total_nb ) ;
+ myprintf( "%s Total size: %11s bytes (%s)\n", $side, $total_size, bytes_display_string( $total_size ) ) ;
+ myprintf( "%s Biggest message: %11s bytes (%s)\n", $side, $biggest_in_all, bytes_display_string( $biggest_in_all ) ) ;
+ myprintf( "%s Time spent on sizing: %11.1f seconds\n", $side, timenext( $mysync ) ) ;
+ return( $total_nb, $total_size ) ;
+}
+
+
+# foldersize_diff_present( $mysync, $folder1, $folder2, $counter_str, $force )
+# Print, for one Host1/Host2 folder pair, their size/count/biggest values
+# and the Host2-Host1 differences. Computes the values first via
+# foldersize_diff_compute() when they are missing (or when $force is
+# true). A still-undefined size after computing means the folder does
+# not exist on that side.
+sub foldersize_diff_present
+{
+ my $mysync = shift ;
+ my $folder1 = shift ;
+ my $folder2 = shift ;
+ my $counter_str = shift ;
+ my $force = shift ;
+
+ my $values1_str ;
+ my $values2_str ;
+
+ if ( ! defined $mysync->{ folder1 }->{ $folder1 }->{ size } || $force )
+ {
+ foldersize_diff_compute( $mysync, $folder1, $folder2, $force ) ;
+ }
+
+ # again, but this time it means no availaible data.
+ if ( defined $mysync->{ folder1 }->{ $folder1 }->{ size } )
+ {
+ $values1_str = sprintf( "Size: %9s Messages: %5s Biggest: %9s\n",
+ $mysync->{ folder1 }->{ $folder1 }->{ size },
+ $mysync->{ folder1 }->{ $folder1 }->{ nb_msgs },
+ $mysync->{ folder1 }->{ $folder1 }->{ biggest },
+ ) ;
+ }
+ else
+ {
+ $values1_str = " does not exist\n" ;
+ }
+
+ if ( defined $mysync->{ folder2 }->{ $folder2 }->{ size } )
+ {
+ $values2_str = sprintf( "Size: %9s Messages: %5s Biggest: %9s\n",
+ $mysync->{ folder2 }->{ $folder2 }->{ size },
+ $mysync->{ folder2 }->{ $folder2 }->{ nb_msgs },
+ $mysync->{ folder2 }->{ $folder2 }->{ biggest },
+ ) ;
+ }
+ else
+ {
+ $values2_str = " does not exist yet\n" ;
+ }
+
+ myprintf( "Host1 folder %7s %-35s %s",
+ "$counter_str",
+ jux_utf8( $folder1 ),
+ $values1_str
+ ) ;
+
+ myprintf( "Host2 folder %7s %-35s %s",
+ "$counter_str",
+ jux_utf8( $folder2 ),
+ $values2_str
+ ) ;
+
+ myprintf( "Host2-Host1 %7s %-35s %9s %5s %9s\n\n",
+ "",
+ "",
+ $mysync->{ folder1 }->{ $folder1 }->{ size_diff },
+ $mysync->{ folder1 }->{ $folder1 }->{ nb_msgs_diff },
+ $mysync->{ folder1 }->{ $folder1 }->{ biggest_diff },
+
+ ) ;
+
+
+
+
+ return ;
+}
+
+# foldersize_diff_compute( $mysync, $folder1, $folder2, $force )
+# Compute (memoized) the size/nb_msgs/biggest of $folder1 on Host1 and
+# $folder2 on Host2, store them under $mysync->{folder1}/{folder2}, and
+# record the Host2-Host1 differences on both entries. Cached values are
+# reused unless $force is true; a folder absent from the known-folders
+# maps is never sized (its cached values, possibly undef, are used).
+sub foldersize_diff_compute
+{
+ my $mysync = shift ;
+ my $folder1 = shift ;
+ my $folder2 = shift ;
+ my $force = shift ;
+
+
+
+ my ( $size_1, $nb_msgs_1, $biggest_1 ) ;
+ # memoization
+ if (
+ exists $h1_folders_all{ $folder1 }
+ &&
+ (
+ ! defined $mysync->{ folder1 }->{ $folder1 }->{ size }
+ || $force
+ )
+ )
+ {
+ #myprint( "foldersize folder1 $h1_folders_all{ $folder1 }\n" ) ;
+ ( $size_1, $nb_msgs_1, $biggest_1 ) =
+ foldersize( $mysync,
+ 'Host1',
+ $mysync->{ imap1 },
+ $mysync->{ search1 },
+ $mysync->{ abletosearch1 },
+ $folder1
+ ) ;
+ $mysync->{ folder1 }->{ $folder1 }->{ size } = $size_1 ;
+ $mysync->{ folder1 }->{ $folder1 }->{ nb_msgs } = $nb_msgs_1 ;
+ $mysync->{ folder1 }->{ $folder1 }->{ biggest } = $biggest_1 ;
+ }
+ else
+ {
+ $size_1 = $mysync->{ folder1 }->{ $folder1 }->{ size } ;
+ $nb_msgs_1 = $mysync->{ folder1 }->{ $folder1 }->{ nb_msgs } ;
+ $biggest_1 = $mysync->{ folder1 }->{ $folder1 }->{ biggest } ;
+
+ }
+
+
+ my ( $size_2, $nb_msgs_2, $biggest_2 ) ;
+ # Same memoization for Host2, with its case-insensitive folder map.
+ if (
+ exists $mysync->{ h2_folders_all_UPPER }{ uc $folder2 }
+ &&
+ (
+ ! defined $mysync->{ folder2 }->{ $folder2 }->{ size }
+ || $force
+ )
+ )
+ {
+ #myprint( "foldersize folder2\n" ) ;
+ ( $size_2, $nb_msgs_2, $biggest_2 ) =
+ foldersize( $mysync,
+ 'Host2',
+ $mysync->{ imap2 },
+ $mysync->{ search2 },
+ $mysync->{ abletosearch2 },
+ $folder2
+ ) ;
+
+ $mysync->{ folder2 }->{ $folder2 }->{ size } = $size_2 ;
+ $mysync->{ folder2 }->{ $folder2 }->{ nb_msgs } = $nb_msgs_2 ;
+ $mysync->{ folder2 }->{ $folder2 }->{ biggest } = $biggest_2 ;
+ }
+ else
+ {
+ $size_2 = $mysync->{ folder2 }->{ $folder2 }->{ size } ;
+ $nb_msgs_2 = $mysync->{ folder2 }->{ $folder2 }->{ nb_msgs } ;
+ $biggest_2 = $mysync->{ folder2 }->{ $folder2 }->{ biggest } ;
+
+ }
+
+
+ # diff() treats undef as 0, so differences are always defined.
+ my $size_diff = diff( $size_2, $size_1 ) ;
+ my $nb_msgs_diff = diff( $nb_msgs_2, $nb_msgs_1 ) ;
+ my $biggest_diff = diff( $biggest_2, $biggest_1 ) ;
+
+ $mysync->{ folder1 }->{ $folder1 }->{ size_diff } = $size_diff ;
+ $mysync->{ folder1 }->{ $folder1 }->{ nb_msgs_diff } = $nb_msgs_diff ;
+ $mysync->{ folder1 }->{ $folder1 }->{ biggest_diff } = $biggest_diff ;
+
+ # It's redundant but easier to access later
+ $mysync->{ folder2 }->{ $folder2 }->{ size_diff } = $size_diff ;
+ $mysync->{ folder2 }->{ $folder2 }->{ nb_msgs_diff } = $nb_msgs_diff ;
+ $mysync->{ folder2 }->{ $folder2 }->{ biggest_diff } = $biggest_diff ;
+
+ return ;
+}
+
+# diff( $x, $y )
+# Return $x - $y, treating undef (and other false values, e.g. 0 or "")
+# as 0 on either side.
+sub diff
+{
+ my $x = shift ;
+ my $y = shift ;
+
+ $x ||= 0 ;
+ $y ||= 0 ;
+
+ return $x - $y ;
+}
+
+# add( $x, $y )
+# Return $x + $y, treating undef (and other false values, e.g. 0 or "")
+# as 0 on either side.
+sub add
+{
+ my $x = shift ;
+ my $y = shift ;
+
+ $x ||= 0 ;
+ $y ||= 0 ;
+
+ return $x + $y ;
+}
+
+
+# foldersizes_diff_list( $mysync, $force )
+# For each wanted Host1 folder, map it to its Host2 name and print the
+# size comparison via foldersize_diff_present(), passing $force through
+# to refresh memoized values.
+sub foldersizes_diff_list
+{
+ my $mysync = shift ;
+ my $force = shift ;
+
+ my @folders = @{ $mysync->{h1_folders_wanted} } ;
+ my $nb_folders = scalar @folders ;
+ my $ct_folders = 0 ; # folder counter.
+
+ foreach my $folder1 ( @folders )
+ {
+ $ct_folders++ ;
+ my $counter_str = "$ct_folders/$nb_folders" ;
+ my $folder2 = imap2_folder_name( $mysync, $folder1 ) ;
+ foldersize_diff_present( $mysync, $folder1, $folder2, $counter_str, $force ) ;
+ }
+
+ return ;
+}
+
+# foldersizes_total( $mysync )
+# Sum the memoized per-folder values for both sides (add() maps undef to
+# 0), print the per-host summary block, and return the 4-tuple
+# ( total_nb_1, total_size_1, total_nb_2, total_size_2 ).
+sub foldersizes_total
+{
+ my $mysync = shift ;
+
+ my @folders_1 = @{ $mysync->{h1_folders_wanted} } ;
+ my @folders_2 = @h2_folders_from_1_wanted ;
+
+ my $nb_folders_1 = scalar( @folders_1 ) ;
+ my $nb_folders_2 = scalar( @folders_2 ) ;
+
+ my ( $total_size_1, $total_nb_1, $biggest_in_all_1 ) = ( 0, 0, 0 ) ;
+ my ( $total_size_2, $total_nb_2, $biggest_in_all_2 ) = ( 0, 0, 0 ) ;
+
+ foreach my $folder1 ( @folders_1 )
+ {
+ $total_size_1 = add( $total_size_1, $mysync->{ folder1 }->{ $folder1 }->{ size } ) ;
+ $total_nb_1 = add( $total_nb_1, $mysync->{ folder1 }->{ $folder1 }->{ nb_msgs } ) ;
+ $biggest_in_all_1 = max( $biggest_in_all_1 , $mysync->{ folder1 }->{ $folder1 }->{ biggest } ) ;
+ }
+
+ foreach my $folder2 ( @folders_2 )
+ {
+ $total_size_2 = add( $total_size_2, $mysync->{ folder2 }->{ $folder2 }->{ size } ) ;
+ $total_nb_2 = add( $total_nb_2, $mysync->{ folder2 }->{ $folder2 }->{ nb_msgs } ) ;
+ $biggest_in_all_2 = max( $biggest_in_all_2 , $mysync->{ folder2 }->{ $folder2 }->{ biggest } ) ;
+
+ }
+
+ myprintf( "Host1 Nb folders: %11s folders\n", $nb_folders_1 ) ;
+ myprintf( "Host2 Nb folders: %11s folders\n", $nb_folders_2 ) ;
+ myprint( "\n" ) ;
+ myprintf( "Host1 Nb messages: %11s messages\n", $total_nb_1 ) ;
+ myprintf( "Host2 Nb messages: %11s messages\n", $total_nb_2 ) ;
+ myprint( "\n" ) ;
+ myprintf( "Host1 Total size: %11s bytes (%s)\n", $total_size_1, bytes_display_string( $total_size_1 ) ) ;
+ myprintf( "Host2 Total size: %11s bytes (%s)\n", $total_size_2, bytes_display_string( $total_size_2 ) ) ;
+ myprint( "\n" ) ;
+ myprintf( "Host1 Biggest message: %11s bytes (%s)\n", $biggest_in_all_1, bytes_display_string( $biggest_in_all_1 ) ) ;
+ myprintf( "Host2 Biggest message: %11s bytes (%s)\n", $biggest_in_all_2, bytes_display_string( $biggest_in_all_2 ) ) ;
+ myprint( "\n" ) ;
+ myprintf( "Time spent on sizing: %11.1f seconds\n", timenext( $mysync ) ) ;
+
+ my @total_1_2 = ( $total_nb_1, $total_size_1, $total_nb_2, $total_size_2 ) ;
+ return @total_1_2 ;
+}
+
+# foldersizesatend_old( $mysync )
+# Legacy end-of-sync sizing: refresh the Host2 folder lists (new folders
+# may have been created during the sync), then size both sides with
+# foldersizes(), storing results in the h[12]_nb_msg_end/h[12]_bytes_end
+# globals. Counts an error if any total came back undefined.
+sub foldersizesatend_old
+{
+ my $mysync = shift ;
+ timenext( $mysync ) ;
+ return if ( $mysync->{imap1}->IsUnconnected( ) ) ;
+ return if ( $mysync->{imap2}->IsUnconnected( ) ) ;
+ # Get all folders on host2 again since new were created
+ @h2_folders_all = sort $mysync->{imap2}->folders();
+ for ( @h2_folders_all ) {
+ $h2_folders_all{ $_ } = 1 ;
+ $mysync->{h2_folders_all_UPPER}{ uc $_ } = 1 ;
+ } ;
+ ( $h1_nb_msg_end, $h1_bytes_end ) = foldersizes( $mysync, 'Host1', $mysync->{imap1}, $mysync->{ search1 }, $mysync->{abletosearch1}, @{ $mysync->{h1_folders_wanted} } ) ;
+ ( $h2_nb_msg_end, $h2_bytes_end ) = foldersizes( $mysync, 'Host2', $mysync->{imap2}, $mysync->{ search2 }, $mysync->{abletosearch2}, @h2_folders_from_1_wanted ) ;
+ if ( not all_defined( $h1_nb_msg_end, $h1_bytes_end, $h2_nb_msg_end, $h2_bytes_end ) ) {
+ my $error = "Failure getting foldersizes, final differences will not be calculated\n" ;
+ errors_incr( $mysync, $error ) ;
+ }
+ return ;
+}
+
+# foldersizesatend( $mysync )
+# End-of-sync sizing, diff-based variant: refresh the Host2 folder lists,
+# force-recompute all per-folder diffs ($FORCE bypasses memoization from
+# the initial sizing), then fill the h[12]_nb_msg_end/h[12]_bytes_end
+# globals from foldersizes_total(). Counts an error if any total is
+# undefined. Bails out early on a lost connection on either side.
+sub foldersizesatend
+{
+ my $mysync = shift ;
+ timenext( $mysync ) ;
+ return if ( $mysync->{imap1}->IsUnconnected( ) ) ;
+ return if ( $mysync->{imap2}->IsUnconnected( ) ) ;
+ # Get all folders on host2 again since new were created
+ @h2_folders_all = sort $mysync->{imap2}->folders();
+ for ( @h2_folders_all ) {
+ $h2_folders_all{ $_ } = 1 ;
+ $mysync->{h2_folders_all_UPPER}{ uc $_ } = 1 ;
+ } ;
+
+
+ foldersizes_diff_list( $mysync, $FORCE ) ;
+
+ ( $h1_nb_msg_end, $h1_bytes_end, $h2_nb_msg_end, $h2_bytes_end )
+ = foldersizes_total( $mysync ) ;
+
+
+ if ( not all_defined( $h1_nb_msg_end, $h1_bytes_end, $h2_nb_msg_end, $h2_bytes_end ) ) {
+ my $error = "Failure getting foldersizes, final differences will not be calculated\n" ;
+ errors_incr( $mysync, $error ) ;
+ }
+ return ;
+}
+
+
+# foldersizes_at_the_beggining( $mysync )
+# (sic: "beggining" — the misspelling is part of the established name.)
+# Initial sizing: print the folder diff listing, store the start totals
+# in $mysync->{h[12]_nb_msg_start}/{h[12]_bytes_start}, disable further
+# sizing on failure, and warn if the Host2 quota would be exceeded.
+sub foldersizes_at_the_beggining
+{
+ my $mysync = shift ;
+
+ myprint( << 'END_SIZE' ) ;
+
+Folders sizes before the synchronization.
+You can remove foldersizes listings by using "--nofoldersizes" and "--nofoldersizesatend"
+but then you will also lose the ETA (Estimation Time of Arrival) given after each message copy.
+END_SIZE
+
+ foldersizes_diff_list( $mysync ) ;
+
+ ( $mysync->{ h1_nb_msg_start }, $mysync->{ h1_bytes_start },
+ $mysync->{ h2_nb_msg_start }, $mysync->{ h2_bytes_start } )
+ = foldersizes_total( $mysync ) ;
+
+
+ if ( not all_defined(
+ $mysync->{ h1_nb_msg_start },
+ $mysync->{ h1_bytes_start },
+ $mysync->{ h2_nb_msg_start },
+ $mysync->{ h2_bytes_start } ) )
+ {
+ my $error = "Failure getting foldersizes, ETA and final diff will not be displayed\n" ;
+ errors_incr( $mysync, $error ) ;
+ $mysync->{ foldersizes } = 0 ;
+ $mysync->{ foldersizesatend } = 0 ;
+ return ;
+ }
+
+ # Warn (as an error) when the data to copy exceeds Host2's quota limit.
+ my $h2_bytes_limit = $mysync->{h2}->{quota_limit_bytes} || 0 ;
+ if ( $h2_bytes_limit and ( $h2_bytes_limit < $mysync->{ h1_bytes_start } ) )
+ {
+ my $quota_percent = mysprintf( '%.0f', $NUMBER_100 * $mysync->{ h1_bytes_start } / $h2_bytes_limit ) ;
+ my $error = "Host2: Quota limit will be exceeded! Over $quota_percent % ( $mysync->{ h1_bytes_start } bytes / $h2_bytes_limit bytes )\n" ;
+ errors_incr( $mysync, $error ) ;
+ }
+ return ;
+}
+
+
+# Globals:
+# @h2_folders_from_1_wanted
+
+# foldersizes_at_the_beggining_old( $mysync )
+# Legacy initial sizing using the per-side foldersizes() walk instead of
+# the diff listing. Same outputs and quota warning as the current
+# foldersizes_at_the_beggining(). Uses global @h2_folders_from_1_wanted.
+sub foldersizes_at_the_beggining_old
+{
+ my $mysync = shift ;
+
+ myprint( << 'END_SIZE' ) ;
+
+Folders sizes before the synchronization.
+You can remove foldersizes listings by using "--nofoldersizes" and "--nofoldersizesatend"
+but then you will also lose the ETA (Estimation Time of Arrival) given after each message copy.
+END_SIZE
+
+ ( $mysync->{ h1_nb_msg_start }, $mysync->{ h1_bytes_start } ) =
+ foldersizes( $mysync, 'Host1', $mysync->{imap1}, $mysync->{ search1 },
+ $mysync->{abletosearch1}, @{ $mysync->{h1_folders_wanted} } ) ;
+ ( $mysync->{ h2_nb_msg_start }, $mysync->{ h2_bytes_start } ) =
+ foldersizes( $mysync, 'Host2', $mysync->{imap2}, $mysync->{ search2 },
+ $mysync->{abletosearch2}, @h2_folders_from_1_wanted ) ;
+
+ if ( not all_defined( $mysync->{ h1_nb_msg_start },
+ $mysync->{ h1_bytes_start }, $mysync->{ h2_nb_msg_start }, $mysync->{ h2_bytes_start } ) )
+ {
+ my $error = "Failure getting foldersizes, ETA and final diff will not be displayed\n" ;
+ errors_incr( $mysync, $error ) ;
+ $mysync->{ foldersizes } = 0 ;
+ $mysync->{ foldersizesatend } = 0 ;
+ return ;
+ }
+
+ my $h2_bytes_limit = $mysync->{h2}->{quota_limit_bytes} || 0 ;
+ if ( $h2_bytes_limit and ( $h2_bytes_limit < $mysync->{ h1_bytes_start } ) )
+ {
+ my $quota_percent = mysprintf( '%.0f', $NUMBER_100 * $mysync->{ h1_bytes_start } / $h2_bytes_limit ) ;
+ my $error = "Host2: Quota limit will be exceeded! Over $quota_percent % ( $mysync->{ h1_bytes_start } bytes / $h2_bytes_limit bytes )\n" ;
+ errors_incr( $mysync, $error ) ;
+ }
+ return ;
+}
+
+
+# total_bytes_max_reached( $mysync )
+# Return 1 (and announce it) when --exitwhenover is set and the running
+# transfer byte count has reached it; 0 when the option is unset;
+# empty/undef when the option is set but the limit is not yet reached.
+# All return values are false except the limit-reached case.
+sub total_bytes_max_reached
+{
+ my $mysync = shift ;
+
+ if ( ! $mysync->{ exitwhenover } ) {
+ return( 0 ) ;
+ }
+ if ( $mysync->{ total_bytes_transferred } >= $mysync->{ exitwhenover } ) {
+ myprint( "Maximum bytes transferred reached, $mysync->{total_bytes_transferred} >= $mysync->{ exitwhenover }, ending sync\n" ) ;
+ return( 1 ) ;
+ }
+ return ;
+}
+
+
+# Unit tests for mock_capability(): creation of the Test::MockObject,
+# and the capability() return value for the no-value, scalar, arrayref
+# and list argument forms (including scalar vs list calling context).
+sub tests_mock_capability
+{
+ note( 'Entering tests_mock_capability()' ) ;
+
+ my $myimap ;
+ ok( $myimap = mock_capability( ),
+ 'mock_capability: (1) no args => a Test::MockObject'
+ ) ;
+ ok( $myimap->isa( 'Test::MockObject' ),
+ 'mock_capability: (2) no args => a Test::MockObject'
+ ) ;
+
+ is( undef, $myimap->capability( ),
+ 'mock_capability: (3) no args => capability undef'
+ ) ;
+
+ ok( mock_capability( $myimap ),
+ 'mock_capability: (1) one arg => MockObject'
+ ) ;
+
+ is( undef, $myimap->capability( ),
+ 'mock_capability: (2) one arg OO style => capability undef'
+ ) ;
+
+ ok( mock_capability( $myimap, $NUMBER_123456 ),
+ 'mock_capability: (1) two args 123456 => capability 123456'
+ ) ;
+
+ is( $NUMBER_123456, $myimap->capability( ),
+ 'mock_capability: (2) two args 123456 => capability 123456'
+ ) ;
+
+ ok( mock_capability( $myimap, 'ABCD' ),
+ 'mock_capability: (1) two args ABCD => capability ABCD'
+ ) ;
+ is( 'ABCD', $myimap->capability( ),
+ 'mock_capability: (2) two args ABCD => capability ABCD'
+ ) ;
+
+ ok( mock_capability( $myimap, [ 'ABCD' ] ),
+ 'mock_capability: (1) two args [ ABCD ] => capability [ ABCD ]'
+ ) ;
+ is_deeply( [ 'ABCD' ], $myimap->capability( ),
+ 'mock_capability: (2) two args [ ABCD ] => capability [ ABCD ]'
+ ) ;
+
+ ok( mock_capability( $myimap, [ 'ABC', 'DEF' ] ),
+ 'mock_capability: (1) two args [ ABC, DEF ] => capability [ ABC, DEF ]'
+ ) ;
+ is_deeply( [ 'ABC', 'DEF' ], $myimap->capability( ),
+ 'mock_capability: (2) two args [ ABC, DEF ] => capability capability [ ABC, DEF ]'
+ ) ;
+
+ ok( mock_capability( $myimap, 'ABC', 'DEF' ),
+ 'mock_capability: (1) two args ABC, DEF => capability [ ABC, DEF ]'
+ ) ;
+ is_deeply( [ 'ABC', 'DEF' ], [ $myimap->capability( ) ],
+ 'mock_capability: (2) two args ABC, DEF => capability capability [ ABC, DEF ]'
+ ) ;
+
+ ok( mock_capability( $myimap, 'IMAP4rev1', 'APPENDLIMIT=123456' ),
+ 'mock_capability: (1) two args IMAP4rev1, APPENDLIMIT=123456 => capability [ IMAP4rev1, APPENDLIMIT=123456 ]'
+ ) ;
+ is_deeply( [ 'IMAP4rev1', 'APPENDLIMIT=123456' ], [ $myimap->capability( ) ],
+ 'mock_capability: (2) two args IMAP4rev1, APPENDLIMIT=123456 => capability capability [ IMAP4rev1, APPENDLIMIT=123456 ]'
+ ) ;
+
+ note( 'Leaving tests_mock_capability()' ) ;
+ return ;
+}
+
+# sig_install_toggle_sleep( $mysync )
+# Install the 'toggle_sleep' handler on SIGUSR1, except on MSWin32
+# where that signal is not available.
+sub sig_install_toggle_sleep
+{
+ my $mysync = shift ;
+ if ( 'MSWin32' ne $OSNAME ) {
+ #myprint( "sig_install( $mysync, \&toggle_sleep, 'USR1' )\n" ) ;
+ sig_install( $mysync, 'toggle_sleep', 'USR1' ) ;
+ }
+ #myprint( "Leaving sig_install_toggle_sleep\n" ) ;
+ return ;
+}
+
+
+# mock_capability( [ $myimap ], @capability_values )
+# Test helper: create a Test::MockObject when none is given, then mock
+# its capability() method to return @capability_values in list context
+# and the first value in scalar context. Returns the mock object.
+sub mock_capability
+{
+ my $myimap = shift ;
+ my @has_capability_value = @ARG ;
+ my ( $has_capability_value ) = @has_capability_value ;
+
+ if ( ! $myimap )
+ {
+ require_ok( "Test::MockObject" ) ;
+ $myimap = Test::MockObject->new( ) ;
+ }
+
+ $myimap->mock(
+ 'capability',
+ sub { return wantarray ?
+ @has_capability_value
+ : $has_capability_value ;
+ }
+ ) ;
+
+ return $myimap ;
+}
+
+
+# Unit tests for capability_of(): undef on missing arguments or an
+# unknown keyword, and the APPENDLIMIT value from a mocked capability
+# list.
+sub tests_capability_of
+{
+ note( 'Entering tests_capability_of()' ) ;
+
+ is( undef, capability_of( ),
+ 'capability_of: no args => undef' ) ;
+
+ my $myimap ;
+ is( undef, capability_of( $myimap ),
+ 'capability_of: undef => undef' ) ;
+
+
+ $myimap = mock_capability( $myimap, 'IMAP4rev1', 'APPENDLIMIT=123456' ) ;
+
+ is( undef, capability_of( $myimap, 'CACA' ),
+ 'capability_of: two args unknown capability => undef' ) ;
+
+
+ is( $NUMBER_123456, capability_of( $myimap, 'APPENDLIMIT' ),
+ 'capability_of: two args APPENDLIMIT 123456 => 123456 yeah!' ) ;
+
+ note( 'Leaving tests_capability_of()' ) ;
+ return ;
+}
+
+
+# capability_of( $imap, $capability_keyword )
+# Fetch the server capability list and return the VALUE of the first
+# "KEYWORD=VALUE" entry matching $capability_keyword, or undef/empty
+# when arguments are missing, the list is empty, or nothing matches.
+sub capability_of
+{
+ my $imap = shift || return ;
+ my $capability_keyword = shift || return ;
+
+ my @capability = $imap->capability ;
+
+ if ( ! @capability ) { return ; }
+ my $capability_value = search_in_array( $capability_keyword, @capability ) ;
+
+ return $capability_value ;
+}
+
+
+# Unit tests for search_in_array(): undef without an array, and value
+# lookup among one or several "KEY=VALUE" entries.
+sub tests_search_in_array
+{
+ note( 'Entering tests_search_in_array()' ) ;
+
+ is( undef, search_in_array( 'KA' ),
+ 'search_in_array: no array => undef ' ) ;
+
+ is( 'VA', search_in_array( 'KA', ( 'KA=VA' ) ),
+ 'search_in_array: KA KA=VA => VA ' ) ;
+
+ is( 'VA', search_in_array( 'KA', ( 'KA=VA', 'KB=VB' ) ),
+ 'search_in_array: KA KA=VA KB=VB => VA ' ) ;
+
+ is( 'VB', search_in_array( 'KB', ( 'KA=VA', 'KB=VB' ) ),
+ 'search_in_array: KA=VA KB=VB => VB ' ) ;
+
+ note( 'Leaving tests_search_in_array()' ) ;
+ return ;
+}
+
+# search_in_array( $key, @array )
+# Scan @array for the first item of the form "KEY=VALUE" whose KEY
+# equals $key exactly, and return its VALUE (possibly empty). Returns
+# empty/undef when no item matches.
+sub search_in_array
+{
+ my ( $key, @array ) = @ARG ;
+
+ foreach my $item ( @array )
+ {
+
+ if ( $item =~ /([^=]+)=(.*)/ )
+ {
+ if ( $1 eq $key )
+ {
+ return $2 ;
+ }
+ }
+ }
+
+ return ;
+}
+
+
+
+
+# Unit tests for appendlimit_from_capability(): undef without an imap
+# object, the numeric APPENDLIMIT value, and undef for a non-numeric
+# APPENDLIMIT.
+sub tests_appendlimit_from_capability
+{
+ note( 'Entering tests_appendlimit_from_capability()' ) ;
+
+ is( undef, appendlimit_from_capability( ),
+ 'appendlimit_from_capability: no args => undef'
+ ) ;
+
+ my $myimap ;
+ is( undef, appendlimit_from_capability( $myimap ),
+ 'appendlimit_from_capability: undef arg => undef'
+ ) ;
+
+
+ $myimap = mock_capability( $myimap, 'IMAP4rev1', 'APPENDLIMIT=123456' ) ;
+
+ # Normal behavior
+ is( $NUMBER_123456, appendlimit_from_capability( $myimap ),
+ 'appendlimit_from_capability: APPENDLIMIT=123456 => 123456'
+ ) ;
+
+ # Not a number
+ $myimap = mock_capability( $myimap, 'IMAP4rev1', 'APPENDLIMIT=ABC' ) ;
+
+ is( undef, appendlimit_from_capability( $myimap ),
+ 'appendlimit_from_capability: not a number => undef'
+ ) ;
+
+ note( 'Leaving tests_appendlimit_from_capability()' ) ;
+ return ;
+}
+
+
+# appendlimit_from_capability( $myimap )
+# Read the APPENDLIMIT value from the server's CAPABILITY response and
+# return it when it is an integer; return undef (with a warning when the
+# imap object itself is missing) otherwise.
+sub appendlimit_from_capability
+{
+ my $myimap = shift ;
+ if ( ! $myimap )
+ {
+ myprint( "Warn: no imap with call to appendlimit_from_capability\n" ) ;
+ return ;
+ }
+
+ #myprint( Data::Dumper->Dump( [ \$myimap ] ) ) ;
+ my $appendlimit = capability_of( $myimap, 'APPENDLIMIT' ) ;
+ #myprint( "has_capability APPENDLIMIT $appendlimit\n" ) ;
+ if ( is_an_integer( $appendlimit ) )
+ {
+ return $appendlimit ;
+ }
+ return ;
+}
+
+
+# Unit tests for appendlimit(): undef without arguments or without an
+# imap2 connection, and the value read from a mocked imap2 capability.
+sub tests_appendlimit
+{
+ note( 'Entering tests_appendlimit()' ) ;
+
+ is( undef, appendlimit( ),
+ 'appendlimit: no args => undef'
+ ) ;
+
+ my $mysync = { } ;
+
+ is( undef, appendlimit( $mysync ),
+ 'appendlimit: no imap2 => undef'
+ ) ;
+
+ my $myimap ;
+ $myimap = mock_capability( $myimap, 'IMAP4rev1', 'APPENDLIMIT=123456' ) ;
+
+ $mysync->{ imap2 } = $myimap ;
+
+ is( 123456, appendlimit( $mysync ),
+ 'appendlimit: imap2 with APPENDLIMIT=123456 => 123456'
+ ) ;
+
+ note( 'Leaving tests_appendlimit()' ) ;
+ return ;
+}
+
+# appendlimit( $mysync )
+# Return the Host2 (imap2) APPENDLIMIT announced via CAPABILITY,
+# printing how it was found; undef when absent or $mysync is missing.
+sub appendlimit
+{
+ my $mysync = shift || return ;
+ my $myimap = $mysync->{ imap2 } ;
+
+ my $appendlimit = appendlimit_from_capability( $myimap ) ;
+ if ( defined $appendlimit )
+ {
+ myprint( "Host2: found APPENDLIMIT=$appendlimit in CAPABILITY (use --appendlimit xxxx to override this automatic setting)\n" ) ;
+ return $appendlimit ;
+ }
+ return ;
+
+}
+
+
+# Unit tests for maxsize_setting(): --maxsize alone, server APPENDLIMIT
+# alone, --appendlimit overriding the server value, and both present
+# (the minimum of the two wins, in either order).
+sub tests_maxsize_setting
+{
+ note( 'Entering tests_maxsize_setting()' ) ;
+
+ is( undef, maxsize_setting( ),
+ 'maxsize_setting: no args => undef'
+ ) ;
+
+ my $mysync ;
+
+ is( undef, maxsize_setting( $mysync ),
+ 'maxsize_setting: undef arg => undef'
+ ) ;
+
+ $mysync = { } ;
+ $mysync->{ maxsize } = $NUMBER_123456 ;
+
+ # --maxsize alone
+ is( $NUMBER_123456, maxsize_setting( $mysync ),
+ 'maxsize_setting: --maxsize 123456 alone => 123456'
+ ) ;
+
+
+ $mysync = { } ;
+ my $myimap ;
+
+ $myimap = mock_capability( $myimap, 'IMAP4rev1', 'APPENDLIMIT=654321' ) ;
+ $mysync->{ imap2 } = $myimap ;
+
+ # APPENDLIMIT alone
+ is( $NUMBER_654321, maxsize_setting( $mysync ),
+ 'maxsize_setting: APPENDLIMIT 654321 alone => 654321'
+ ) ;
+
+ is( $NUMBER_654321, $mysync->{ maxsize },
+ 'maxsize_setting: APPENDLIMIT 654321 alone => maxsize 654321'
+ ) ;
+
+ # APPENDLIMIT with --appendlimit => --appendlimit wins
+ $mysync->{ appendlimit } = $NUMBER_123456 ;
+
+ is( $NUMBER_123456, maxsize_setting( $mysync ),
+ 'maxsize_setting: APPENDLIMIT 654321 + --appendlimit 123456 => 123456'
+ ) ;
+
+ is( $NUMBER_123456, $mysync->{ maxsize },
+ 'maxsize_setting: APPENDLIMIT 654321 + --appendlimit 123456 => maxsize 123456'
+ ) ;
+
+ # Fresh
+ $mysync = { } ;
+ $mysync->{ imap2 } = $myimap = mock_capability( $myimap, 'IMAP4rev1', 'APPENDLIMIT=654321' ) ;
+
+ # Case: "APPENDLIMIT >= --maxsize" => maxsize.
+ $mysync->{ maxsize } = $NUMBER_123456 ;
+
+ is( $NUMBER_123456, maxsize_setting( $mysync ),
+ 'maxsize_setting: APPENDLIMIT 654321 --maxsize 123456 => 123456'
+ ) ;
+
+ # Case: "APPENDLIMIT < --maxsize" => APPENDLIMIT.
+
+
+ # Fresh
+ $mysync = { } ;
+ $mysync->{ imap2 } = $myimap = mock_capability( $myimap, 'IMAP4rev1', 'APPENDLIMIT=123456' ) ;
+ $mysync->{ maxsize } = $NUMBER_654321 ;
+
+ is( $NUMBER_123456, maxsize_setting( $mysync ),
+ 'maxsize_setting: APPENDLIMIT 123456 --maxsize 654321 => 123456 '
+ ) ;
+
+ # Now --truncmess stuff
+
+
+
+ note( 'Leaving tests_maxsize_setting()' ) ;
+
+ return ;
+}
+
+# Three variables to take account of
+# appendlimit (given by --appendlimit or CAPABILITY...)
+# maxsize
+# truncmess
+
+# maxsize_setting( $mysync )
+# Decide the effective maxsize from --appendlimit (or, failing that, the
+# Host2 APPENDLIMIT capability) and --maxsize: when both are set, use
+# their minimum; when only one is set, use it. Stores the result in
+# $mysync->{maxsize} and returns it; undef when neither is available.
+sub maxsize_setting
+{
+ my $mysync = shift || return ;
+
+ if ( defined $mysync->{ appendlimit } )
+ {
+ myprint( "Host2: Getting appendlimit from --appendlimit $mysync->{ appendlimit }\n" ) ;
+ }
+ else
+ {
+ $mysync->{ appendlimit } = appendlimit( $mysync ) ;
+ }
+
+
+ if ( all_defined( $mysync->{ appendlimit }, $mysync->{ maxsize } ) )
+ {
+ my $min_maxsize_appendlimit = min( $mysync->{ maxsize }, $mysync->{ appendlimit } ) ;
+ myprint( "Host2: Setting maxsize to $min_maxsize_appendlimit (min of --maxsize $mysync->{ maxsize } and appendlimit $mysync->{ appendlimit }\n" ) ;
+ $mysync->{ maxsize } = $min_maxsize_appendlimit ;
+ return $mysync->{ maxsize } ;
+ }
+ elsif ( defined $mysync->{ appendlimit } )
+ {
+ myprint( "Host2: Setting maxsize to appendlimit $mysync->{ appendlimit }\n" ) ;
+ $mysync->{ maxsize } = $mysync->{ appendlimit } ;
+ return $mysync->{ maxsize } ;
+ }elsif ( defined $mysync->{ maxsize } )
+ {
+ return $mysync->{ maxsize } ;
+ }else
+ {
+ return ;
+ }
+}
+
+
+
+
+# all_defined( @list )
+# Return 1 when the list is non-empty and every element is defined;
+# 0 for an empty list or any undef element.
+sub all_defined
+{
+ if ( not @ARG ) {
+ return 0 ;
+ }
+ foreach my $elem ( @ARG ) {
+ if ( not defined $elem ) {
+ return 0 ;
+ }
+ }
+ return 1 ;
+}
+
+# Unit tests for all_defined(): empty lists and undef elements give 0,
+# fully defined lists give 1.
+sub tests_all_defined
+{
+ note( 'Entering tests_all_defined()' ) ;
+
+ is( 0, all_defined( ), 'all_defined: no param => 0' ) ;
+ is( 0, all_defined( () ), 'all_defined: void list => 0' ) ;
+ is( 0, all_defined( undef ), 'all_defined: undef => 0' ) ;
+ is( 0, all_defined( undef, undef ), 'all_defined: undef => 0' ) ;
+ is( 0, all_defined( 1, undef ), 'all_defined: 1 undef => 0' ) ;
+ is( 0, all_defined( undef, 1 ), 'all_defined: undef 1 => 0' ) ;
+ is( 1, all_defined( 1, 1 ), 'all_defined: 1 1 => 1' ) ;
+ is( 1, all_defined( (1, 1) ), 'all_defined: (1 1) => 1' ) ;
+
+ note( 'Leaving tests_all_defined()' ) ;
+ return ;
+}
+
+
+# Unit tests for hashsynclocal(): failure cases (no/empty hashfile name,
+# missing directory, permission denied — skipped for root and Windows),
+# then creation of W/tmp/tests/imapsync_hash with a fixed key and a
+# second read returning the same HMAC.
+sub tests_hashsynclocal
+{
+ note( 'Entering tests_hashsynclocal()' ) ;
+
+ my $mysync = {
+ host1 => q{},
+ user1 => q{},
+ password1 => q{},
+ host2 => q{},
+ user2 => q{},
+ password2 => q{},
+ } ;
+
+ is( undef, hashsynclocal( $mysync ), 'hashsynclocal: no hashfile name' ) ;
+
+ $mysync->{ hashfile } = q{} ;
+ is( undef, hashsynclocal( $mysync ), 'hashsynclocal: empty hashfile name' ) ;
+
+ $mysync->{ hashfile } = './noexist/rrr' ;
+ is( undef, hashsynclocal( $mysync ), 'hashsynclocal: no exists hashfile dir' ) ;
+
+ SKIP: {
+ if ( 'MSWin32' eq $OSNAME or '0' eq $EFFECTIVE_USER_ID ) { skip( 'Tests only for non-root Unix', 1 ) ; }
+ $mysync->{ hashfile } = '/rrr' ;
+ is( undef, hashsynclocal( $mysync ), 'hashsynclocal: permission denied' ) ;
+ }
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'hashsynclocal: mkpath W/tmp/tests/' ) ;
+ $mysync->{ hashfile } = 'W/tmp/tests/imapsync_hash' ;
+
+ ok( ! -e 'W/tmp/tests/imapsync_hash' || unlink 'W/tmp/tests/imapsync_hash', 'hashsynclocal: unlink W/tmp/tests/imapsync_hash' ) ;
+ ok( ! -e 'W/tmp/tests/imapsync_hash', 'hashsynclocal: verify there is no W/tmp/tests/imapsync_hash' ) ;
+ is( 'ecdeb4ede672794d173da4e08c52b8ee19b7d252', hashsynclocal( $mysync, 'mukksyhpmbixkxkpjlqivmlqsulpictj' ), 'hashsynclocal: creating/reading W/tmp/tests/imapsync_hash' ) ;
+ # A second time now
+ is( 'ecdeb4ede672794d173da4e08c52b8ee19b7d252', hashsynclocal( $mysync ), 'hashsynclocal: reading W/tmp/tests/imapsync_hash second time => same' ) ;
+
+ note( 'Leaving tests_hashsynclocal()' ) ;
+ return ;
+}
+
+# hashsynclocal( $mysync [, $hashkey ] )
+# Obtain the per-installation hash key: ensure the hashfile exists
+# (seeding it with $hashkey when given — tests only — or a random key),
+# read the key from the file's first line, and return the credentials
+# HMAC from hashsync(). Returns undef when the file cannot be
+# created/read or holds no key.
+sub hashsynclocal
+{
+ my $mysync = shift ;
+ my $hashkey = shift ; # Optional, only there for tests
+ my $hashfile = $mysync->{ hashfile } ;
+ $hashfile = createhashfileifneeded( $hashfile, $hashkey ) ;
+ if ( ! $hashfile ) {
+ return ;
+ }
+ $hashkey = firstline( $hashfile ) ;
+ if ( ! $hashkey ) {
+ myprint( "No hashkey!\n" ) ;
+ return ;
+ }
+ my $hashsynclocal = hashsync( $mysync, $hashkey ) ;
+ return( $hashsynclocal ) ;
+
+}
+
+# Unit tests for hashsync(): fixed HMAC-SHA1 digests for empty input,
+# host1 only, host1+host2, and a non-empty hash key.
+sub tests_hashsync
+{
+ note( 'Entering tests_hashsync()' ) ;
+
+
+ is( 'fbdb1d1b18aa6c08324b7d64b71fb76370690e1d', hashsync( {}, q{} ), 'hashsync: empty args' ) ;
+ my $mysync ;
+ $mysync->{ host1 } = 'zzz' ;
+ is( 'e86a28a3611c1e7bbaf8057cd00ae122781a11fe', hashsync( $mysync, q{} ), 'hashsync: host1 zzz => ' ) ;
+ is( 'e86a28a3611c1e7bbaf8057cd00ae122781a11fe', hashsync( $mysync, q{} ), 'hashsync: host1 zzz => ' ) ;
+ $mysync->{ host2 } = 'zzz' ;
+ is( '15959573e4a86763253a7aedb1a2b0c60d133dc2', hashsync( $mysync, q{} ), 'hashsync: + host2 zzz => ' ) ;
+ is( 'b8d4ab541b209c75928528020ca28ee43488bd8f', hashsync( $mysync, 'A' ), 'hashsync: + hashkey A => ' ) ;
+
+ note( 'Leaving tests_hashsync()' ) ;
+ return ;
+}
+
+# hashsync( $mysync, $hashkey )
+# Return the hex HMAC-SHA1, keyed with $hashkey, of the concatenated
+# host/user/password values of both sides (missing values count as
+# empty strings). Used to derive a stable per-sync identifier.
+sub hashsync
+{
+ my $mysync = shift ;
+ my $hashkey = shift ;
+
+ my $mystring = join( q{},
+ $mysync->{ host1 } || q{},
+ $mysync->{ user1 } || q{},
+ $mysync->{ password1 } || q{},
+ $mysync->{ host2 } || q{},
+ $mysync->{ user2 } || q{},
+ $mysync->{ password2 } || q{},
+ ) ;
+ my $hashsync = hmac_sha1_hex( $mystring, $hashkey ) ;
+ #myprint( "$hashsync\n" ) ;
+ return( $hashsync ) ;
+}
+
+
+# Unit test for createhashfileifneeded(): only the no-argument case,
+# which must return undef.
+sub tests_createhashfileifneeded
+{
+ note( 'Entering tests_createhashfileifneeded()' ) ;
+
+ is( undef, createhashfileifneeded( ), 'createhashfileifneeded: no parameters => undef' ) ;
+
+ note( 'Leaving tests_createhashfileifneeded()' ) ;
+ return ;
+}
+
+sub createhashfileifneeded
+{
+    # Ensure the hashkey file exists and is readable, creating it with
+    # $hashkey (or a random 32-letter key) when missing.
+    # Returns the file name on success, nothing on any failure.
+    # An existing file is never overwritten: the key is written
+    # "once for all times".
+    my $hashfile = shift ;
+    my $hashkey = shift || rand32( ) ;
+
+    # no name given => nothing to create
+    if ( ! $hashfile ) {
+        return ;
+    }
+    # already there and readable => reuse it
+    if ( -e -r $hashfile ) {
+        return $hashfile ;
+    }
+    # parent directory not writable => not creatable
+    if ( ! -w dirname( $hashfile ) ) {
+        return ;
+    }
+    # creatable; report the OS error ($!) if the open still fails
+    open my $FILE_HANDLE, '>', $hashfile
+    or do {
+        myprint( "Could not open $hashfile for writing: $!. Check permissions or disk space.\n" ) ;
+        return ;
+    } ;
+    myprint( "Writing random hashkey in $hashfile, once for all times\n" ) ;
+    print $FILE_HANDLE $hashkey ;
+    close $FILE_HANDLE ;
+    # Should be there now
+    if ( -e -r $hashfile ) {
+        return $hashfile ;
+    }
+    # unknown failure
+    return ;
+}
+
+sub tests_rand32
+{
+ # Unit tests for rand32(): generated strings are always 32 chars long.
+ note( 'Entering tests_rand32()' ) ;
+
+ my $string = rand32( ) ;
+ myprint( "$string\n" ) ;
+ is( 32, length( $string ), 'rand32: 32 characters long' ) ;
+ is( 32, length( rand32( ) ), 'rand32: 32 characters long, another one' ) ;
+
+ note( 'Leaving tests_rand32()' ) ;
+ return ;
+}
+
+sub rand32
+{
+    # Return a random 32-character string of lowercase ASCII letters,
+    # used as a default hashkey.
+    my @alphabet = ( 'a' .. 'z' ) ;
+    return join q{}, map { $alphabet[ rand @alphabet ] } ( 1 .. 32 ) ;
+}
+
+sub imap_id_stuff
+{
+ # Exchange IMAP ID (RFC 2971) with both hosts and store each side's
+ # response string. Skipped entirely when --noid was given
+ # ($mysync->{id} false).
+ my $mysync = shift ;
+
+ if ( not $mysync->{id} ) { return ; } ;
+
+ $mysync->{h1_imap_id} = imap_id( $mysync, $mysync->{imap1}, 'Host1' ) ;
+ #myprint( 'Host1: ' . $mysync->{h1_imap_id} ) ;
+ $mysync->{h2_imap_id} = imap_id( $mysync, $mysync->{imap2}, 'Host2' ) ;
+ #myprint( 'Host2: ' . $mysync->{h2_imap_id} ) ;
+
+ return ;
+}
+
+sub imap_id
+{
+ # Send the IMAP ID command to one host when it advertises the ID
+ # capability; the raw exchange is made visible by temporarily raising
+ # the connection's Debug level, then restoring it. Returns q{} or the
+ # string 'No ID capability' -- the raw server response is only printed,
+ # not captured (see the commented Data::Dumper line).
+ my ( $mysync, $imap, $Side ) = @_ ;
+
+ if ( not $mysync->{id} ) { return q{} ; } ;
+
+ $Side ||= q{} ;
+ my $imap_id_response = q{} ;
+
+ if ( not $imap->has_capability( 'ID' ) ) {
+ $imap_id_response = 'No ID capability' ;
+ myprint( "$Side: No ID capability\n" ) ;
+ }else{
+ my $id_inp = imapsync_id( $mysync, { side => lc $Side } ) ;
+ myprint( "\n$Side: found ID capability. Sending/receiving ID, presented in raw IMAP for now.\n"
+ . "In order to avoid sending/receiving ID, use option --noid\n" ) ;
+ my $debug_before = $imap->Debug( ) ;
+ # Protocol debug on so the ID dialog shows, restored just below.
+ $imap->Debug( 1 ) ;
+ my $id_out = $imap->tag_and_run( 'ID ' . $id_inp ) ;
+ #my $id_out = $imap->tag_and_run( 'ID NIL' ) ;
+ myprint( "\n" ) ;
+ $imap->Debug( $debug_before ) ;
+ #$imap_id_response = Data::Dumper->Dump( [ $id_out ], [ 'IMAP_ID' ] ) ;
+ }
+ return( $imap_id_response ) ;
+}
+
+sub imapsync_id
+{
+ # Build the client ID string sent with the IMAP ID command (RFC 2971).
+ # $overhashref entries override or extend the default fields.
+ # Returns the parenthesized IMAP argument string.
+ my $mysync = shift ;
+ my $overhashref = shift ;
+ # See http://tools.ietf.org/html/rfc2971.html
+
+ my $imapsync_id = { } ;
+
+ # Two alternative identities; the lamiral one is the one in use.
+ my $imapsync_id_lamiral = {
+ name => 'imapsync',
+ version => imapsync_version( $mysync ),
+ os => $OSNAME,
+ vendor => 'Gilles LAMIRAL',
+ 'support-url' => 'https://imapsync.lamiral.info/',
+ # Example of date-time: 19-Sep-2015 08:56:07
+ date => date_from_rcs( q{$Date: 2019/12/23 20:18:02 $ } ),
+ } ;
+
+ my $imapsync_id_github = {
+ name => 'imapsync',
+ version => imapsync_version( $mysync ),
+ os => $OSNAME,
+ vendor => 'github',
+ 'support-url' => 'https://github.com/imapsync/imapsync',
+ date => date_from_rcs( q{$Date: 2019/12/23 20:18:02 $ } ),
+ } ;
+
+ $imapsync_id = $imapsync_id_lamiral ;
+ #$imapsync_id = $imapsync_id_github ;
+ # Overrides win over the defaults (later keys replace earlier ones).
+ my %mix = ( %{ $imapsync_id }, %{ $overhashref } ) ;
+ my $imapsync_id_str = format_for_imap_arg( \%mix ) ;
+ #myprint( "$imapsync_id_str\n" ) ;
+ return( $imapsync_id_str ) ;
+}
+
+sub tests_imapsync_id
+{
+ # Unit test for imapsync_id(): override fields must replace the
+ # defaults in the formatted ID string.
+ note( 'Entering tests_imapsync_id()' ) ;
+
+ my $mysync ;
+ ok( '("name" "imapsync" "version" "111" "os" "beurk" "vendor" "Gilles LAMIRAL" "support-url" "https://imapsync.lamiral.info/" "date" "22-12-1968" "side" "host1")'
+ eq imapsync_id( $mysync,
+ {
+ version => 111,
+ os => 'beurk',
+ date => '22-12-1968',
+ side => 'host1'
+ }
+ ),
+ 'tests_imapsync_id override'
+ ) ;
+
+ note( 'Leaving tests_imapsync_id()' ) ;
+ return ;
+}
+
+sub format_for_imap_arg
+{
+    # Render a hash as an IMAP ID argument list: ("k1" "v1" "k2" "v2"),
+    # or NIL for an empty hash. Keys named in RFC 2971 come first, in
+    # RFC order; any remaining keys follow in hash order.
+    my $ref = shift ;
+
+    my %terms = %{ $ref } ;
+    return( 'NIL' ) if not %terms ;
+
+    my @pairs ;
+    foreach my $key ( qw( name version os os-version vendor support-url address date command arguments environment) ) {
+        next if not $terms{ $key } ;
+        push @pairs, $key, delete $terms{ $key } ;
+    }
+    push @pairs, %terms ; # leftovers, in hash order
+    return( '(' . join( q{ }, map { qq{"$_"} } @pairs ) . ')' ) ;
+}
+
+
+
+sub tests_format_for_imap_arg
+{
+ # Unit tests for format_for_imap_arg(): NIL for empty, quoted
+ # key/value pairs otherwise.
+ note( 'Entering tests_format_for_imap_arg()' ) ;
+
+ ok( 'NIL' eq format_for_imap_arg( { } ), 'format_for_imap_arg empty hash ref' ) ;
+ ok( '("name" "toto")' eq format_for_imap_arg( { name => 'toto' } ), 'format_for_imap_arg { name => toto }' ) ;
+ ok( '("name" "toto" "key" "val")' eq format_for_imap_arg( { name => 'toto', key => 'val' } ), 'format_for_imap_arg 2 x key val' ) ;
+
+ note( 'Leaving tests_format_for_imap_arg()' ) ;
+ return ;
+}
+
+sub quota
+{
+ # Query one host's quota (GETQUOTAROOT INBOX) when the QUOTA
+ # capability is present, store current/limit bytes in $mysync->{$side},
+ # print the fill percentage and count an error when it exceeds
+ # $QUOTA_PERCENT_LIMIT. $side is 'h1' or 'h2'.
+ my ( $mysync, $imap, $side ) = @_ ;
+
+ my %side = (
+ h1 => 'Host1',
+ h2 => 'Host2',
+ ) ;
+ my $Side = $side{ $side } ;
+ # Debug raised so the raw quota dialog is shown, restored on all paths.
+ my $debug_before = $imap->Debug( ) ;
+ $imap->Debug( 1 ) ;
+ if ( not $imap->has_capability( 'QUOTA' ) ) {
+ $imap->Debug( $debug_before ) ;
+ return ;
+ } ;
+ myprint( "\n$Side: found quota, presented in raw IMAP\n" ) ;
+ my $getquotaroot = $imap->getquotaroot( 'INBOX' ) ;
+ # Gmail INBOX quotaroot is "" but with it Mail::IMAPClient does a literal GETQUOTA {2} \n ""
+ #$imap->quota( 'ROOT' ) ;
+ #$imap->quota( '""' ) ;
+ myprint( "\n" ) ;
+ $imap->Debug( $debug_before ) ;
+ my $quota_limit_bytes = quota_extract_storage_limit_in_bytes( $mysync, $getquotaroot ) ;
+ my $quota_current_bytes = quota_extract_storage_current_in_bytes( $mysync, $getquotaroot ) ;
+ $mysync->{$side}->{quota_limit_bytes} = $quota_limit_bytes ;
+ $mysync->{$side}->{quota_current_bytes} = $quota_current_bytes ;
+ my $quota_percent ;
+ # Guard against division by zero when no limit is advertised.
+ if ( $quota_limit_bytes > 0 ) {
+ $quota_percent = mysprintf( '%.2f', $NUMBER_100 * $quota_current_bytes / $quota_limit_bytes ) ;
+ }else{
+ $quota_percent = 0 ;
+ }
+ myprint( "$Side: Quota current storage is $quota_current_bytes bytes. Limit is $quota_limit_bytes bytes. So $quota_percent % full\n" ) ;
+ if ( $QUOTA_PERCENT_LIMIT < $quota_percent ) {
+ my $error = "$Side: $quota_percent % full: it is time to find a bigger place! ( $quota_current_bytes bytes / $quota_limit_bytes bytes )\n" ;
+ errors_incr( $mysync, $error ) ;
+ }
+ return ;
+}
+
+sub tests_quota_extract_storage_limit_in_bytes
+{
+ # Unit test: the STORAGE limit field (KiB) is converted to bytes.
+ note( 'Entering tests_quota_extract_storage_limit_in_bytes()' ) ;
+
+ my $mysync = {} ;
+ my $imap_output = [
+ '* QUOTAROOT "INBOX" "Storage quota" "Messages quota"',
+ '* QUOTA "Storage quota" (STORAGE 1 104857600)',
+ '* QUOTA "Messages quota" (MESSAGE 2 100000)',
+ '5 OK Getquotaroot completed.'
+ ] ;
+ ok( $NUMBER_104_857_600 * $KIBI == quota_extract_storage_limit_in_bytes( $mysync, $imap_output ), 'quota_extract_storage_limit_in_bytes ') ;
+
+ note( 'Leaving tests_quota_extract_storage_limit_in_bytes()' ) ;
+ return ;
+}
+
+sub quota_extract_storage_limit_in_bytes
+{
+    # Extract the storage limit -- third field of "(STORAGE current limit)",
+    # expressed in KiB -- from a raw GETQUOTAROOT response, and convert it
+    # to bytes. Returns 0 when no STORAGE line is present.
+    my ( $mysync, $imap_output ) = @_ ;
+
+    my $limit_kb = 0 ;
+    foreach my $line ( @{ $imap_output } ) {
+        if ( $line =~ /.*\(\s*STORAGE\s+\d+\s+(\d+)\s*\)/x ) {
+            $limit_kb = $1 ; # first matching line wins
+            last ;
+        }
+    }
+    $mysync->{ debug } and myprint( "storage_limit_kb = $limit_kb\n" ) ;
+    return( $KIBI * $limit_kb ) ;
+}
+
+
+sub tests_quota_extract_storage_current_in_bytes
+{
+ # Unit test: the STORAGE current field (KiB) is converted to bytes.
+ note( 'Entering tests_quota_extract_storage_current_in_bytes()' ) ;
+
+ my $mysync = {} ;
+ my $imap_output = [
+ '* QUOTAROOT "INBOX" "Storage quota" "Messages quota"',
+ '* QUOTA "Storage quota" (STORAGE 1 104857600)',
+ '* QUOTA "Messages quota" (MESSAGE 2 100000)',
+ '5 OK Getquotaroot completed.'
+ ] ;
+ ok( 1*$KIBI == quota_extract_storage_current_in_bytes( $mysync, $imap_output ), 'quota_extract_storage_current_in_bytes: 1 => 1024 ') ;
+
+ note( 'Leaving tests_quota_extract_storage_current_in_bytes()' ) ;
+ return ;
+}
+
+sub quota_extract_storage_current_in_bytes
+{
+    # Extract the current storage usage -- second field of
+    # "(STORAGE current limit)", expressed in KiB -- from a raw
+    # GETQUOTAROOT response, converted to bytes. 0 when absent.
+    my ( $mysync, $imap_output ) = @_ ;
+
+    my $current_kb = 0 ;
+    foreach my $line ( @{ $imap_output } ) {
+        if ( $line =~ /.*\(\s*STORAGE\s+(\d+)\s+\d+\s*\)/x ) {
+            $current_kb = $1 ; # first matching line wins
+            last ;
+        }
+    }
+    $mysync->{ debug } and myprint( "storage_current_kb = $current_kb\n" ) ;
+    return( $KIBI * $current_kb ) ;
+}
+
+
+sub automap
+{
+    # Entry point of --automap: collect RFC 6154 special-use folders on
+    # both sides, build the list of well-known special folder names,
+    # guess the unflagged ones by name, then derive the automatic
+    # folder1 => folder2 mapping. A no-op (with a hint) under --noautomap.
+    my ( $mysync ) = @_ ;
+
+    if ( not $mysync->{automap} ) {
+        myprint( "Turned off automapping folders ( use --automap to turn on automapping )\n" ) ;
+        return ;
+    }
+    myprint( "Turned on automapping folders ( use --noautomap to turn off automapping )\n" ) ;
+
+    $mysync->{h1_special} = special_from_folders_hash( $mysync, $mysync->{imap1}, 'Host1' ) ;
+    $mysync->{h2_special} = special_from_folders_hash( $mysync, $mysync->{imap2}, 'Host2' ) ;
+
+    build_possible_special( $mysync ) ;
+    build_guess_special( $mysync ) ;
+    build_automap( $mysync ) ;
+
+    return ;
+}
+
+
+
+
+sub build_guess_special
+{
+    # For every folder on each side, guess its special-use flag from its
+    # name. Each folder records its guess; the reverse (flag => folder)
+    # entry is first-come-first-served, later candidates are reported
+    # and ignored.
+    my ( $mysync ) = shift ;
+
+    foreach my $side ( [ 'Host1', 'h1' ], [ 'Host2', 'h2' ] ) {
+        my ( $Side, $h ) = @{ $side } ;
+        foreach my $folder ( sort keys %{ $mysync->{ "${h}_folders_all" } } ) {
+            my $special = guess_special( $folder, $mysync->{possible_special}, $mysync->{ "${h}_prefix" } ) ;
+            next if not $special ;
+            $mysync->{ "${h}_special_guessed" }{ $folder } = $special ;
+            my $already_guessed = $mysync->{ "${h}_special_guessed" }{ $special } ;
+            if ( $already_guessed ) {
+                myprint( "$Side: $folder not $special because set to $already_guessed\n" ) ;
+            }
+            else {
+                $mysync->{ "${h}_special_guessed" }{ $special } = $folder ;
+            }
+        }
+    }
+    return ;
+}
+
+sub guess_special
+{
+    # Map a folder name to its special-use flag, looking up the full
+    # name first, then the name with the first occurrence of the
+    # account prefix removed. Returns q{} when neither matches.
+    my ( $folder, $possible_special_ref, $prefix ) = @_ ;
+
+    my $bare = $folder ;
+    $bare =~ s/\Q${prefix}\E//xms ; # strip the (literal) prefix
+    #$debug and myprint( "folder_no_prefix: $bare\n" ) ;
+
+    return(
+        $possible_special_ref->{ $folder }
+        || $possible_special_ref->{ $bare }
+        || q{}
+    ) ;
+}
+
+sub tests_guess_special
+{
+ # Unit tests for guess_special() using the real possible_special table.
+ note( 'Entering tests_guess_special()' ) ;
+
+ my $possible_special_ref = build_possible_special( my $mysync ) ;
+ ok( '\Sent' eq guess_special( 'Sent', $possible_special_ref, q{} ) ,'guess_special: Sent => \Sent' ) ;
+ ok( q{} eq guess_special( 'Blabla', $possible_special_ref, q{} ) ,'guess_special: Blabla => q{}' ) ;
+ ok( '\Sent' eq guess_special( 'INBOX.Sent', $possible_special_ref, 'INBOX.' ) ,'guess_special: INBOX.Sent => \Sent' ) ;
+ ok( '\Sent' eq guess_special( 'IN BOX.Sent', $possible_special_ref, 'IN BOX.' ) ,'guess_special: IN BOX.Sent => \Sent' ) ;
+
+ note( 'Leaving tests_guess_special()' ) ;
+ return ;
+}
+
+sub build_automap
+{
+ # Build $mysync->{f1f2auto}: for each wanted host1 folder, find the
+ # host2 folder carrying the same special-use flag, preferring declared
+ # flags over name-guessed ones on both sides. Folders with neither a
+ # declared nor a guessed flag stay unmapped.
+ my $mysync = shift ;
+ $mysync->{ debug } and myprint( "Entering build_automap\n" ) ;
+ foreach my $h1_fold ( @{ $mysync->{h1_folders_wanted} } ) {
+ my $h2_fold ;
+ my $h1_special = $mysync->{h1_special}{$h1_fold} ;
+ my $h1_special_guessed = $mysync->{h1_special_guessed}{$h1_fold} ;
+
+ # Case 1: special on both sides.
+ if ( $h1_special
+ and exists $mysync->{h2_special}{$h1_special} ) {
+ $h2_fold = $mysync->{h2_special}{$h1_special} ;
+ $mysync->{f1f2auto}{ $h1_fold } = $h2_fold ;
+ next ;
+ }
+ # Case 2: special on host1, not on host2
+ if ( $h1_special
+ and ( not exists $mysync->{h2_special}{$h1_special} )
+ and ( exists $mysync->{h2_special_guessed}{$h1_special} )
+ ) {
+ # special_guessed on host2
+ $h2_fold = $mysync->{h2_special_guessed}{$h1_special} ;
+ $mysync->{f1f2auto}{ $h1_fold } = $h2_fold ;
+ next ;
+ }
+ # Case 3: no special on host1, special on host2
+ if ( ( not $h1_special )
+ and ( $h1_special_guessed )
+ and ( exists $mysync->{h2_special}{$h1_special_guessed} )
+ ) {
+ $h2_fold = $mysync->{h2_special}{$h1_special_guessed} ;
+ $mysync->{f1f2auto}{ $h1_fold } = $h2_fold ;
+ next ;
+ }
+ # Case 4: no special on both sides.
+ if ( ( not $h1_special )
+ and ( $h1_special_guessed )
+ and ( not exists $mysync->{h2_special}{$h1_special_guessed} )
+ and ( exists $mysync->{h2_special_guessed}{$h1_special_guessed} )
+ ) {
+ $h2_fold = $mysync->{h2_special_guessed}{$h1_special_guessed} ;
+ $mysync->{f1f2auto}{ $h1_fold } = $h2_fold ;
+ next ;
+ }
+ }
+ return( $mysync->{f1f2auto} ) ;
+}
+
+# I will not add what there is at:
+# http://stackoverflow.com/questions/2185391/localized-gmail-imap-folders/2185548#2185548
+# because it works well without
+sub build_possible_special
+{
+ # Build and store the table mapping special-use flags to lists of
+ # well-known folder names (many localized, encoded in IMAP
+ # modified-UTF-7, e.g. '&BBIEQQQ1-'), then add the reverse
+ # folder-name => flag entries so lookups work both ways.
+ my $mysync = shift ;
+ my $possible_special = { } ;
+ # All|Archive|Drafts|Flagged|Junk|Sent|Trash
+
+ $possible_special->{'\All'} = [ 'All', 'All Messages', '&BBIEQQQ1-' ] ;
+ $possible_special->{'\Archive'} = [ 'Archive', 'Archives', '&BBAEQARFBDgEMg-' ] ;
+ $possible_special->{'\Drafts'} = [ 'Drafts', 'DRAFTS', '&BCcENQRABD0EPgQyBDgEOgQ4-', 'Szkice', 'Wersje robocze' ] ;
+ $possible_special->{'\Flagged'} = [ 'Flagged', 'Starred', '&BB8EPgQ8BDUERwQ1BD0EPQRLBDU-' ] ;
+ $possible_special->{'\Junk'} = [ 'Junk', 'junk', 'Spam', 'SPAM', '&BCEEPwQwBDw-',
+ 'Potwierdzony spam', 'Wiadomo&AVs-ci-&AVs-mieci',
+ 'Junk E-Mail', 'Junk Email'] ;
+ $possible_special->{'\Sent'} = [ 'Sent', 'Sent Messages', 'Sent Items',
+ 'Gesendete Elemente', 'Gesendete Objekte',
+ '&AMk-l&AOk-ments envoy&AOk-s', 'Envoy&AOk-', 'Objets envoy&AOk-s',
+ 'Elementos enviados',
+ '&kAFP4W4IMH8wojCkMMYw4A-',
+ '&BB4EQgQ,BEAEMAQyBDsENQQ9BD0ESwQ1-',
+ 'Elementy wys&AUI-ane'] ;
+ $possible_special->{'\Trash'} = [ 'Trash', 'TRASH',
+ '&BCMENAQwBDsENQQ9BD0ESwQ1-', '&BBoEPgRABDcEOAQ9BDA-',
+ 'Kosz',
+ 'Deleted Items', 'Deleted Messages' ] ;
+
+
+ # Reverse mapping: every candidate folder name points to its flag.
+ foreach my $special ( qw( \All \Archive \Drafts \Flagged \Junk \Sent \Trash ) ){
+ foreach my $possible_folder ( @{ $possible_special->{$special} } ) {
+ $possible_special->{ $possible_folder } = $special ;
+ } ;
+ }
+ $mysync->{possible_special} = $possible_special ;
+ $mysync->{ debug } and myprint( Data::Dumper->Dump( [ $possible_special ], [ 'possible_special' ] ) ) ;
+ return( $possible_special ) ;
+}
+
+sub tests_special_from_folders_hash
+{
+ # Unit tests for special_from_folders_hash() using a mocked
+ # Mail::IMAPClient providing folders_hash().
+ note( 'Entering tests_special_from_folders_hash()' ) ;
+
+ my $mysync = {} ;
+ require_ok( "Test::MockObject" ) ;
+ my $imapT = Test::MockObject->new( ) ;
+
+ is( undef, special_from_folders_hash( ), 'special_from_folders_hash: no args' ) ;
+ is( undef, special_from_folders_hash( $mysync ), 'special_from_folders_hash: undef args' ) ;
+ is_deeply( {}, special_from_folders_hash( $mysync, $imapT ), 'special_from_folders_hash: $imap void' ) ;
+
+ $imapT->mock( 'folders_hash', sub { return( [ { name => 'Sent', attrs => [ '\Sent' ] } ] ) } ) ;
+
+ # Both directions must be recorded: name => flag and flag => name.
+ is_deeply( { Sent => '\Sent', '\Sent' => 'Sent' },
+ special_from_folders_hash( $mysync, $imapT ), 'special_from_folders_hash: $imap \Sent' ) ;
+
+ note( 'Leaving tests_special_from_folders_hash()' ) ;
+ return( ) ;
+}
+
+sub special_from_folders_hash
+{
+ # Ask the server (via Mail::IMAPClient folders_hash, >= 3.34) for the
+ # RFC 6154 special-use attributes and return a hash ref holding both
+ # directions: flag => folder and folder => flag. Returns nothing when
+ # $imap is undef, and an empty hash ref when folders_hash is missing.
+ my ( $mysync, $imap, $side ) = @_ ;
+ my %special = ( ) ;
+
+ if ( ! defined $imap ) { return ; }
+ $side = defined $side ? $side : 'Host?' ;
+
+ if ( ! $imap->can( 'folders_hash' ) ) {
+ my $error = "$side: To have automagic rfc6154 folder mapping, upgrade Mail::IMAPClient >= 3.34\n" ;
+ errors_incr( $mysync, $error ) ;
+ return( \%special ) ; # empty hash ref
+ }
+ my $folders_hash = $imap->folders_hash( ) ;
+ foreach my $fhash (@{ $folders_hash } ) {
+ my @special = grep { /\\(?:All|Archive|Drafts|Flagged|Junk|Sent|Trash)/x } @{ $fhash->{attrs} } ;
+ if ( @special ) {
+ my $special = $special[0] ; # keep first one. Could be not very good.
+ # First folder wins for each flag; later ones are only reported.
+ if ( exists $special{ $special } ) {
+ myprintf( "%s: special %-20s = %s already assigned to %s\n",
+ $side, $fhash->{name}, join( q{ }, @special ), $special{ $special } ) ;
+ }else{
+ myprintf( "%s: special %-20s = %s\n",
+ $side, $fhash->{name}, join( q{ }, @special ) ) ;
+ $special{ $special } = $fhash->{name} ;
+ $special{ $fhash->{name} } = $special ; # double entry value => key
+ }
+ }
+ }
+ myprint( "\n" ) if ( %special ) ;
+ return( \%special ) ;
+}
+
+sub errors_incr
+{
+ # Count one more error; optionally log and print its message. When the
+ # count reaches --errorsmax (default $ERRORS_MAX) the whole run is
+ # aborted via exit_clean(), after an optional full error listing.
+ my ( $mysync, @error ) = @ARG ;
+ $mysync->{nb_errors}++ ;
+
+ if ( @error ) {
+ errors_log( $mysync, @error ) ;
+ myprint( @error ) ;
+ }
+
+ $mysync->{errorsmax} ||= $ERRORS_MAX ;
+ if ( $mysync->{nb_errors} >= $mysync->{errorsmax} ) {
+ myprint( "Maximum number of errors $mysync->{errorsmax} reached ( you can change $mysync->{errorsmax} to any value, for example 100 with --errorsmax 100 ). Exiting.\n" ) ;
+ if ( $mysync->{errorsdump} ) {
+ myprint( errorsdump( $mysync->{nb_errors}, errors_log( $mysync ) ) ) ;
+ # again since errorsdump( ) can be very verbose and masquerade previous warning
+ myprint( "Maximum number of errors $mysync->{errorsmax} reached ( you can change $mysync->{errorsmax} to any value, for example 100 with --errorsmax 100 ). Exiting.\n" ) ;
+ }
+ exit_clean( $mysync, $EXIT_WITH_ERRORS_MAX ) ;
+ }
+ return ;
+}
+
+sub tests_errors_log
+{
+ # Unit tests for errors_log(): messages accumulate across calls and
+ # the full journal is returned each time.
+ note( 'Entering tests_errors_log()' ) ;
+ is( undef, errors_log( ), 'errors_log: no args => undef' ) ;
+ my $mysync = {} ;
+ is( undef, errors_log( $mysync ), 'errors_log: empty => undef' ) ;
+ is_deeply( [ 'aieaie' ], [ errors_log( $mysync, 'aieaie' ) ], 'errors_log: aieaie => aieaie' ) ;
+ # cumulative
+ is_deeply( [ 'aieaie' ], [ errors_log( $mysync ) ], 'errors_log: nothing more => aieaie' ) ;
+ is_deeply( [ 'aieaie', 'ouille' ], [ errors_log( $mysync, 'ouille' ) ], 'errors_log: ouille => aieaie ouille' ) ;
+ is_deeply( [ 'aieaie', 'ouille' ], [ errors_log( $mysync ) ], 'errors_log: nothing more => aieaie ouille' ) ;
+ note( 'Leaving tests_errors_log()' ) ;
+ return ;
+}
+
+sub errors_log
+{
+    # Append the given error strings (joined into one entry) to the
+    # per-sync error journal, then return the whole journal; returns
+    # nothing while the journal is empty.
+    my ( $mysync, @error ) = @ARG ;
+
+    $mysync->{errors_log} ||= [] ;
+    push @{ $mysync->{errors_log} }, join( q{}, @error ) if @error ;
+
+    return @{ $mysync->{errors_log} } ? @{ $mysync->{errors_log} } : ( ) ;
+}
+
+
+sub errorsdump
+{
+    # Build a printable listing of all recorded errors, each prefixed
+    # "Err i/total: ". Returns q{} when the log is empty.
+    my ( $nb_errors, @errors_log ) = @ARG ;
+
+    return q{} if not @errors_log ;
+
+    my $listing = "++++ Listing $nb_errors errors encountered during the sync ( avoid this listing with --noerrorsdump ).\n" ;
+    my $num = 0 ;
+    foreach my $error ( @errors_log ) {
+        $num += 1 ;
+        $listing .= "Err $num/$nb_errors: $error" ;
+    }
+    return( $listing ) ;
+}
+
+
+sub tests_live_result
+{
+ # Report the outcome of the live test run from its error count.
+ note( 'Entering tests_live_result()' ) ;
+
+ my $nb_errors = shift ;
+ if ( $nb_errors ) {
+ myprint( "Live tests failed with $nb_errors errors\n" ) ;
+ } else {
+ myprint( "Live tests ended successfully\n" ) ;
+ }
+ note( 'Leaving tests_live_result()' ) ;
+ return ;
+}
+
+
+sub size_filtered_flag
+{
+ # Return 1 when a message of $h1_size bytes is excluded by the
+ # --maxsize/--minsize filters, 0 otherwise. Note both bounds are
+ # inclusive ( >= maxsize, <= minsize ).
+ # NOTE(review): maxsize is read from $mysync but minsize from the
+ # file-scoped global $minsize -- looks inconsistent; confirm before
+ # unifying.
+ my $mysync = shift ;
+ my $h1_size = shift ;
+
+ if ( defined $mysync->{ maxsize } and $h1_size >= $mysync->{ maxsize } ) {
+ return( 1 ) ;
+ }
+ if ( defined $minsize and $h1_size <= $minsize ) {
+ return( 1 ) ;
+ }
+ return( 0 ) ;
+}
+
+sub sync_flags_fir
+{
+ # Sync the flags of one already-paired message using the flag values
+ # cached in the fetch results ($h?_fir_ref), skipping messages
+ # excluded by the size filters or with a missing UID on either side.
+ my ( $mysync, $h1_fold, $h1_msg, $h2_fold, $h2_msg, $permanentflags2, $h1_fir_ref, $h2_fir_ref ) = @_ ;
+
+ if ( not defined $h1_msg ) { return } ;
+ if ( not defined $h2_msg ) { return } ;
+
+ my $h1_size = $h1_fir_ref->{$h1_msg}->{'RFC822.SIZE'} ;
+ return if size_filtered_flag( $mysync, $h1_size ) ;
+
+ # use cached flag values for efficiency (no extra FETCH round-trip)
+ my $h1_flags = $h1_fir_ref->{ $h1_msg }->{ 'FLAGS' } || q{} ;
+ my $h2_flags = $h2_fir_ref->{ $h2_msg }->{ 'FLAGS' } || q{} ;
+
+ sync_flags( $mysync, $h1_fold, $h1_msg, $h1_flags, $h2_fold, $h2_msg, $h2_flags, $permanentflags2 ) ;
+
+ return ;
+}
+
+sub sync_flags_after_copy
+{
+ # Activated with option --syncflagsaftercopy
+ # Re-read the freshly copied message's flags from host2 and align them
+ # with the host1 flags; complains (without counting an error) when the
+ # flags cannot be fetched.
+ my( $mysync, $h1_fold, $h1_msg, $h1_flags, $h2_fold, $h2_msg, $permanentflags2 ) = @_ ;
+
+ if ( my @h2_flags = $mysync->{imap2}->flags( $h2_msg ) ) {
+ my $h2_flags = "@h2_flags" ;
+ ( $mysync->{ debug } or $debugflags ) and myprint( "Host2: msg $h2_fold/$h2_msg flags before sync flags after copy ( $h2_flags )\n" ) ;
+ sync_flags( $mysync, $h1_fold, $h1_msg, $h1_flags, $h2_fold, $h2_msg, $h2_flags, $permanentflags2 ) ;
+ }else{
+ myprint( "Host2: msg $h2_fold/$h2_msg could not get its flags for sync flags after copy\n" ) ;
+ }
+ return ;
+}
+
+# Globals
+# $debug
+# $debugflags
+# $permanentflags2
+
+
+sub sync_flags
+{
+ # Make the host2 message's flags exactly match the host1 message's
+ # flags (after filtering host1 flags against what host2 permanently
+ # accepts). A STORE is issued only when the sorted flag lists differ,
+ # and never under --dry.
+ my( $mysync, $h1_fold, $h1_msg, $h1_flags, $h2_fold, $h2_msg, $h2_flags, $permanentflags2 ) = @_ ;
+
+ ( $mysync->{ debug } or $debugflags ) and
+ myprint( "Host1: flags init msg $h1_fold/$h1_msg flags( $h1_flags ) Host2 msg $h2_fold/$h2_msg flags( $h2_flags )\n" ) ;
+
+ # Keep only flags host2 can store; normalize case on both sides.
+ $h1_flags = flags_for_host2( $h1_flags, $permanentflags2 ) ;
+
+ $h2_flags = flagscase( $h2_flags ) ;
+
+ ( $mysync->{ debug } or $debugflags ) and
+ myprint( "Host1: flags filt msg $h1_fold/$h1_msg flags( $h1_flags ) Host2 msg $h2_fold/$h2_msg flags( $h2_flags )\n" ) ;
+
+
+ # compare flags - set flags if there a difference
+ my @h1_flags = sort split(q{ }, $h1_flags );
+ my @h2_flags = sort split(q{ }, $h2_flags );
+ my $diff = compare_lists( \@h1_flags, \@h2_flags );
+
+ $diff and ( $mysync->{ debug } or $debugflags )
+ and myprint( "Host2: flags msg $h2_fold/$h2_msg replacing h2 flags( $h2_flags ) with h1 flags( $h1_flags )\n" ) ;
+
+ # This sets flags exactly. So flags can be removed with this.
+ # When you remove a \Seen flag on host1 you want it
+ # to be removed on host2. Just add flags is not what
+ # we need most of the time, so no + like in "+FLAGS.SILENT".
+
+ if ( not $mysync->{dry} and $diff and not $mysync->{imap2}->store( $h2_msg, "FLAGS.SILENT (@h1_flags)" ) ) {
+ my $error_msg = join q{}, "Host2: flags msg $h2_fold/$h2_msg could not add flags [@h1_flags]: ",
+ $mysync->{imap2}->LastError || q{}, "\n" ;
+ errors_incr( $mysync, $error_msg ) ;
+ }
+
+ return ;
+}
+
+
+
+sub _filter
+{
+    # Shorten a string for logging: unless --debug is on, keep only the
+    # first and last $SIZE_MAX_STR characters around an ellipsis. The
+    # original length is always reported; a trailing CR(LF) is stripped.
+    my ( $mysync, $str ) = @_ ;
+    return q{} if not $str ;
+
+    my $keep = $SIZE_MAX_STR ;
+    my $len = length $str ;
+    if ( not $mysync->{ debug } and $len > 2 * $keep ) {
+        $str = ( substr $str, 0, $keep ) . '...' . ( substr $str, -$keep, $keep ) ;
+    }
+    $str =~ s/\012?\015$//x ;
+    return "(len=$len) " . $str ;
+}
+
+
+
+sub lost_connection
+{
+ # Return 1 (and count an error, printing the last IMAP command and
+ # error info) when the connection is gone, 0 when it is still up.
+ my( $mysync, $imap, $error_message ) = @_;
+ if ( $imap->IsUnconnected( ) ) {
+ $mysync->{nb_errors}++ ;
+ my $lcomm = $imap->LastIMAPCommand || q{} ;
+ # Fall back to the last history entry ($LAST presumably -1 -- confirm)
+ # when LastError is empty.
+ my $einfo = $imap->LastError || @{$imap->History}[$LAST] || q{} ;
+
+ # if string is long try reduce to a more reasonable size
+ $lcomm = _filter( $mysync, $lcomm ) ;
+ $einfo = _filter( $mysync, $einfo ) ;
+ myprint( "Failure: last command: $lcomm\n") if ( $mysync->{ debug } && $lcomm) ;
+ myprint( "Failure: lost connection $error_message: ", $einfo, "\n") ;
+ return( 1 ) ;
+ }
+ else{
+ return( 0 ) ;
+ }
+}
+
+sub tests_max
+{
+ # Unit tests for max(): numbers win over strings, non-numbers are
+ # ignored when any number is present, undef-only lists give undef.
+ note( 'Entering tests_max()' ) ;
+
+ is( 0, max( 0 ), 'max 0 => 0' ) ;
+ is( 1, max( 1 ), 'max 1 => 1' ) ;
+ is( $MINUS_ONE, max( $MINUS_ONE ), 'max -1 => -1') ;
+ is( undef, max( ), 'max no arg => undef' ) ;
+ is( undef, max( undef ), 'undef => undef' ) ;
+ is( undef, max( undef, undef ), 'undef, undef => undef' ) ;
+
+ is( $NUMBER_100, max( 1, $NUMBER_100 ), 'max 1 100 => 100' ) ;
+ is( $NUMBER_100, max( $NUMBER_100, 1 ), 'max 100 1 => 100' ) ;
+ is( $NUMBER_100, max( $NUMBER_100, $NUMBER_42, 1 ), 'max 100 42 1 => 100' ) ;
+ is( $NUMBER_100, max( $NUMBER_100, '42', 1 ), 'max 100 42 1 => 100' ) ;
+ is( $NUMBER_100, max( '100', '42', 1 ), 'max 100 42 1 => 100' ) ;
+ is( $NUMBER_100, max( $NUMBER_100, 'haha', 1 ), 'max 100 haha 1 => 100') ;
+ is( $NUMBER_100, max( 'bb', $NUMBER_100, 'haha' ), 'max bb 100 haha => 100') ;
+ is( $MINUS_ONE, max( q{}, $MINUS_ONE, 'haha' ), 'max "" -1 haha => -1') ;
+ is( $MINUS_ONE, max( q{}, $MINUS_ONE, $MINUS_TWO ), 'max "" -1 -2 => -1') ;
+ is( $MINUS_ONE, max( 'haha', $MINUS_ONE, $MINUS_TWO ), 'max haha -1 -2 => -1') ;
+ is( 1, max( $MINUS_ONE, 1 ), 'max -1 1 => 1') ;
+ is( 1, max( undef, 1 ), 'max undef 1 => 1' ) ;
+ is( 0, max( undef, 0 ), 'max undef 0 => 0' ) ;
+ is( 'haha', max( 'haha' ), 'max haha => haha') ;
+ is( 'bb', max( 'aa', 'bb' ), 'max aa bb => bb') ;
+ is( 'bb', max( 'bb', 'aa' ), 'max bb aa => bb') ;
+ is( 'bb', max( 'bb', 'aa', 'bb' ), 'max bb aa bb => bb') ;
+ note( 'Leaving tests_max()' ) ;
+ return ;
+}
+
+sub max
+{
+    # Return the largest item of the list: numerically when at least
+    # one numeric item exists (non-numbers are then ignored), otherwise
+    # the lexicographically largest defined string. Returns undef for
+    # an empty list; nothing when every item is undef.
+    my @list = @_ ;
+    return( undef ) if ( 0 == scalar @list ) ;
+
+    my @numbers = grep { is_number( $_ ) } @list ;
+    my @strings = grep { defined $_ and not is_number( $_ ) } @list ;
+
+    if ( @numbers ) {
+        return( ( sort { $a <=> $b } @numbers )[ -1 ] ) ;
+    }
+    if ( @strings ) {
+        return( ( sort { $a cmp $b } @strings )[ -1 ] ) ;
+    }
+    return ;
+}
+
+sub tests_is_number
+{
+ # Unit tests for is_number(): integers and reals are numbers;
+ # undef, strings and mixed tokens are not.
+ note( 'Entering tests_is_number()' ) ;
+
+ is( undef, is_number( ), 'is_number: no args => undef ' ) ;
+ is( undef, is_number( undef ), 'is_number: undef => undef ' ) ;
+ ok( is_number( 1 ), 'is_number: 1 => 1' ) ;
+ ok( is_number( 1.1 ), 'is_number: 1.1 => 1' ) ;
+ ok( is_number( 0 ), 'is_number: 0 => 1' ) ;
+ ok( is_number( -1 ), 'is_number: -1 => 1' ) ;
+ # 1.1.1 is a Perl v-string literal, not a number.
+ ok( ! is_number( 1.1.1 ), 'is_number: 1.1.1 => no' ) ;
+ ok( ! is_number( q{} ), 'is_number: q{} => no' ) ;
+ ok( ! is_number( 'haha' ), 'is_number: haha => no' ) ;
+ ok( ! is_number( '0haha' ), 'is_number: 0haha => no' ) ;
+ ok( ! is_number( '2haha' ), 'is_number: 2haha => no' ) ;
+ ok( ! is_number( 'haha2' ), 'is_number: haha2 => no' ) ;
+
+ note( 'Leaving tests_is_number()' ) ;
+ return ;
+}
+
+
+
+sub is_number
+{
+    # True (1) when the argument is a real number according to
+    # Regexp::Common's $RE{num}{real}; nothing for undef or any
+    # non-numeric string.
+    my $candidate = shift ;
+
+    return if not defined $candidate ;
+    return 1 if $candidate =~ /\A$RE{num}{real}\Z/ ;
+    return ;
+}
+
+sub tests_min
+{
+ # Unit tests for min(): numbers win over strings, non-numbers are
+ # ignored when any number is present.
+ note( 'Entering tests_min()' ) ;
+
+ is( 0, min( 0 ), 'min 0 => 0' ) ;
+ is( 1, min( 1 ), 'min 1 => 1' ) ;
+ is( $MINUS_ONE, min( $MINUS_ONE ), 'min -1 => -1' ) ;
+ is( undef, min( ), 'min no arg => undef' ) ;
+ is( 1, min( 1, $NUMBER_100 ), 'min 1 100 => 1' ) ;
+ is( 1, min( $NUMBER_100, 1 ), 'min 100 1 => 1' ) ;
+ is( 1, min( $NUMBER_100, $NUMBER_42, 1 ), 'min 100 42 1 => 1' ) ;
+ is( 1, min( $NUMBER_100, '42', 1 ), 'min 100 42 1 => 1' ) ;
+ is( 1, min( '100', '42', 1 ), 'min 100 42 1 => 1' ) ;
+ is( 1, min( $NUMBER_100, 'haha', 1 ), 'min 100 haha 1 => 1') ;
+ is( $MINUS_ONE, min( $MINUS_ONE, 1 ), 'min -1 1 => -1') ;
+
+ is( 1, min( undef, 1 ), 'min undef 1 => 1' ) ;
+ is( 0, min( undef, 0 ), 'min undef 0 => 0' ) ;
+ is( 1, min( undef, 1 ), 'min undef 1 => 1' ) ;
+ is( 0, min( undef, 2, 0, 1 ), 'min undef, 2, 0, 1 => 0' ) ;
+
+ is( 'haha', min( 'haha' ), 'min haha => haha') ;
+ is( 'aa', min( 'aa', 'bb' ), 'min aa bb => aa') ;
+ is( 'aa', min( 'bb', 'aa' ), 'min bb aa bb => aa') ;
+ is( 'aa', min( 'bb', 'aa', 'bb' ), 'min bb aa bb => aa') ;
+
+ note( 'Leaving tests_min()' ) ;
+ return ;
+}
+
+
+sub min
+{
+    # Return the smallest item of the list: numerically when at least
+    # one numeric item exists (non-numbers are then ignored), otherwise
+    # the lexicographically smallest item. Returns undef for an empty
+    # list. Note undef items land in the non-number bucket, matching
+    # the original behavior.
+    my @list = @_ ;
+    return( undef ) if ( 0 == scalar @list ) ;
+
+    my @numbers = grep { is_number( $_ ) } @list ;
+    my @others = grep { not is_number( $_ ) } @list ;
+
+    if ( @numbers ) {
+        return( ( sort { $a <=> $b } @numbers )[ 0 ] ) ;
+    }
+    if ( @others ) {
+        return( ( sort { $a cmp $b } @others )[ 0 ] ) ;
+    }
+    return ;
+}
+
+
+sub check_lib_version
+{
+    # Check that the installed Mail::IMAPClient is usable by imapsync.
+    # Returns 1 when ok, 0 when the known-broken 2.2.9 release is found.
+    # (The unreachable trailing "return ;" of the previous revision is gone:
+    # both branches below already return.)
+    my $mysync = shift ;
+    $mysync->{ debug } and myprint( "IMAPClient $Mail::IMAPClient::VERSION\n" ) ;
+    if ( '2.2.9' eq $Mail::IMAPClient::VERSION ) {
+        myprint( "imapsync no longer supports Mail::IMAPClient 2.2.9, upgrade it\n" ) ;
+        return 0 ;
+    }
+    else{
+        # 3.x.x is no longer buggy with imapsync.
+        # 3.30 or currently superior is imposed in the Perl "use Mail::IMAPClient line".
+        return 1 ;
+    }
+}
+
+sub module_version_str
+{
+    # Format one "module-name version" line for the modules listing,
+    # the name left-aligned in a 20-character column.
+    my ( $module_name, $module_version ) = @_ ;
+    return( mysprintf( "%-20s %s\n", $module_name, $module_version ) ) ;
+}
+
+sub modulesversion
+{
+ # Return one formatted line per optional/required module with its
+ # installed version, or "Not installed" when it cannot be loaded.
+
+ my @list_version;
+
+ # Each value is a closure so the $VERSION variable is only read after
+ # the module has been require'd below.
+ my %modulesversion = (
+ 'Authen::NTLM' => sub { $Authen::NTLM::VERSION },
+ 'CGI' => sub { $CGI::VERSION },
+ 'Compress::Zlib' => sub { $Compress::Zlib::VERSION },
+ 'Crypt::OpenSSL::RSA' => sub { $Crypt::OpenSSL::RSA::VERSION },
+ 'Data::Uniqid' => sub { $Data::Uniqid::VERSION },
+ 'Digest::HMAC_MD5' => sub { $Digest::HMAC_MD5::VERSION },
+ 'Digest::HMAC_SHA1' => sub { $Digest::HMAC_SHA1::VERSION },
+ 'Digest::MD5' => sub { $Digest::MD5::VERSION },
+ 'Encode' => sub { $Encode::VERSION },
+ 'Encode::IMAPUTF7' => sub { $Encode::IMAPUTF7::VERSION },
+ 'File::Copy::Recursive' => sub { $File::Copy::Recursive::VERSION },
+ 'File::Spec' => sub { $File::Spec::VERSION },
+ 'Getopt::Long' => sub { $Getopt::Long::VERSION },
+ 'HTML::Entities' => sub { $HTML::Entities::VERSION },
+ 'IO::Socket' => sub { $IO::Socket::VERSION },
+ 'IO::Socket::INET' => sub { $IO::Socket::INET::VERSION },
+ 'IO::Socket::INET6' => sub { $IO::Socket::INET6::VERSION },
+ 'IO::Socket::IP' => sub { $IO::Socket::IP::VERSION },
+ 'IO::Socket::SSL' => sub { $IO::Socket::SSL::VERSION },
+ 'IO::Tee' => sub { $IO::Tee::VERSION },
+ 'JSON' => sub { $JSON::VERSION },
+ 'JSON::WebToken' => sub { $JSON::WebToken::VERSION },
+ 'LWP' => sub { $LWP::VERSION },
+ 'Mail::IMAPClient' => sub { $Mail::IMAPClient::VERSION },
+ 'MIME::Base64' => sub { $MIME::Base64::VERSION },
+ 'Net::Ping' => sub { $Net::Ping::VERSION },
+ 'Net::SSLeay' => sub { $Net::SSLeay::VERSION },
+ 'Term::ReadKey' => sub { $Term::ReadKey::VERSION },
+ 'Test::MockObject' => sub { $Test::MockObject::VERSION },
+ 'Time::HiRes' => sub { $Time::HiRes::VERSION },
+ 'Unicode::String' => sub { $Unicode::String::VERSION },
+ 'URI::Escape' => sub { $URI::Escape::VERSION },
+ #'Lalala' => sub { $Lalala::VERSION },
+ ) ;
+
+ foreach my $module_name ( sort keys %modulesversion ) {
+ # trick from http://www.perlmonks.org/?node_id=152122
+ my $file_name = $module_name . '.pm' ;
+ $file_name =~s,::,/,xmgs; # Foo::Bar::Baz => Foo/Bar/Baz.pm
+ my $v ;
+ eval {
+ require $file_name ;
+ $v = defined $modulesversion{ $module_name } ? $modulesversion{ $module_name }->() : q{?} ;
+ } or $v = q{Not installed} ;
+
+ push @list_version, module_version_str( $module_name, $v ) ;
+ }
+ return( @list_version ) ;
+}
+
+
+sub tests_command_line_nopassword
+{
+ # Unit tests for command_line_nopassword(): password values are
+ # MASKED unless --showpasswords is set.
+ note( 'Entering tests_command_line_nopassword()' ) ;
+
+ ok( q{} eq command_line_nopassword(), 'command_line_nopassword void' );
+ my $mysync = {} ;
+ ok( '--blabla' eq command_line_nopassword( $mysync, '--blabla' ), 'command_line_nopassword --blabla' );
+ #myprint( command_line_nopassword((qw{ --password1 secret1 })), "\n" ) ;
+ ok( '--password1 MASKED' eq command_line_nopassword( $mysync, qw{ --password1 secret1}), 'command_line_nopassword --password1' );
+ ok( '--blabla --password1 MASKED --blibli'
+ eq command_line_nopassword( $mysync, qw{ --blabla --password1 secret1 --blibli } ), 'command_line_nopassword --password1 --blibli' );
+ # With showpasswords on, passwords stay in clear text.
+ $mysync->{showpasswords} = 1 ;
+ ok( q{} eq command_line_nopassword(), 'command_line_nopassword void' );
+ ok( '--blabla' eq command_line_nopassword( $mysync, '--blabla'), 'command_line_nopassword --blabla' );
+ #myprint( command_line_nopassword((qw{ --password1 secret1 })), "\n" ) ;
+ ok( '--password1 secret1' eq command_line_nopassword( $mysync, qw{ --password1 secret1} ), 'command_line_nopassword --password1' );
+ ok( '--blabla --password1 secret1 --blibli'
+ eq command_line_nopassword( $mysync, qw{ --blabla --password1 secret1 --blibli } ), 'command_line_nopassword --password1 --blibli' );
+
+ note( 'Leaving tests_command_line_nopassword()' ) ;
+ return ;
+}
+
+# Construct a command line copy with passwords replaced by MASKED.
+sub command_line_nopassword
+{
+ my $mysync = shift @ARG ;
+ my @argv = @ARG ;
+ my @argv_nopassword ;
+
+ if ( $mysync->{ cmdcgi } ) {
+ @argv_nopassword = mask_password_value( @{ $mysync->{ cmdcgi } } ) ;
+ return( "@argv_nopassword" ) ;
+ }
+
+ if ( $mysync->{showpasswords} )
+ {
+ return( "@argv" ) ;
+ }
+
+ @argv_nopassword = mask_password_value( @argv ) ;
+ return("@argv_nopassword") ;
+}
+
+# Walk an argument list and replace the value following any option that
+# matches /-password[12]/ with the literal string 'MASKED'.
+# Returns the rewritten list; other arguments pass through unchanged.
+sub mask_password_value
+{
+ my @argv = @ARG ;
+ my @argv_nopassword ;
+ while ( @argv ) {
+ my $arg = shift @argv ; # option name or value
+ if ( $arg =~ m/-password[12]/x ) {
+ shift @argv ; # password value
+ push @argv_nopassword, $arg, 'MASKED' ; # option name and fake value
+ }else{
+ push @argv_nopassword, $arg ; # same option or value
+ }
+ }
+ return @argv_nopassword ;
+}
+
+
+# Unit tests for get_stdin_masked(); with no interactive input both
+# calls are expected to return the empty string.
+sub tests_get_stdin_masked
+{
+ note( 'Entering tests_get_stdin_masked()' ) ;
+
+ is( q{}, get_stdin_masked( ), 'get_stdin_masked: no args' ) ;
+ is( q{}, get_stdin_masked( 'Please ENTER: ' ), 'get_stdin_masked: ENTER' ) ;
+
+ note( 'Leaving tests_get_stdin_masked()' ) ;
+ return ;
+}
+
+#######################################################
+# The issue is that prompt() does not display the prompt
+# when the program is used like
+# { sleep 2 ; echo blablabla ; } | ./imapsync ...--host1 lo --user1 tata --host2 lo --user2 titi
+
+# use IO::Prompter ;
+# Read one line from stdin via IO::Prompter's prompt(), echoing '*' for
+# each typed character. $prompt defaults to 'Say something: '.
+# @ARGV is localized to () because prompt() would otherwise consume it.
+sub get_stdin_masked
+{
+ my $prompt = shift || 'Say something: ' ;
+ local @ARGV = () ;
+ my $input = prompt(
+ -prompt => $prompt,
+ -echo => '*',
+ ) ;
+ #myprint( "You said: $input\n" ) ;
+ return $input ;
+}
+
+# Newer password prompt: thin wrapper around get_stdin_masked(),
+# intended to replace ask_for_password() (see surrounding comments).
+sub ask_for_password_new
+{
+ my $prompt = shift ;
+ my $password = get_stdin_masked( $prompt ) ;
+ return $password ;
+}
+#########################################################
+
+
+# Prompt for a password on stdin with terminal echo disabled
+# (Term::ReadKey ReadMode 2 = no echo, 0 = restore).
+# NOTE(review): if the process dies between the two ReadMode calls the
+# terminal is left with echo off — confirm callers accept that risk.
+sub ask_for_password
+{
+ my $prompt = shift ;
+ myprint( $prompt ) ;
+ Term::ReadKey::ReadMode( 2 ) ;
+ ## no critic (InputOutput::ProhibitExplicitStdin)
+ my $password = <STDIN> ;
+ chomp $password ;
+ myprint( "\nGot it\n" ) ;
+ Term::ReadKey::ReadMode( 0 ) ;
+ return $password ;
+}
+
+# Have to refactor get_password1() get_password2()
+# to have only get_password() and two calls
+#
+# Resolve the host1 password into $mysync->{password1}, in priority order:
+# already set on the command line, --passfile1, PREAUTH/EXTERNAL authmech
+# (no password needed), the IMAPSYNC_PASSWORD1 environment variable, or
+# finally an interactive prompt. Exits via exit_clean() when --passfile1
+# is given but unreadable.
+sub get_password1
+{
+
+ my $mysync = shift ;
+
+ $mysync->{password1}
+ || $mysync->{ passfile1 }
+ || 'PREAUTH' eq $authmech1
+ || 'EXTERNAL' eq $authmech1
+ || $ENV{IMAPSYNC_PASSWORD1}
+ || do
+ {
+ myprint( << 'FIN_PASSFILE' ) ;
+
+If you are afraid of giving password on the command line arguments, you can put the
+password of user1 in a file named file1 and use "--passfile1 file1" instead of typing it.
+Then give this file restrictive permissions with the command "chmod 600 file1".
+An other solution is to set the environment variable IMAPSYNC_PASSWORD1
+FIN_PASSFILE
+ my $user = $authuser1 || $mysync->{user1} ;
+ my $host = $mysync->{host1} ;
+ my $prompt = "What's the password for $user" . ' at ' . "$host? (not visible while you type, then enter RETURN) " ;
+ $mysync->{password1} = ask_for_password( $prompt ) ;
+ } ;
+
+ # A readable --passfile1 overrides any password obtained above.
+ if ( defined $mysync->{ passfile1 } ) {
+ if ( ! -e -r $mysync->{ passfile1 } ) {
+ myprint( "Failure: file from parameter --passfile1 $mysync->{ passfile1 } does not exist or is not readable\n" ) ;
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EX_NOINPUT ) ;
+ }
+ # passfile1 readable
+ $mysync->{password1} = firstline ( $mysync->{ passfile1 } ) ;
+ return ;
+ }
+ if ( $ENV{IMAPSYNC_PASSWORD1} ) {
+ $mysync->{password1} = $ENV{IMAPSYNC_PASSWORD1} ;
+ return ;
+ }
+ return ;
+}
+
+# Resolve the host2 password into $mysync->{password2}; mirror image of
+# get_password1() (same priority order, using --passfile2,
+# IMAPSYNC_PASSWORD2 and $authmech2).
+sub get_password2
+{
+
+ my $mysync = shift ;
+
+ $mysync->{password2}
+ || $mysync->{ passfile2 }
+ || 'PREAUTH' eq $authmech2
+ || 'EXTERNAL' eq $authmech2
+ || $ENV{IMAPSYNC_PASSWORD2}
+ || do
+ {
+ myprint( << 'FIN_PASSFILE' ) ;
+
+If you are afraid of giving password on the command line arguments, you can put the
+password of user2 in a file named file2 and use "--passfile2 file2" instead of typing it.
+Then give this file restrictive permissions with the command "chmod 600 file2".
+An other solution is to set the environment variable IMAPSYNC_PASSWORD2
+FIN_PASSFILE
+ my $user = $authuser2 || $mysync->{user2} ;
+ my $host = $mysync->{host2} ;
+ my $prompt = "What's the password for $user" . ' at ' . "$host? (not visible while you type, then enter RETURN) " ;
+ $mysync->{password2} = ask_for_password( $prompt ) ;
+ } ;
+
+
+ # A readable --passfile2 overrides any password obtained above.
+ if ( defined $mysync->{ passfile2 } ) {
+ if ( ! -e -r $mysync->{ passfile2 } ) {
+ myprint( "Failure: file from parameter --passfile2 $mysync->{ passfile2 } does not exist or is not readable\n" ) ;
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EX_NOINPUT ) ;
+ }
+ # passfile2 readable
+ $mysync->{password2} = firstline ( $mysync->{ passfile2 } ) ;
+ return ;
+ }
+ if ( $ENV{IMAPSYNC_PASSWORD2} ) {
+ $mysync->{password2} = $ENV{IMAPSYNC_PASSWORD2} ;
+ return ;
+ }
+ return ;
+}
+
+
+
+
+# Remove the pid file recorded in $mysync->{pidfile}, if any exists.
+# Silently returns when $mysync or the pidfile entry is missing.
+sub remove_tmp_files
+{
+ my $mysync = shift or return ;
+ $mysync->{pidfile} or return ;
+ if ( -e $mysync->{pidfile} ) {
+ unlink $mysync->{pidfile} ;
+ }
+ return ;
+}
+
+# Tear-down performed just before process exit: remove temp files,
+# log out of both IMAP connections if still connected, announce the
+# log file location, and close the log file handle.
+sub cleanup_before_exit
+{
+ my $mysync = shift ;
+ remove_tmp_files( $mysync ) ;
+ if ( $mysync->{imap1} and $mysync->{imap1}->IsConnected() )
+ {
+ myprint( "Disconnecting from host1 $mysync->{ host1 } user1 $mysync->{ user1 }\n" ) ;
+ $mysync->{imap1}->logout( ) ;
+ }
+ if ( $mysync->{imap2} and $mysync->{imap2}->IsConnected() )
+ {
+ myprint( "Disconnecting from host2 $mysync->{ host2 } user2 $mysync->{ user2 }\n" ) ;
+ $mysync->{imap2}->logout( ) ;
+ }
+ if ( $mysync->{log} ) {
+ myprint( "Log file is $mysync->{logfile} ( to change it, use --logfile filepath ; or use --nolog to turn off logging )\n" ) ;
+ }
+ if ( $mysync->{log} and $mysync->{logfile_handle} ) {
+ #myprint( "Closing $mysync->{ logfile }\n" ) ;
+ close $mysync->{logfile_handle} ;
+ }
+ return ;
+}
+
+
+
+# Print optional messages and the exit status summary (using the
+# %EXIT_TXT description table), run cleanup_before_exit(), then exit
+# the process with $status. Does not return.
+sub exit_clean
+{
+ my $mysync = shift @ARG ;
+ my $status = shift @ARG ;
+ my @messages = @ARG ;
+ if ( @messages )
+ {
+ myprint( @messages ) ;
+ }
+ myprint( "Exiting with return value $status ($EXIT_TXT{$status}) $mysync->{nb_errors}/$mysync->{errorsmax} nb_errors/max_errors\n" ) ;
+ cleanup_before_exit( $mysync ) ;
+
+ exit $status ;
+}
+
+# Abort the run because a mandatory command-line option is missing:
+# count an error and exit with EX_USAGE via exit_clean().
+# The trailing return is never reached (exit_clean exits).
+sub missing_option
+{
+ my $mysync = shift ;
+ my $option = shift ;
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EX_USAGE, "$option option is mandatory, for help run $PROGRAM_NAME --help\n" ) ;
+ return ;
+}
+
+
+# Signal handler for ignored signals: count the occurrence per signal
+# name, announce it, print the current stats, and keep running.
+sub catch_ignore
+{
+ my $mysync = shift ;
+ my $signame = shift ;
+
+ my $sigcounter = ++$mysync->{ sigcounter }{ $signame } ;
+ myprint( "\nGot a signal $signame (my PID is $PROCESS_ID my PPID is ", getppid( ),
+ "). Received $sigcounter $signame signals so far. Thanks!\n" ) ;
+ stats( $mysync ) ;
+ return ;
+}
+
+
+# Signal handler for terminating signals. With a signal name: print
+# final stats, restore the default handler and re-raise the signal so
+# the shell sees a signal death (https://www.cons.org/cracauer/sigint.html);
+# if the process is somehow still alive afterwards, exit via exit_clean().
+# Without a signal name: exit directly with EXIT_BY_SIGNAL.
+sub catch_exit
+{
+ my $mysync = shift ;
+ my $signame = shift || q{} ;
+ if ( $signame ) {
+ myprint( "\nGot a signal $signame (my PID is $PROCESS_ID my PPID is ", getppid( ),
+ "). Asked to terminate\n" ) ;
+ if ( $mysync->{stats} ) {
+ myprint( "Here are the final stats of this sync not completely finished so far\n" ) ;
+ stats( $mysync ) ;
+ myprint( "Ended by a signal $signame (my PID is $PROCESS_ID my PPID is ",
+ getppid( ), "). I am asked to terminate immediately.\n" ) ;
+ myprint( "You should resynchronize those accounts by running a sync again,\n",
+ "since some messages and entire folders might still be missing on host2.\n" ) ;
+ }
+ ## no critic (RequireLocalizedPunctuationVars)
+ $SIG{ $signame } = 'DEFAULT'; # restore default action
+ # kill myself with $signame
+ # https://www.cons.org/cracauer/sigint.html
+ myprint( "Killing myself with signal $signame\n" ) ;
+ cleanup_before_exit( $mysync ) ;
+ kill( $signame, $PROCESS_ID ) ;
+ sleep 1 ;
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_BY_SIGNAL,
+ "Still there after killing myself with signal $signame...\n"
+ ) ;
+ }
+ else
+ {
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_BY_SIGNAL, "Exiting in catch_exit with no signal...\n" ) ;
+ }
+ return ;
+}
+
+
+# Signal handler that only counts and announces the signal (no stats,
+# no exit); installed for the "print" signals, e.g. HUP.
+sub catch_print
+{
+ my $mysync = shift ;
+ my $signame = shift ;
+
+ my $sigcounter = ++$mysync->{ sigcounter }{ $signame } ;
+ myprint( "\nGot a signal $signame (my PID is $PROCESS_ID my PPID is ", getppid( ),
+ "). Received $sigcounter $signame signals so far. Thanks!\n" ) ;
+ return ;
+}
+
+# Return true when called twice within $INTERVAL_TO_EXIT seconds
+# (tracked via $mysync->{lastcatch}); used to detect a double Ctrl-C.
+sub here_twice
+{
+ my $mysync = shift ;
+ my $now = time ;
+ my $previous = $mysync->{lastcatch} || 0 ;
+ $mysync->{lastcatch} = $now ;
+
+ if ( $INTERVAL_TO_EXIT >= $now - $previous ) {
+ return $TRUE ;
+ }else{
+ return $FALSE ;
+ }
+}
+
+
+# Signal handler (typically for INT): a second signal within
+# $INTERVAL_TO_EXIT seconds terminates via catch_exit(); a single one
+# forces a reconnect of both IMAP connections, exiting with
+# EXIT_CONNECTION_FAILURE when a reconnect fails.
+sub catch_reconnect
+{
+ my $mysync = shift ;
+ my $signame = shift ;
+ if ( here_twice( $mysync ) ) {
+ myprint( "Got two signals $signame within $INTERVAL_TO_EXIT seconds. Exiting...\n" ) ;
+ catch_exit( $mysync, $signame ) ;
+ }else{
+ myprint( "\nGot a signal $signame (my PID is $PROCESS_ID my PPID is ", getppid( ), ")\n",
+ "Hit 2 ctr-c within 2 seconds to exit the program\n",
+ "Hit only 1 ctr-c to reconnect to both imap servers\n",
+ ) ;
+ myprint( "For now only one signal $signame within $INTERVAL_TO_EXIT seconds.\n" ) ;
+
+ if ( ! defined $mysync->{imap1} ) { return ; }
+ if ( ! defined $mysync->{imap2} ) { return ; }
+
+ # Mark the connection as down so reconnect() really reconnects.
+ myprint( "Info: reconnecting to host1 imap server $mysync->{host1}\n" ) ;
+ $mysync->{imap1}->State( Mail::IMAPClient::Unconnected ) ;
+ $mysync->{imap1}->{IMAPSYNC_RECONNECT_COUNT} += 1 ;
+ if ( $mysync->{imap1}->reconnect( ) )
+ {
+ myprint( "Info: reconnected to host1 imap server $mysync->{host1}\n" ) ;
+ }
+ else
+ {
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_CONNECTION_FAILURE ) ;
+ }
+ myprint( "Info: reconnecting to host2 imap server\n" ) ;
+ $mysync->{imap2}->State( Mail::IMAPClient::Unconnected ) ;
+ $mysync->{imap2}->{IMAPSYNC_RECONNECT_COUNT} += 1 ;
+ if ( $mysync->{imap2}->reconnect( ) )
+ {
+ myprint( "Info: reconnected to host2 imap server $mysync->{host2}\n" ) ;
+ }
+ else
+ {
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_CONNECTION_FAILURE ) ;
+ }
+ myprint( "Info: reconnected to both imap servers\n" ) ;
+ }
+ return ;
+}
+
+# Install the process signal handlers. Under docker only exit signals
+# are wired (INT/QUIT/TERM -> catch_exit). Otherwise: QUIT/TERM exit,
+# INT reconnects, HUP prints, and --sigignore can override any of them
+# for the same signal names; a sleep-toggle handler is also installed.
+# Caller-provided lists in $mysync->{sigexit,sigreconnect,sigprint,sigignore}
+# take precedence over these defaults.
+sub install_signals
+{
+ my $mysync = shift ;
+
+ if ( under_docker_context( $mysync ) )
+ {
+ # output( $mysync, "Under docker context so leaving signals as they are\n" ) ;
+ output( $mysync, "Under docker context so installing only signals to exit\n" ) ;
+ @{ $mysync->{ sigexit } } = ( defined( $mysync->{ sigexit } ) ) ? @{ $mysync->{ sigexit } } : ( 'INT', 'QUIT', 'TERM' ) ;
+ sig_install( $mysync, 'catch_exit', @{ $mysync->{ sigexit } } ) ;
+ }
+ else
+ {
+ # Unix signals
+ @{ $mysync->{ sigexit } } = ( defined( $mysync->{ sigexit } ) ) ? @{ $mysync->{ sigexit } } : ( 'QUIT', 'TERM' ) ;
+ @{ $mysync->{ sigreconnect } } = ( defined( $mysync->{ sigreconnect } ) ) ? @{ $mysync->{ sigreconnect } } : ( 'INT' ) ;
+ @{ $mysync->{ sigprint } } = ( defined( $mysync->{ sigprint } ) ) ? @{ $mysync->{ sigprint } } : ( 'HUP' ) ;
+ @{ $mysync->{ sigignore } } = ( defined( $mysync->{ sigignore } ) ) ? @{ $mysync->{ sigignore } } : ( ) ;
+
+ #local %SIG = %SIG ;
+ sig_install( $mysync, 'catch_exit', @{ $mysync->{ sigexit } } ) ;
+ sig_install( $mysync, 'catch_reconnect', @{ $mysync->{ sigreconnect } } ) ;
+ sig_install( $mysync, 'catch_print', @{ $mysync->{ sigprint } } ) ;
+ # --sigignore can override sigexit, sigreconnect and sigprint (for the same signals only)
+ sig_install( $mysync, 'catch_ignore', @{ $mysync->{ sigignore } } ) ;
+
+ sig_install_toggle_sleep( $mysync ) ;
+ }
+
+ return ;
+}
+
+
+
+# Network-dependent unit tests for reconnect_12_if_needed() against the
+# public test1/test2.lamiral.info servers; also checks the per-connection
+# IMAPSYNC_RECONNECT_COUNT bookkeeping.
+sub tests_reconnect_12_if_needed
+{
+ note( 'Entering tests_reconnect_12_if_needed()' ) ;
+
+ my $mysync ;
+
+ $mysync->{imap1} = Mail::IMAPClient->new( ) ;
+ $mysync->{imap2} = Mail::IMAPClient->new( ) ;
+ $mysync->{imap1}->Server( 'test1.lamiral.info' ) ;
+ $mysync->{imap2}->Server( 'test2.lamiral.info' ) ;
+ is( 2, reconnect_12_if_needed( $mysync ), 'reconnect_12_if_needed: test1&test2 .lamiral.info => 1' ) ;
+ is( 1, $mysync->{imap1}->{IMAPSYNC_RECONNECT_COUNT}, 'reconnect_12_if_needed: test1.lamiral.info IMAPSYNC_RECONNECT_COUNT => 1' ) ;
+ is( 1, $mysync->{imap2}->{IMAPSYNC_RECONNECT_COUNT}, 'reconnect_12_if_needed: test2.lamiral.info IMAPSYNC_RECONNECT_COUNT => 1' ) ;
+
+ note( 'Leaving tests_reconnect_12_if_needed()' ) ;
+ return ;
+}
+
+# Ensure both imap1 and imap2 are connected, reconnecting each if
+# needed. Returns 2 when both succeed, undef as soon as one fails.
+sub reconnect_12_if_needed
+{
+ my $mysync = shift ;
+ #return 2 ;
+ if ( ! reconnect_if_needed( $mysync->{imap1} ) ) {
+ return ;
+ }
+ if ( ! reconnect_if_needed( $mysync->{imap2} ) ) {
+ return ;
+ }
+ # both were good
+ return 2 ;
+}
+
+
+# Unit tests for reconnect_if_needed(): undef for missing/serverless
+# clients, 1 plus an incremented IMAPSYNC_RECONNECT_COUNT against the
+# live test.lamiral.info server.
+sub tests_reconnect_if_needed
+{
+ note( 'Entering tests_reconnect_if_needed()' ) ;
+
+
+ my $myimap ;
+
+ is( undef, reconnect_if_needed( ), 'reconnect_if_needed: no args => undef' ) ;
+ is( undef, reconnect_if_needed( $myimap ), 'reconnect_if_needed: undef arg => undef' ) ;
+
+ $myimap = Mail::IMAPClient->new( ) ;
+ $myimap->Debug( 1 ) ;
+ is( undef, reconnect_if_needed( $myimap ), 'reconnect_if_needed: empty new Mail::IMAPClient => undef' ) ;
+ $myimap->Server( 'test.lamiral.info' ) ;
+ is( 1, reconnect_if_needed( $myimap ), 'reconnect_if_needed: test.lamiral.info => 1' ) ;
+ is( 1, $myimap->{IMAPSYNC_RECONNECT_COUNT}, 'reconnect_if_needed: test.lamiral.info IMAPSYNC_RECONNECT_COUNT => 1' ) ;
+
+ note( 'Leaving tests_reconnect_if_needed()' ) ;
+ return ;
+}
+
+# Reconnect the given Mail::IMAPClient when it reports Unconnected,
+# with a last forced reconnect (state reset + NOOP probe) as fallback.
+sub reconnect_if_needed
+{
+ # return undef upon failure.
+ # return 1 upon connection success, with or without reconnection.
+
+ my $imap = shift ;
+
+ if ( ! defined $imap ) { return ; }
+ if ( ! $imap->Server( ) ) { return ; }
+
+ if ( $imap->IsUnconnected( ) ) {
+ $imap->{IMAPSYNC_RECONNECT_COUNT} += 1 ;
+ if ( $imap->reconnect( ) ) {
+ return 1 ;
+ }
+ }else{
+ return 1 ;
+ }
+
+ # A last forced one
+ $imap->State( Mail::IMAPClient::Unconnected ) ;
+ $imap->reconnect( ) ;
+ $imap->{IMAPSYNC_RECONNECT_COUNT} += 1 ;
+ if ( $imap->noop ) {
+ # NOOP is ok
+ return 1 ;
+ }
+
+ return ;
+}
+
+
+
+# $sync->{id} = defined $sync->{id} ? $sync->{id} : 1 ;
+# imap_id_stuff( $sync ) ;
+
+# Connect (without login) to host1 and host2 of the given sync object,
+# returning the two host names joined by a space (q{} for an unset host).
+sub justconnect
+{
+ my $mysync = shift ;
+ # Bugfix: pass the $mysync parameter, not the global $sync, so the
+ # sub operates on the sync object it was actually given.
+ my $justconnect1 = justconnect1( $mysync ) ;
+ my $justconnect2 = justconnect2( $mysync ) ;
+ return "$justconnect1 $justconnect2";
+}
+
+# Connect to host1 without logging in: open the connection, exchange
+# IMAP ID, log out, and return the host name (q{} when host1 is unset).
+sub justconnect1
+{
+ my $mysync = shift ;
+ if ( $mysync->{host1} )
+ {
+ myprint( "Host1: Will just connect to $mysync->{host1} without login\n" ) ;
+ $mysync->{imap1} = connect_imap(
+ $mysync->{host1}, $mysync->{port1}, $debugimap1,
+ $mysync->{ssl1}, $mysync->{tls1}, 'Host1',
+ $mysync->{h1}->{timeout}, $mysync->{h1} ) ;
+ imap_id( $mysync, $mysync->{imap1}, 'Host1' ) ;
+ $mysync->{imap1}->logout( ) ;
+ return $mysync->{host1} ;
+ }
+
+ return q{} ;
+}
+
+# Connect to host2 without logging in; mirror image of justconnect1().
+sub justconnect2
+{
+ my $mysync = shift ;
+ if ( $mysync->{host2} )
+ {
+ myprint( "Host2: Will just connect to $mysync->{host2} without login\n" ) ;
+ $mysync->{imap2} = connect_imap(
+ $mysync->{host2}, $mysync->{port2}, $debugimap2,
+ $mysync->{ssl2}, $mysync->{tls2}, 'Host2',
+ $mysync->{h2}->{timeout}, $mysync->{h2} ) ;
+ imap_id( $mysync, $mysync->{imap2}, 'Host2' ) ;
+ $mysync->{imap2}->logout( ) ;
+ return $mysync->{host2} ;
+ }
+
+ return q{} ;
+}
+
+# True when running on the macosx.polarhome.com test machine; used by
+# the test subs below to skip network tests that fail there.
+sub skip_macosx
+{
+ #return ;
+ return( 'macosx.polarhome.com' eq hostname() ) ;
+}
+
+# Live-network unit tests for Mail::IMAPClient connections: plain ipv4,
+# ipv4 over SSL, and ipv6 over SSL (the latter skipped on hosts known
+# to lack working ipv6, including docker).
+sub tests_mailimapclient_connect
+{
+ note( 'Entering tests_mailimapclient_connect()' ) ;
+
+ my $imap ;
+ # ipv4
+ ok( $imap = Mail::IMAPClient->new( ), 'mailimapclient_connect ipv4: new' ) ;
+ is( 'Mail::IMAPClient', ref( $imap ), 'mailimapclient_connect ipv4: ref is Mail::IMAPClient' ) ;
+
+ # Mail::IMAPClient 3.40 die on this... So we skip it, thanks to "mature" IO::Socket::IP
+ # Mail::IMAPClient 3.42 is ok so this test is back.
+ is( undef, $imap->connect( ), 'mailimapclient_connect ipv4: connect with no server => failure' ) ;
+
+
+ is( 'test.lamiral.info', $imap->Server( 'test.lamiral.info' ), 'mailimapclient_connect ipv4: setting Server(test.lamiral.info)' ) ;
+ is( 1, $imap->Debug( 1 ), 'mailimapclient_connect ipv4: setting Debug( 1 )' ) ;
+ is( 143, $imap->Port( 143 ), 'mailimapclient_connect ipv4: setting Port( 143 )' ) ;
+ is( 3, $imap->Timeout( 3 ), 'mailimapclient_connect ipv4: setting Timout( 3 )' ) ;
+ like( ref( $imap->connect( ) ), qr/IO::Socket::INET|IO::Socket::IP/, 'mailimapclient_connect ipv4: connect to test.lamiral.info' ) ;
+ like( $imap->logout( ), qr/Mail::IMAPClient/, 'mailimapclient_connect ipv4: logout' ) ;
+ is( undef, undef $imap, 'mailimapclient_connect ipv4: free variable' ) ;
+
+ # ipv4 + ssl
+ ok( $imap = Mail::IMAPClient->new( ), 'mailimapclient_connect ipv4 + ssl: new' ) ;
+ is( 'test.lamiral.info', $imap->Server( 'test.lamiral.info' ), 'mailimapclient_connect ipv4 + ssl: setting Server(test.lamiral.info)' ) ;
+ is( 1, $imap->Debug( 1 ), 'mailimapclient_connect ipv4 + ssl: setting Debug( 1 )' ) ;
+ ok( $imap->Ssl( [ SSL_verify_mode => SSL_VERIFY_NONE, SSL_cipher_list => 'DEFAULT:!DH' ] ), 'mailimapclient_connect ipv4 + ssl: setting Ssl( SSL_VERIFY_NONE )' ) ;
+ is( 993, $imap->Port( 993 ), 'mailimapclient_connect ipv4 + ssl: setting Port( 993 )' ) ;
+ like( ref( $imap->connect( ) ), qr/IO::Socket::SSL/, 'mailimapclient_connect ipv4 + ssl: connect to test.lamiral.info' ) ;
+ like( $imap->logout( ), qr/Mail::IMAPClient/, 'mailimapclient_connect ipv4 + ssl: logout in ssl does not cause failure' ) ;
+ is( undef, undef $imap, 'mailimapclient_connect ipv4 + ssl: free variable' ) ;
+
+ # ipv6 + ssl
+ # Fails often on ks2ipv6.lamiral.info
+
+ ok( $imap = Mail::IMAPClient->new( ), 'mailimapclient_connect ipv6 + ssl: new' ) ;
+ is( 'petiteipv6.lamiral.info', $imap->Server( 'petiteipv6.lamiral.info' ), 'mailimapclient_connect ipv6 + ssl: setting Server petiteipv6.lamiral.info' ) ;
+ is( 3, $imap->Timeout( 3 ), 'mailimapclient_connect ipv4: setting Timout( 3 )' ) ;
+ ok( $imap->Ssl( [ SSL_verify_mode => SSL_VERIFY_NONE, SSL_cipher_list => 'DEFAULT:!DH' ] ), 'mailimapclient_connect ipv6 + ssl: setting Ssl( SSL_VERIFY_NONE )' ) ;
+ is( 993, $imap->Port( 993 ), 'mailimapclient_connect ipv6 + ssl: setting Port( 993 )' ) ;
+ SKIP: {
+ if (
+ 'CUILLERE' eq hostname()
+ or
+ skip_macosx()
+ or
+ -e '/.dockerenv'
+ or
+ 'pcHPDV7-HP' eq hostname()
+ )
+ {
+ skip( 'Tests avoided on CUILLERE/pcHPDV7-HP/macosx.polarhome.com/docker cannot do ipv6', 4 ) ;
+ }
+
+ is( 1, $imap->Debug( 1 ), 'mailimapclient_connect ipv4 + ssl: setting Debug( 1 )' ) ;
+
+ # It sounds stupid but it avoids failures on the next test about $imap->connect
+ is( '2a01:e34:ecde:70d0:223:54ff:fec2:36d7', resolv( 'petiteipv6.lamiral.info' ), 'resolv: petiteipv6.lamiral.info => 2001:41d0:8:bebd::1' ) ;
+
+ like( ref( $imap->connect( ) ), qr/IO::Socket::SSL/, 'mailimapclient_connect ipv6 + ssl: connect to petiteipv6.lamiral.info' ) ;
+ # This one is ok on petite, not on ks2, do not know why, so commented.
+ like( ref( $imap->logout( ) ), qr/Mail::IMAPClient/, 'mailimapclient_connect ipv6 + ssl: logout in ssl is ok on petiteipv6.lamiral.info' ) ;
+ }
+
+ is( undef, undef $imap, 'mailimapclient_connect ipv6 + ssl: free variable' ) ;
+
+
+ note( 'Leaving tests_mailimapclient_connect()' ) ;
+ return ;
+}
+
+
+# Regression test for a Mail::IMAPClient ipv6 connect bug against
+# ks2ipv6.lamiral.info port 143; skipped on hosts without working ipv6.
+sub tests_mailimapclient_connect_bug
+{
+ note( 'Entering tests_mailimapclient_connect_bug()' ) ;
+
+ my $imap ;
+
+ # ipv6
+ ok( $imap = Mail::IMAPClient->new( ), 'mailimapclient_connect_bug ipv6: new' ) ;
+ is( 'ks2ipv6.lamiral.info', $imap->Server( 'ks2ipv6.lamiral.info' ), 'mailimapclient_connect_bug ipv6: setting Server(ks2ipv6.lamiral.info)' ) ;
+ is( 143, $imap->Port( 143 ), 'mailimapclient_connect_bug ipv6: setting Port( 993 )' ) ;
+
+ SKIP: {
+ if (
+ 'CUILLERE' eq hostname()
+ or
+ skip_macosx()
+ or
+ -e '/.dockerenv'
+ or
+ 'pcHPDV7-HP' eq hostname()
+ )
+ {
+ skip( 'Tests avoided on CUILLERE/pcHPDV7-HP/macosx.polarhome.com/docker cannot do ipv6', 1 ) ;
+ }
+ like( ref( $imap->connect( ) ), qr/IO::Socket::INET/, 'mailimapclient_connect_bug ipv6: connect to ks2ipv6.lamiral.info' )
+ or diag( 'mailimapclient_connect_bug ipv6: ', $imap->LastError( ), $!, ) ;
+ }
+ #is( $imap->logout( ), undef, 'mailimapclient_connect_bug ipv6: logout in ssl causes failure' ) ;
+ is( undef, undef $imap, 'mailimapclient_connect_bug ipv6: free variable' ) ;
+
+ note( 'Leaving tests_mailimapclient_connect_bug()' ) ;
+ return ;
+}
+
+
+
+# Live-network unit tests for connect_socket(): wrap pre-opened
+# IO::Socket::INET6 (port 143) and IO::Socket::SSL (port 993) sockets
+# into Mail::IMAPClient objects; skipped where ipv6 is unavailable.
+sub tests_connect_socket
+{
+ note( 'Entering tests_connect_socket()' ) ;
+
+ is( undef, connect_socket( ), 'connect_socket: no args' ) ;
+
+ my $socket ;
+ my $imap ;
+ SKIP: {
+ if (
+ 'CUILLERE' eq hostname()
+ or
+ skip_macosx()
+ or
+ -e '/.dockerenv'
+ or
+ 'pcHPDV7-HP' eq hostname()
+ )
+ {
+ skip( 'Tests avoided on CUILLERE/pcHPDV7-HP/macosx.polarhome.com/docker cannot do ipv6', 2 ) ;
+ }
+
+ $socket = IO::Socket::INET6->new(
+ PeerAddr => 'ks2ipv6.lamiral.info',
+ PeerPort => 143,
+ ) ;
+
+
+ ok( $imap = connect_socket( $socket ), 'connect_socket: ks2ipv6.lamiral.info port 143 IO::Socket::INET6' ) ;
+ #$imap->Debug( 1 ) ;
+ # myprint( $imap->capability( ) ) ;
+ if ( $imap ) {
+ $imap->logout( ) ;
+ }
+
+ $IO::Socket::SSL::DEBUG = 4 ;
+ $socket = IO::Socket::SSL->new(
+ PeerHost => 'ks2ipv6.lamiral.info',
+ PeerPort => 993,
+ SSL_verify_mode => SSL_VERIFY_NONE,
+ SSL_cipher_list => 'DEFAULT:!DH',
+ ) ;
+ # myprint( $socket ) ;
+ ok( $imap = connect_socket( $socket ), 'connect_socket: ks2ipv6.lamiral.info port 993 IO::Socket::SSL' ) ;
+ #$imap->Debug( 1 ) ;
+ # myprint( $imap->capability( ) ) ;
+ # $socket->close( ) ;
+ if ( $imap ) {
+ $socket->close( ) ;
+ }
+ #$socket->close(SSL_no_shutdown => 1) ;
+ #$imap->logout( ) ;
+ #myprint( "\n" ) ;
+ #$imap->logout( ) ;
+ }
+ note( 'Leaving tests_connect_socket()' ) ;
+ return ;
+}
+
+# Wrap an already-connected socket into a Mail::IMAPClient object and
+# return it; undef when no socket is given. $host/$port and $banner are
+# currently computed but unused beyond the commented-out debug prints.
+sub connect_socket
+{
+ my( $socket ) = @ARG ;
+
+ if ( ! defined $socket ) { return ; }
+
+ my $host = $socket->peerhost( ) ;
+ my $port = $socket->peerport( ) ;
+ #print "socket->peerhost: ", $socket->peerhost( ), "\n" ;
+ #print "socket->peerport: ", $socket->peerport( ), "\n" ;
+ my $imap = Mail::IMAPClient->new( ) ;
+ $imap->Socket( $socket ) ;
+ my $banner = $imap->Results()->[0] ;
+ #myprint( "banner: $banner" ) ;
+ return $imap ;
+}
+
+
+# Live-network unit tests for probe_imapssl(): undef on bad/missing
+# host, and an IMAP "* OK" greeting from gmail and test1.lamiral.info.
+sub tests_probe_imapssl
+{
+ note( 'Entering tests_probe_imapssl()' ) ;
+
+ is( undef, probe_imapssl( ), 'probe_imapssl: no args => undef' ) ;
+ is( undef, probe_imapssl( 'unknown' ), 'probe_imapssl: unknown => undef' ) ;
+
+ note( "hostname is: ", hostname() ) ;
+ SKIP: {
+ if (
+ 'CUILLERE' eq hostname()
+ or
+ skip_macosx()
+ or
+ -e '/.dockerenv'
+ or
+ 'pcHPDV7-HP' eq hostname()
+ )
+ {
+ skip( 'Tests avoided on CUILLERE or pcHPDV7-HP or Mac or docker: cannot do ipv6', 0 ) ;
+ }
+ # fed up with this one
+ #like( probe_imapssl( 'ks2ipv6.lamiral.info' ), qr/^\* OK/, 'probe_imapssl: ks2ipv6.lamiral.info matches "* OK"' ) ;
+ } ;
+
+
+ # It sounds stupid but it avoids failures on the next test about $imap->connect
+ ok( resolv( 'imap.gmail.com' ), 'resolv: imap.gmail.com => something' ) ;
+ like( probe_imapssl( 'imap.gmail.com' ), qr/^\* OK/, 'probe_imapssl: imap.gmail.com matches "* OK"' ) ;
+
+ like( probe_imapssl( 'test1.lamiral.info' ), qr/^\* OK/, 'probe_imapssl: test1.lamiral.info matches "* OK"' ) ;
+
+ note( 'Leaving tests_probe_imapssl()' ) ;
+ return ;
+}
+
+
+# Open an SSL socket to $host on the standard IMAPS port and return the
+# server greeting banner (up to 64 KiB); undef when the connection or
+# handshake fails. Honors $sync->{debug} for IO::Socket::SSL debugging.
+sub probe_imapssl
+{
+ my $host = shift ;
+
+ if ( ! $host ) { return ; }
+ $sync->{ debug } and $IO::Socket::SSL::DEBUG = 4 ;
+ my $socket = IO::Socket::SSL->new(
+ PeerHost => $host,
+ PeerPort => $IMAP_SSL_PORT,
+ SSL_verifycn_scheme => 'imap',
+ SSL_verify_mode => $SSL_VERIFY_POLICY,
+ SSL_cipher_list => 'DEFAULT:!DH',
+ ) ;
+ if ( ! $socket ) { return ; }
+ $sync->{ debug } and print "socket: $socket\n" ;
+
+ my $banner ;
+ $socket->sysread( $banner, 65_536 ) ;
+ $sync->{ debug } and print "banner: $banner" ;
+ $socket->close( ) ;
+ return $banner ;
+
+}
+
+# Open an IMAP connection (no login) to $host:$port, optionally over
+# SSL or upgrading via STARTTLS, printing banner and capabilities.
+# Exits via exit_clean() on connection or STARTTLS failure; otherwise
+# returns the connected Mail::IMAPClient. $Side is 'Host1'/'Host2'
+# and is used only for messages.
+sub connect_imap
+{
+ my( $host, $port, $mydebugimap, $ssl, $tls, $Side, $mytimeout, $h ) = @_ ;
+ my $imap = Mail::IMAPClient->new( ) ;
+
+ if ( $ssl ) { set_ssl( $imap, $h ) }
+ $imap->Server( $host ) ;
+ $imap->Port( $port ) ;
+ $imap->Debug( $mydebugimap ) ;
+ $imap->Timeout( $mytimeout ) ;
+
+ my $side = lc $Side ;
+ myprint( "$Side: connecting on $side [$host] port [$port]\n" ) ;
+
+ if ( ! $imap->connect( ) )
+ {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_CONNECTION_FAILURE,
+ "$Side: Can not open imap connection on [$host]: ",
+ $imap->LastError,
+ " $OS_ERROR\n"
+ ) ;
+ }
+ myprint( "$Side IP address: ", $imap->Socket->peerhost(), "\n" ) ;
+
+ my $banner = $imap->Results()->[0] ;
+
+ myprint( "$Side banner: $banner" ) ;
+ myprint( "$Side capability: ", join(q{ }, @{ $imap->capability() || [] }), "\n" ) ;
+
+ if ( $tls ) {
+ set_tls( $imap, $h ) ;
+ if ( ! $imap->starttls( ) )
+ {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_TLS_FAILURE,
+ "$Side: Can not go to tls encryption on $side [$host]:",
+ $imap->LastError, "\n"
+ ) ;
+ }
+ myprint( "$Side: Socket successfuly converted to SSL\n" ) ;
+ }
+ return( $imap ) ;
+}
+
+
+# Full connect-and-login sequence for one side: build the client via
+# init_imap(), connect, auto-upgrade to TLS when the server advertises
+# STARTTLS (unless --notls1/--notls2 forced $tls defined), handle
+# PREAUTH, then authenticate via authenticate_imap(). Exits via
+# exit_clean() on connection, TLS or PREAUTH failure; returns the
+# logged-in Mail::IMAPClient.
+sub login_imap
+{
+
+ my @allargs = @_ ;
+ my(
+ $host, $port, $user, $domain, $password,
+ $mydebugimap, $mytimeout, $fastio,
+ $ssl, $tls, $authmech, $authuser, $reconnectretry,
+ $proxyauth, $uid, $split, $Side, $h, $mysync ) = @allargs ;
+
+ my $side = lc $Side ;
+ myprint( "$Side: connecting and login on $side [$host] port [$port] with user [$user]\n" ) ;
+
+ my $imap = init_imap( @allargs ) ;
+
+ if ( ! $imap->connect() )
+ {
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_CONNECTION_FAILURE,
+ "$Side failure: can not open imap connection on $side [$host] with user [$user]: ",
+ $imap->LastError . " $OS_ERROR\n"
+ ) ;
+ }
+ myprint( "$Side IP address: ", $imap->Socket->peerhost(), "\n" ) ;
+ my $banner = $imap->Results()->[0] ;
+
+ myprint( "$Side banner: $banner" ) ;
+ myprint( "$Side capability before authentication: ", join(q{ }, @{ $imap->capability() || [] }), "\n" ) ;
+
+ # $tls undef means "not explicitly disabled": opt in when advertised.
+ if ( (! $ssl) and (! defined $tls ) and $imap->has_capability( 'STARTTLS' ) ) {
+ myprint( "$Side: going to ssl because STARTTLS is in CAPABILITY. Use --notls1 or --notls2 to avoid that behavior\n" ) ;
+ $tls = 1 ;
+ }
+
+ if ( $authmech eq 'PREAUTH' ) {
+ if ( $imap->IsAuthenticated( ) ) {
+ $imap->Socket ;
+ myprintf("%s: Assuming PREAUTH for %s\n", $Side, $imap->Server ) ;
+ }else{
+ $mysync->{nb_errors}++ ;
+ exit_clean(
+ $mysync, $EXIT_AUTHENTICATION_FAILURE,
+ "$Side failure: error login on $side [$host] with user [$user] auth [PREAUTH]\n"
+ ) ;
+ }
+ }
+
+ if ( $tls ) {
+ set_tls( $imap, $h ) ;
+ if ( ! $imap->starttls( ) )
+ {
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_TLS_FAILURE,
+ "$Side failure: Can not go to tls encryption on $side [$host]:",
+ $imap->LastError, "\n"
+ ) ;
+ }
+ myprint( "$Side: Socket successfuly converted to SSL\n" ) ;
+ }
+
+ authenticate_imap( $imap, @allargs ) ;
+
+ myprint( "$Side: success login on [$host] with user [$user] auth [$authmech]\n" ) ;
+ return( $imap ) ;
+}
+
+
+# Authenticate an already-connected IMAP client: set credentials and the
+# auth mechanism (with callbacks for XOAUTH/XOAUTH2/PLAIN/EXTERNAL and a
+# dedicated path for X-MASTERAUTH), run login(), optionally finish with
+# proxyauth. On failure it either exits via exit_clean() or, for
+# non-LOGIN mechanisms still connected without authuser, falls back to
+# plain LOGIN. Exits on any remaining failure; returns nothing.
+sub authenticate_imap
+{
+ my( $imap,
+ $host, $port, $user, $domain, $password,
+ $mydebugimap, $mytimeout, $fastio,
+ $ssl, $tls, $authmech, $authuser, $reconnectretry,
+ $proxyauth, $uid, $split, $Side, $h, $mysync ) = @_ ;
+
+ check_capability( $imap, $authmech, $Side ) ;
+ $imap->User( $user ) ;
+ $imap->Domain( $domain ) if ( defined $domain ) ;
+ $imap->Authuser( $authuser ) ;
+ $imap->Password( $password ) ;
+
+ if ( 'X-MASTERAUTH' eq $authmech )
+ {
+ xmasterauth( $imap ) ;
+ return ;
+ }
+
+ if ( $proxyauth ) {
+ # proxyauth: first log in as the admin/authuser, proxyauth below.
+ $imap->Authmechanism(q{}) ;
+ $imap->User( $authuser ) ;
+ } else {
+ $imap->Authmechanism( $authmech ) unless ( $authmech eq 'LOGIN' or $authmech eq 'PREAUTH' ) ;
+ }
+
+ $imap->Authcallback(\&xoauth) if ( 'XOAUTH' eq $authmech ) ;
+ $imap->Authcallback(\&xoauth2) if ( 'XOAUTH2' eq $authmech ) ;
+ $imap->Authcallback(\&plainauth) if ( ( 'PLAIN' eq $authmech ) or ( 'EXTERNAL' eq $authmech ) ) ;
+
+
+ unless ( $authmech eq 'PREAUTH' or $imap->login( ) ) {
+ my $info = "$Side failure: Error login on [$host] with user [$user] auth" ;
+ my $einfo = $imap->LastError || @{$imap->History}[$LAST] ;
+ chomp $einfo ;
+ my $error = "$info [$authmech]: $einfo\n" ;
+ if ( ( $authmech eq 'LOGIN' ) or $imap->IsUnconnected( ) or $authuser ) {
+ $authuser ||= "" ;
+ myprint( "$Side info: authmech [$authmech] user [$user] authuser [$authuser] IsUnconnected [", $imap->IsUnconnected( ), "]\n" ) ;
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_AUTHENTICATION_FAILURE, $error ) ;
+ }else{
+ myprint( $error ) ;
+ }
+ # It is not secure to try plain text LOGIN when another authmech failed
+ # but I do it.
+ # I shall remove this code one day.
+ myprint( "$Side info: trying LOGIN Auth mechanism on [$host] with user [$user]\n" ) ;
+ $imap->Authmechanism(q{}) ;
+ if ( ! $imap->login( ) )
+ {
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_AUTHENTICATION_FAILURE,
+ "$info [LOGIN]: ",
+ $imap->LastError, "\n"
+ ) ;
+ }
+ }
+
+ if ( $proxyauth ) {
+ if ( ! $imap->proxyauth( $user ) ) {
+ my $info = "$Side failure: Error doing proxyauth as user [$user] on [$host] using proxy-login as [$authuser]" ;
+ my $einfo = $imap->LastError || @{$imap->History}[$LAST] ;
+ chomp $einfo ;
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync,
+ $EXIT_AUTHENTICATION_FAILURE,
+ "$info: $einfo\n"
+ ) ;
+ }
+ }
+
+ return ;
+}
+
+# Print whether the server advertises the requested AUTHENTICATE
+# mechanism (checking both "AUTH=<mech>" and the bare mechanism name).
+# Informational only: never fails. LOGIN without capability is common
+# so that warning is deliberately suppressed; for PLAIN a hint about
+# --ssl/--tls is printed.
+sub check_capability
+{
+
+ my( $imap, $authmech, $Side ) = @_ ;
+
+
+ if ( $imap->has_capability( "AUTH=$authmech" )
+ or $imap->has_capability( $authmech ) )
+ {
+ myprintf("%s: %s says it has CAPABILITY for AUTHENTICATE %s\n",
+ $Side, $imap->Server, $authmech) ;
+ return ;
+ }
+
+ if ( $authmech eq 'LOGIN' )
+ {
+ # Well, the warning is so common and useless that I prefer to remove it
+ # No more "... says it has NO CAPABILITY for AUTHENTICATE LOGIN"
+ return ;
+ }
+
+
+ myprintf( "%s: %s says it has NO CAPABILITY for AUTHENTICATE %s\n",
+ $Side, $imap->Server, $authmech ) ;
+
+ if ( $authmech eq 'PLAIN' )
+ {
+ myprint( "$Side: frequently PLAIN is only supported with SSL, try --ssl or --tls options\n" ) ;
+ }
+
+ return ;
+}
+
+# Configure the client's Ssl() arguments: start from imapsync defaults
+# (verify policy, imap verifycn scheme, cipher list), overlay any
+# user-supplied --sslargs from $h->{sslargs}, drop undef-valued keys,
+# and hand the result to Mail::IMAPClient as an array ref.
+sub set_ssl
+{
+ my ( $imap, $h ) = @_ ;
+ # SSL_version can be
+ # SSLv3 SSLv2 SSLv23 SSLv23:!SSLv2 (last one is the default in IO-Socket-SSL-1.953)
+ #
+
+ my $sslargs_hash = $h->{sslargs} ;
+
+ my $sslargs_default = {
+ SSL_verify_mode => $SSL_VERIFY_POLICY,
+ SSL_verifycn_scheme => 'imap',
+ SSL_cipher_list => 'DEFAULT:!DH',
+ } ;
+
+ # initiate with default values
+ my %sslargs_mix = %{ $sslargs_default } ;
+ # now override with passed values
+ @sslargs_mix{ keys %{ $sslargs_hash } } = values %{ $sslargs_hash } ;
+ # remove keys with undef values
+ foreach my $key ( keys %sslargs_mix ) {
+ delete $sslargs_mix{ $key } if ( not defined $sslargs_mix{ $key } ) ;
+ }
+ # back to an ARRAY
+ my @sslargs_mix = %sslargs_mix ;
+ #myprint( Data::Dumper->Dump( [ $sslargs_hash, $sslargs_default, \%sslargs_mix, \@sslargs_mix ] ) ) ;
+ $imap->Ssl( \@sslargs_mix ) ;
+ return ;
+}
+
+# Configure the client's Starttls() arguments; same merge logic as
+# set_ssl() but without the verifycn scheme default.
+sub set_tls
+{
+ my ( $imap, $h ) = @_ ;
+
+ my $sslargs_hash = $h->{sslargs} ;
+
+ my $sslargs_default = {
+ SSL_verify_mode => $SSL_VERIFY_POLICY,
+ SSL_cipher_list => 'DEFAULT:!DH',
+ } ;
+
+ # initiate with default values
+ my %sslargs_mix = %{ $sslargs_default } ;
+ # now override with passed values
+ @sslargs_mix{ keys %{ $sslargs_hash } } = values %{ $sslargs_hash } ;
+ # remove keys with undef values
+ foreach my $key ( keys %sslargs_mix ) {
+ delete $sslargs_mix{ $key } if ( not defined $sslargs_mix{ $key } ) ;
+ }
+ # back to an ARRAY
+ my @sslargs_mix = %sslargs_mix ;
+
+ $imap->Starttls( \@sslargs_mix ) ;
+ return ;
+}
+
+
+
+
+# Build and configure a Mail::IMAPClient (not yet connected) from the
+# login_imap() argument list: server/port, SSL args, fast IO, buffer
+# size, UID mode, Peek, debug/tee output, timeout, reconnect retries,
+# size-mismatch tolerance, and max command length derived from --split.
+# Returns the configured client.
+sub init_imap
+{
+ my(
+ $host, $port, $user, $domain, $password,
+ $mydebugimap, $mytimeout, $fastio,
+ $ssl, $tls, $authmech, $authuser, $reconnectretry,
+ $proxyauth, $uid, $split, $Side, $h, $mysync ) = @_ ;
+
+ my ( $imap ) ;
+
+ $imap = Mail::IMAPClient->new() ;
+
+ if ( $mysync->{ tee } )
+ {
+ # Well, it does not change anything, does it?
+ # It does when suppressing the hack with *STDERR
+ $imap->Debug_fh( $mysync->{ tee } ) ;
+ }
+
+ if ( $ssl ) { set_ssl( $imap, $h ) }
+ if ( $tls ) { } # can not do set_tls() here because connect() will directly do a STARTTLS
+ $imap->Clear(1);
+ $imap->Server($host);
+ $imap->Port($port);
+ $imap->Fast_io($fastio);
+ $imap->Buffer($buffersize || $DEFAULT_BUFFER_SIZE);
+ $imap->Uid($uid);
+
+
+ # Peek(1) avoids setting the \Seen flag when fetching messages.
+ $imap->Peek(1);
+ $imap->Debug($mydebugimap);
+ if ( $mysync->{ showpasswords } ) {
+ $imap->Showcredentials( 1 ) ;
+ }
+ defined $mytimeout and $imap->Timeout( $mytimeout ) ;
+
+ $imap->Reconnectretry( $reconnectretry ) if ( $reconnectretry ) ;
+ $imap->{IMAPSYNC_RECONNECT_COUNT} = 0 ;
+ $imap->Ignoresizeerrors( $allowsizemismatch ) ;
+ $split and $imap->Maxcommandlength( $SPLIT_FACTOR * $split ) ;
+
+
+ return( $imap ) ;
+
+}
+
+sub plainauth
+{
+ # Build the SASL PLAIN initial response, "authzid \0 authcid \0 password",
+ # base64-encoded without a trailing newline (empty eol argument).
+ my ( $code, $imap ) = @_ ;
+
+ my $plain = mysprintf( "%s\x00%s\x00%s",
+  $imap->User, $imap->Authuser, $imap->Password ) ;
+ return encode_base64( $plain, q{} ) ;
+}
+
+# Copy from https://github.com/imapsync/imapsync/pull/25/files
+# Changes "use" pragmas to "require".
+# The openssl system call shall be replaced by pure Perl and
+# https://metacpan.org/pod/Crypt::OpenSSL::PKCS12
+
+# Now the Joaquin Lopez code:
+#
+# Used this as an example: https://gist.github.com/gsainio/6322375
+#
+# And this as a reference: https://developers.google.com/accounts/docs/OAuth2ServiceAccount
+# (note there is an http/rest tab, where the real info is hidden away... went on a witch hunt
+# until I noticed that...)
+#
+# This is targeted at gmail to maintain compatibility after google's oauth1 service is deactivated
+# on May 5th, 2015: https://developers.google.com/gmail/oauth_protocol
+# If there are other oauth2 implementations out there, this would need to be modified to be
+# compatible
+#
+# This is a good guide on setting up the google api/apps side of the equation:
+# http://www.limilabs.com/blog/oauth2-gmail-imap-service-account
+#
+# 2016/05/27: Updated to support oauth/key data in the .json files Google now defaults to
+# when creating gmail service accounts. They're easier to work with since they require
+# neither decrypting nor specifying the oauth2 client id separately.
+#
+# If the password arg ends in .json, it will assume this new json method, otherwise it
+# will fallback to the "oauth client id;.p12" format it was previously using.
+sub xoauth2
+{
+ # Gmail XOAUTH2 via a Google service account: obtain an access token
+ # from Google's token endpoint with a signed JWT, then return the
+ # base64 SASL XOAUTH2 initial response for $imap->User.
+ # Modules are require'd lazily so that plain logins need none of them.
+ require JSON::WebToken ;
+ require LWP::UserAgent ;
+ require HTML::Entities ;
+ require JSON ;
+ require JSON::WebToken::Crypt::RSA ;
+ require Crypt::OpenSSL::RSA ;
+ require Encode::Byte ;
+ require IO::Socket::SSL ;
+
+ my $code = shift;
+ my $imap = shift;
+
+ my ($iss,$key);
+
+ # Password ending in .json => new-style service-account key file
+ if( $imap->Password =~ /^(.*\.json)$/x )
+ {
+ my $json = JSON->new( ) ;
+ my $filename = $1;
+ $sync->{ debug } and myprint( "XOAUTH2 json file: $filename\n" ) ;
+ my $FILE ;
+ if ( ! open( $FILE, '<', $filename ) )
+ {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_AUTHENTICATION_FAILURE,
+ "error [$filename]: $OS_ERROR\n"
+ ) ;
+ }
+ my $jsonfile = $json->decode( join q{}, <$FILE> ) ;
+ close $FILE ;
+
+ $iss = $jsonfile->{client_id};
+ $key = $jsonfile->{private_key};
+ $sync->{ debug } and myprint( "Service account: $iss\n");
+ $sync->{ debug } and myprint( "Private key:\n$key\n");
+ }
+ else
+ {
+ # Legacy "client_id;keyfile.p12;password" format
+ # Get iss (service account address), keyfile name, and keypassword if necessary
+ ( $iss, my $keyfile, my $keypass ) = $imap->Password =~ /([\-\d\w\@\.]+);([a-zA-Z0-9 \_\-\.\/]+);?(.*)?/x ;
+
+ # Assume key password is google default if not provided
+ $keypass = 'notasecret' if not $keypass;
+
+ $sync->{ debug } and myprint( "Service account: $iss\nKey file: $keyfile\nKey password: $keypass\n");
+
+ # NOTE(review): shells out with interpolated $keyfile/$keypass; the
+ # values come from the local command line, but a shell metacharacter
+ # in them would break or be executed — flagged, not changed here.
+ # Get private key from p12 file (would be better in perl...)
+ $key = `openssl pkcs12 -in "$keyfile" -nodes -nocerts -passin pass:$keypass -nomacver`;
+
+ $sync->{ debug } and myprint( "Private key:\n$key\n");
+ }
+
+ # Create jwt of oauth2 request
+ my $time = time ;
+ my $jwt = JSON::WebToken->encode( {
+ 'iss' => $iss, # service account
+ 'scope' => 'https://mail.google.com/',
+ 'aud' => 'https://www.googleapis.com/oauth2/v3/token',
+ 'exp' => $time + $DEFAULT_EXPIRATION_TIME_OAUTH2_PK12,
+ 'iat' => $time,
+ 'prn' => $imap->User # user to auth as
+ },
+ $key, 'RS256', {'typ' => 'JWT'} ); # Crypt::OpenSSL::RSA needed here.
+
+ # Post oauth2 request
+ my $ua = LWP::UserAgent->new( ) ;
+ $ua->env_proxy( ) ;
+
+ my $response = $ua->post('https://www.googleapis.com/oauth2/v3/token',
+ { grant_type => HTML::Entities::encode_entities('urn:ietf:params:oauth:grant-type:jwt-bearer'),
+ assertion => $jwt } ) ;
+
+ # exit_clean() terminates the whole process on failure
+ unless( $response->is_success( ) ) {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_AUTHENTICATION_FAILURE,
+ $response->code, "\n", $response->content, "\n"
+ ) ;
+ }else{
+ $sync->{ debug } and myprint( $response->content ) ;
+ }
+
+ # access_token in response is what we need
+ my $data = JSON::decode_json( $response->content ) ;
+
+ # format as oauth2 auth data ( \1 is the ^A separator XOAUTH2 requires )
+ my $xoauth2_string = encode_base64( 'user=' . $imap->User . "\1auth=Bearer " . $data->{access_token} . "\1\1", q{} ) ;
+
+ $sync->{ debug } and myprint( "XOAUTH2 String: $xoauth2_string\n");
+ return($xoauth2_string);
+}
+
+
+
+
+# xoauth() thanks to Eduardo Bortoluzzi Junior
+sub xoauth
+{
+ # Legacy Google XOAUTH (two-legged OAuth 1.0, HMAC-SHA1) for Google Apps.
+ # Builds the OAuth base string, signs it with the consumer secret
+ # ( $imap->Password ), and returns the base64 authentication string.
+ # The consumer key is the domain part of $imap->User.
+ require URI::Escape ;
+ require Data::Uniqid ;
+
+ my $code = shift;
+ my $imap = shift;
+
+ # The base information needed to construct the OAUTH authentication
+ my $method = 'GET' ;
+ my $url = mysprintf( 'https://mail.google.com/mail/b/%s/imap/', $imap->User ) ;
+ my $urlparm = mysprintf( 'xoauth_requestor_id=%s', URI::Escape::uri_escape( $imap->User ) ) ;
+
+ # For Google Apps, the consumer key is the primary domain
+ # TODO: create a command line argument to define the consumer key
+ my @user_parts = split /@/x, $imap->User ;
+ $sync->{ debug } and myprint( "XOAUTH: consumer key: $user_parts[1]\n" ) ;
+
+ # All the parameters needed to be signed on the XOAUTH
+ my %hash = ();
+ $hash { 'xoauth_requestor_id' } = URI::Escape::uri_escape($imap->User);
+ $hash { 'oauth_consumer_key' } = $user_parts[1];
+ $hash { 'oauth_nonce' } = md5_hex(Data::Uniqid::uniqid(rand(), 1==1));
+ $hash { 'oauth_signature_method' } = 'HMAC-SHA1';
+ $hash { 'oauth_timestamp' } = time ;
+ $hash { 'oauth_version' } = '1.0';
+
+ # Base will hold the string to be signed
+ my $base = "$method&" . URI::Escape::uri_escape( $url ) . q{&} ;
+
+ # The parameters must be in dictionary order before signing
+ my $baseparms = q{} ;
+ foreach my $key ( sort keys %hash ) {
+ if ( length( $baseparms ) > 0 ) {
+ $baseparms .= q{&} ;
+ }
+
+ $baseparms .= "$key=$hash{$key}" ;
+ }
+
+ $base .= URI::Escape::uri_escape($baseparms);
+ $sync->{ debug } and myprint( "XOAUTH: base request to sign: $base\n" ) ;
+ # Sign it with the consumer secret, informed on the command line (password)
+ my $digest = hmac_sha1( $base, URI::Escape::uri_escape( $imap->Password ) . q{&} ) ;
+
+ # The parameters signed become a parameter and...
+ # ( substr ..., 0, $MINUS_ONE strips the newline encode_base64 appends )
+ $hash { 'oauth_signature' } = URI::Escape::uri_escape( substr encode_base64( $digest ), 0, $MINUS_ONE ) ;
+
+ # ... we don't need the requestor_id anymore.
+ delete $hash{'xoauth_requestor_id'} ;
+
+ # Create the final authentication string
+ my $string = $method . q{ } . $url . q{?} . $urlparm .q{ } ;
+
+ # All the parameters must be sorted, comma-separated, values quoted
+ $baseparms = q{};
+ foreach my $key (sort keys %hash) {
+ if(length($baseparms)>0) {
+ $baseparms .= q{,} ;
+ }
+
+ $baseparms .= "$key=\"$hash{$key}\"";
+ }
+
+ $string .= $baseparms;
+
+ $sync->{ debug } and myprint( "XOAUTH: authentication string: $string\n" ) ;
+
+ # It must be base64 encoded
+ return encode_base64("$string", q{});
+}
+
+
+sub xmasterauth
+{
+ # This is Kerio auth admin
+ # This code comes from
+ # https://github.com/imapsync/imapsync/pull/53/files
+ #
+ # Authenticate on a Kerio server: answer the X-MASTERAUTH challenge
+ # with md5_hex( challenge . master_password ), then impersonate the
+ # target mailbox with X-SETUSER. Any failure calls exit_clean(),
+ # which terminates the process.
+
+ my $imap = shift ;
+
+ my $user = $imap->User( ) ;
+ my $password = $imap->Password( ) ;
+ my $authmech = 'X-MASTERAUTH' ;
+
+ my @challenge = $imap->tag_and_run( $authmech, "+" ) ;
+ if ( not defined $challenge[0] )
+ {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_AUTHENTICATION_FAILURE,
+ "Failure authenticate with $authmech: ",
+ $imap->LastError, "\n"
+ ) ;
+ # unreachable: exit_clean() exits the process
+ return ; # hahaha!
+ }
+ $sync->{ debug } and myprint( "X-MASTERAUTH challenge: [@challenge]\n" ) ;
+
+ # strip the leading "+ " continuation marker and surrounding whitespace
+ $challenge[1] =~ s/^\+ |^\s+|\s+$//g ;
+ if ( ! $imap->_imap_command( { addcrlf => 1, addtag => 0, tag => $imap->Count }, md5_hex( $challenge[1] . $password ) ) )
+ {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_AUTHENTICATION_FAILURE,
+ "Failure authenticate with $authmech: ",
+ $imap->LastError, "\n"
+ ) ;
+ }
+
+ if ( ! $imap->tag_and_run( 'X-SETUSER ' . $user ) )
+ {
+ $sync->{nb_errors}++ ;
+ exit_clean( $sync, $EXIT_AUTHENTICATION_FAILURE,
+ "Failure authenticate with $authmech: ",
+ "X-SETUSER ", $imap->LastError, "\n"
+ ) ;
+ }
+
+ $imap->State( Mail::IMAPClient::Authenticated ) ;
+ # I comment this state because "Selected" state is usually done by SELECT or EXAMINE imap commands
+ # $imap->State( Mail::IMAPClient::Selected ) ;
+
+ return ;
+}
+
+
+sub tests_do_valid_directory
+{
+ # Unit tests for do_valid_directory(). Skipped on Windows; the
+ # "not writable" cases are also skipped when running as root,
+ # since root can write anywhere.
+ note( 'Entering tests_do_valid_directory()' ) ;
+
+ Readonly my $NB_UNIX_tests_do_valid_directory => 2 ;
+ SKIP: {
+ skip( 'Tests only for Unix', $NB_UNIX_tests_do_valid_directory ) if ( 'MSWin32' eq $OSNAME ) ;
+ ok( 1 == do_valid_directory( '.'), 'do_valid_directory: . good' ) ;
+ ok( 1 == do_valid_directory( './W/tmp/tests/valid/sub'), 'do_valid_directory: ./W/tmp/tests/valid/sub good' ) ;
+ }
+ Readonly my $NB_UNIX_tests_do_valid_directory_non_root => 2 ;
+ SKIP: {
+ skip( 'Tests only for Unix', $NB_UNIX_tests_do_valid_directory_non_root ) if ( 'MSWin32' eq $OSNAME or '0' eq $EFFECTIVE_USER_ID ) ;
+ diag( 'Error / not writable is on purpose' ) ;
+ ok( 0 == do_valid_directory( '/'), 'do_valid_directory: / bad' ) ;
+ diag( 'Error permission denied on /noway is on purpose' ) ;
+ ok( 0 == do_valid_directory( '/noway'), 'do_valid_directory: /noway bad' ) ;
+ }
+
+
+ note( 'Leaving tests_do_valid_directory()' ) ;
+ return ;
+}
+
+sub banner_imapsync
+{
+ # Startup banner: the RCS keywords of this release followed by the
+ # command line actually used, with passwords masked by
+ # command_line_nopassword().
+ my ( $mysync, @argv ) = @ARG ;
+
+ # join keeps command_line_nopassword() in list context on purpose
+ my $banner_text = join q{},
+  q{$RCSfile: imapsync,v $ },
+  q{$Revision: 1.977 $ },
+  q{$Date: 2019/12/23 20:18:02 $ },
+  "\n",
+  "Command line used, run by $EXECUTABLE_NAME:\n",
+  "$PROGRAM_NAME ", command_line_nopassword( $mysync, @argv ), "\n" ;
+
+ return $banner_text ;
+}
+
+sub do_valid_directory
+{
+ # Ensure $dir is a usable working directory.
+ # Returns 1 when it exists and is readable+writable, 0 when it exists
+ # but is unusable; otherwise tries to create it (mkpath) and re-checks.
+ my $dir = shift @ARG ;
+
+ # all good => return ok.
+ # ( "_" reuses the stat buffer of the preceding -d check )
+ return( 1 ) if ( -d $dir and -r _ and -w _ ) ;
+
+ # exist but bad
+ if ( -e $dir and not -d _ ) {
+ myprint( "Error: $dir exists but is not a directory\n" ) ;
+ return( 0 ) ;
+ }
+ if ( -e $dir and not -w _ ) {
+ my $sb = stat $dir ;
+ myprintf( "Error: directory %s is not writable for user %s, permissions are %04o and owner is %s ( uid %s )\n",
+ $dir, getpwuid_any_os( $EFFECTIVE_USER_ID ), ($sb->mode & oct($PERMISSION_FILTER) ), getpwuid_any_os( $sb->uid ), $sb->uid( ) ) ;
+ return( 0 ) ;
+ }
+ # Trying to create it
+ myprint( "Creating directory $dir\n" ) ;
+ # mkpath dies on failure, hence the eval; report but fall through
+ if ( ! eval { mkpath( $dir ) } ) {
+ myprint( "$EVAL_ERROR" ) if ( $EVAL_ERROR ) ;
+ }
+ # final verdict after the creation attempt
+ return( 1 ) if ( -d $dir and -r _ and -w _ ) ;
+ return( 0 ) ;
+}
+
+
+sub tests_match_a_pid_number
+{
+ # Unit tests for match_a_pid_number(): accepts integers with
+ # 0 < abs(pid) <= 99999 (negative allowed for Windows), rejects the rest.
+ note( 'Entering tests_match_a_pid_number()' ) ;
+
+ is( undef, match_a_pid_number( ), 'match_a_pid_number: no args => undef' ) ;
+ is( undef, match_a_pid_number( q{} ), 'match_a_pid_number: "" => undef' ) ;
+ is( undef, match_a_pid_number( 'lalala' ), 'match_a_pid_number: lalala => undef' ) ;
+ is( 1, match_a_pid_number( 1 ), 'match_a_pid_number: 1 => 1' ) ;
+ is( 1, match_a_pid_number( 123 ), 'match_a_pid_number: 123 => 1' ) ;
+ is( 1, match_a_pid_number( -123 ), 'match_a_pid_number: -123 => 1' ) ;
+ is( 1, match_a_pid_number( '123' ), 'match_a_pid_number: "123" => 1' ) ;
+ is( 1, match_a_pid_number( '-123' ), 'match_a_pid_number: "-123" => 1' ) ;
+ is( undef, match_a_pid_number( 'a123' ), 'match_a_pid_number: a123 => undef' ) ;
+ is( undef, match_a_pid_number( '-a123' ), 'match_a_pid_number: -a123 => undef' ) ;
+ is( 1, match_a_pid_number( 99999 ), 'match_a_pid_number: 99999 => 1' ) ;
+ is( 1, match_a_pid_number( -99999 ), 'match_a_pid_number: -99999 => 1' ) ;
+ is( undef, match_a_pid_number( 0 ), 'match_a_pid_number: 0 => undef' ) ;
+ is( undef, match_a_pid_number( 100000 ), 'match_a_pid_number: 100000 => undef' ) ;
+ is( undef, match_a_pid_number( 123456 ), 'match_a_pid_number: 123456 => undef' ) ;
+ is( undef, match_a_pid_number( '-0' ), 'match_a_pid_number: "-0" => undef' ) ;
+ is( undef, match_a_pid_number( -100000 ), 'match_a_pid_number: -100000 => undef' ) ;
+ is( undef, match_a_pid_number( -123456 ), 'match_a_pid_number: -123456 => undef' ) ;
+
+ note( 'Leaving tests_match_a_pid_number()' ) ;
+ return ;
+}
+
+sub match_a_pid_number
+{
+ # Return 1 when the argument looks like a plausible PID: an integer,
+ # possibly negative (PIDs can be negative on Windows), with
+ # 0 < abs(pid) <= 99999. Return undef (empty list) otherwise.
+ my $pid = shift @ARG ;
+
+ return if ( ! defined $pid ) ;
+ return if ( ! match( $pid, '^-?\d+$' ) ) ;
+
+ my $abspid = abs $pid ;
+ return if ( $abspid > 99999 ) ;
+ return if ( 0 == $abspid ) ;
+ return 1 ;
+}
+
+sub tests_remove_pidfile_not_running
+{
+ # Unit tests for remove_pidfile_not_running(): only a pidfile naming a
+ # dead PID (55555 assumed not running) is removed and returns 1.
+ note( 'Entering tests_remove_pidfile_not_running()' ) ;
+
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'remove_pidfile_not_running: mkpath W/tmp/tests/' ) ;
+ is( undef, remove_pidfile_not_running( ), 'remove_pidfile_not_running: no args => undef' ) ;
+ is( undef, remove_pidfile_not_running( './W' ), 'remove_pidfile_not_running: a dir => undef' ) ;
+ is( undef, remove_pidfile_not_running( 'noexists' ), 'remove_pidfile_not_running: noexists => undef' ) ;
+ is( 1, touch( 'W/tmp/tests/empty.pid' ), 'remove_pidfile_not_running: prepa empty W/tmp/tests/empty.pid' ) ;
+ is( undef, remove_pidfile_not_running( 'W/tmp/tests/empty.pid' ), 'remove_pidfile_not_running: W/tmp/tests/empty.pid => undef' ) ;
+ is( 'lalala', string_to_file( 'lalala', 'W/tmp/tests/lalala.pid' ), 'remove_pidfile_not_running: prepa W/tmp/tests/lalala.pid' ) ;
+ is( undef, remove_pidfile_not_running( 'W/tmp/tests/lalala.pid' ), 'remove_pidfile_not_running: W/tmp/tests/lalala.pid => undef' ) ;
+ is( '55555', string_to_file( '55555', 'W/tmp/tests/notrunning.pid' ), 'remove_pidfile_not_running: prepa W/tmp/tests/notrunning.pid' ) ;
+ is( 1, remove_pidfile_not_running( 'W/tmp/tests/notrunning.pid' ), 'remove_pidfile_not_running: W/tmp/tests/notrunning.pid => 1' ) ;
+ is( $PROCESS_ID, string_to_file( $PROCESS_ID, 'W/tmp/tests/running.pid' ), 'remove_pidfile_not_running: prepa W/tmp/tests/running.pid' ) ;
+ is( undef, remove_pidfile_not_running( 'W/tmp/tests/running.pid' ), 'remove_pidfile_not_running: W/tmp/tests/running.pid => undef' ) ;
+
+ note( 'Leaving tests_remove_pidfile_not_running()' ) ;
+ return ;
+}
+
+sub remove_pidfile_not_running
+{
+ # Remove a stale pidfile when the PID it contains is no longer running.
+ # Returns 1 only when the pidfile was actually removed; undef otherwise.
+ #
+ my $pid_filename = shift @ARG ;
+
+ if ( ! $pid_filename ) { myprint( "No variable pid_filename\n" ) ; return } ;
+ if ( ! -e $pid_filename ) { myprint( "File $pid_filename does not exist\n" ) ; return } ;
+ if ( ! -f $pid_filename ) { myprint( "File $pid_filename is not a file\n" ) ; return } ;
+
+ # first line of the pidfile is the PID (see write_pidfile)
+ my $pid = firstline( $pid_filename ) ;
+ if ( ! match_a_pid_number( $pid ) ) { myprint( "pid $pid in $pid_filename is not a number\n" ) ; return } ;
+ # can't kill myself => do nothing
+ # ( sanity check: if signal-0 to our own PID fails, kill() results
+ #   here are meaningless, so bail out )
+ if ( ! kill 'ZERO', $PROCESS_ID ) { myprint( "Can not kill ZERO myself $PROCESS_ID\n" ) ; return } ;
+
+ # can't kill ZERO the pid => it is gone or own by another user => remove pidfile
+ if ( ! kill 'ZERO', $pid ) {
+ myprint( "Removing old $pid_filename since its PID $pid is not running anymore (oo-killed?)\n" ) ;
+ if ( unlink $pid_filename ) {
+ myprint( "Removed old $pid_filename\n" ) ;
+ return 1 ;
+ }else{
+ myprint( "Could not remove old $pid_filename because $!\n" ) ;
+ return ;
+ }
+ }
+ myprint( "Another imapsync process $pid is running as says pidfile $pid_filename\n" ) ;
+ return ;
+}
+
+
+sub tests_tail
+{
+ # Unit tests for tail(): requires pidfile + pidfilelocking + tail options,
+ # a pidfile whose first line is another PID and second line an existing
+ # logfile; tailing our own PID is refused.
+ note( 'Entering tests_tail()' ) ;
+
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'tail: mkpath W/tmp/tests/' ) ;
+ ok( ( ! -e 'W/tmp/tests/tail.pid' || unlink 'W/tmp/tests/tail.pid' ), 'tail: unlink W/tmp/tests/tail.pid' ) ;
+ ok( ( ! -e 'W/tmp/tests/tail.txt' || unlink 'W/tmp/tests/tail.txt' ), 'tail: unlink W/tmp/tests/tail.txt' ) ;
+
+ is( undef, tail( ), 'tail: no args => undef' ) ;
+ my $mysync ;
+ is( undef, tail( $mysync ), 'tail: no pidfile => undef' ) ;
+
+ $mysync->{pidfile} = 'W/tmp/tests/tail.pid' ;
+ is( undef, tail( $mysync ), 'tail: no pidfilelocking => undef' ) ;
+
+ $mysync->{pidfilelocking} = 1 ;
+ is( undef, tail( $mysync ), 'tail: pidfile no exists => undef' ) ;
+
+
+ my $pidandlog = "33333\nW/tmp/tests/tail.txt\n" ;
+ is( $pidandlog, string_to_file( $pidandlog, $mysync->{pidfile} ), 'tail: put pid 33333 and tail.txt in pidfile' ) ;
+ is( undef, tail( $mysync ), 'tail: logfile to tail no exists => undef' ) ;
+
+ my $tailcontent = "L1\nL2\nL3\nL4\nL5\n" ;
+ is( $tailcontent, string_to_file( $tailcontent, 'W/tmp/tests/tail.txt' ),
+ 'tail: put L1\nL2\nL3\nL4\nL5\n in W/tmp/tests/tail.txt' ) ;
+
+ is( undef, tail( $mysync ), 'tail: fake pid in pidfile + tail off => 1' ) ;
+
+ $mysync->{ tail } = 1 ;
+ is( 1, tail( $mysync ), 'tail: fake pid in pidfile + tail on=> 1' ) ;
+
+ # put my own pid, won't do tail
+ $pidandlog = "$PROCESS_ID\nW/tmp/tests/tail.txt\n" ;
+ is( $pidandlog, string_to_file( $pidandlog, $mysync->{pidfile} ), 'tail: put my own PID in pidfile' ) ;
+ is( undef, tail( $mysync ), 'tail: my own pid in pidfile => undef' ) ;
+
+ note( 'Leaving tests_tail()' ) ;
+ return ;
+}
+
+
+
+sub tail
+{
+ # Follow ( tail -f ) the logfile of a concurrently running imapsync,
+ # found via the pidfile: first line = PID to watch, second line = its
+ # logfile. Keeps printing until that process ends (plus a grace
+ # period counted down by isrunning()).
+ # return undef on failures
+ # return 1 on success
+
+ my $mysync = shift ;
+
+ # no tail when aborting!
+ if ( $mysync->{ abort } ) { return ; }
+
+ my $pidfile = $mysync->{pidfile} ;
+ my $lock = $mysync->{pidfilelocking} ;
+ my $tail = $mysync->{tail} ;
+
+ # all three options must be on
+ if ( ! $pidfile ) { return ; }
+ if ( ! $lock ) { return ; }
+ if ( ! $tail ) { return ; }
+
+ my $pidtotail = firstline( $pidfile ) ;
+ if ( ! $pidtotail ) { return ; }
+
+
+
+ # It should not happen but who knows...
+ if ( $pidtotail eq $PROCESS_ID ) { return ; }
+
+
+ my $filetotail = secondline( $pidfile ) ;
+ if ( ! $filetotail ) { return ; }
+
+ if ( ! -r $filetotail )
+ {
+ #myprint( "Error: can not read $filetotail\n" ) ;
+ return ;
+ }
+
+ myprint( "Doing a tail -f on $filetotail for processus pid $pidtotail until it is finished.\n" ) ;
+ my $file = File::Tail->new(
+ name => $filetotail,
+ nowait => 1,
+ interval => 1,
+ tail => 1,
+ adjustafter => 2
+ );
+
+ # grace period: isrunning() keeps saying "running" 200 extra times
+ my $moretimes = 200 ;
+ # print one line at least
+ my $line = $file->read ;
+ myprint( $line ) ;
+ while ( isrunning( $pidtotail, \$moretimes ) and defined( $line = $file->read ) )
+ {
+ myprint( $line );
+ # NOTE(review): fractional sleep assumes Time::HiRes's sleep is
+ # imported file-wide; core sleep() would truncate 0.02 to 0 — confirm.
+ sleep( 0.02 ) ;
+ }
+
+ return 1 ;
+}
+
+sub isrunning
+{
+ # Check whether process $pidtocheck is still alive via kill ZERO.
+ # When the signal fails, keep reporting "running" for a grace period
+ # of $$moretimes_ref further calls (counter decremented in place),
+ # so a log tail can drain. Returns 1 while considered running,
+ # undef once the grace period is exhausted.
+ my ( $pidtocheck, $moretimes_ref ) = @_ ;
+
+ #myprint( "$pidtocheck running\n" ) ;
+ return 1 if kill 'ZERO', $pidtocheck ;
+
+ if ( 0 <= $$moretimes_ref )
+ {
+  # continue to consider it running
+  $$moretimes_ref -= 1 ;
+  return 1 ;
+ }
+
+ myprint( "Tailed processus $pidtocheck ended\n" ) ;
+ return ;
+}
+
+sub tests_write_pidfile
+{
+ # Unit tests for write_pidfile(): no pidfile => ok; unwritable path =>
+ # failure; existing pidfile + locking => failure; logfile name goes
+ # on the pidfile's second line.
+ note( 'Entering tests_write_pidfile()' ) ;
+
+ my $mysync ;
+
+ is( 1, write_pidfile( ), 'write_pidfile: no args => 1' ) ;
+
+ # no pidfile => ok
+ $mysync->{pidfile} = q{} ;
+ is( 1, write_pidfile( $mysync ), 'write_pidfile: no pidfile => undef' ) ;
+
+ # The pidfile path is bad => failure
+ $mysync->{pidfile} = '/no/no/no.pid' ;
+ is( undef, write_pidfile( $mysync ), 'write_pidfile: no permission for /no/no/no.pid, no lock => undef' ) ;
+
+ $mysync->{pidfilelocking} = 1 ;
+ is( undef, write_pidfile( $mysync ), 'write_pidfile: no permission for /no/no/no.pid + lock => undef' ) ;
+
+ $mysync->{pidfile} = 'W/tmp/tests/test.pid' ;
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'write_pidfile: mkpath W/tmp/tests/' ) ;
+ is( 1, touch( $mysync->{pidfile} ), 'write_pidfile: lock prepa' ) ;
+
+ $mysync->{pidfilelocking} = 0 ;
+ is( 1, write_pidfile( $mysync ), 'write_pidfile: W/tmp/tests/test.pid + no lock => 1' ) ;
+ is( $PROCESS_ID, firstline( 'W/tmp/tests/test.pid' ), "write_pidfile: W/tmp/tests/test.pid contains $PROCESS_ID" ) ;
+ is( q{}, secondline( 'W/tmp/tests/test.pid' ), "write_pidfile: W/tmp/tests/test.pid contains no second line" ) ;
+
+ $mysync->{pidfilelocking} = 1 ;
+ is( undef, write_pidfile( $mysync ), 'write_pidfile: W/tmp/tests/test.pid + lock => undef' ) ;
+
+
+ $mysync->{pidfilelocking} = 0 ;
+ $mysync->{ logfile } = 'rrrr.txt' ;
+ is( 1, write_pidfile( $mysync ), 'write_pidfile: W/tmp/tests/test.pid + no lock + logfile => 1' ) ;
+ is( $PROCESS_ID, firstline( 'W/tmp/tests/test.pid' ), "write_pidfile: + no lock + logfile W/tmp/tests/test.pid contains $PROCESS_ID" ) ;
+ is( q{rrrr.txt}, secondline( 'W/tmp/tests/test.pid' ), "write_pidfile: + no lock + logfile W/tmp/tests/test.pid contains rrrr.txt" ) ;
+
+
+ note( 'Leaving tests_write_pidfile()' ) ;
+ return ;
+}
+
+
+
+sub write_pidfile
+{
+ # Write this process's PID (and, when set, its logfile name on a
+ # second line) into the pidfile. With pidfilelocking on, an existing
+ # pidfile makes the run abort instead of overwriting.
+ # returns undef if something is considered fatal
+ # returns 1 otherwise
+
+ if ( ! @ARG ) { return 1 ; }
+
+ my $mysync = shift @ARG ;
+
+ # Do not write the pid file if this process goal is to abort the process designed by the pid file
+ if ( $mysync->{abort} ) { return 1 ; }
+
+ #
+ my $pid_filename = $mysync->{ pidfile } ;
+ my $lock = $mysync->{ pidfilelocking } ;
+
+ if ( ! $pid_filename )
+ {
+ myprint( "PID file is unset ( to set it, use --pidfile filepath ; to avoid it use --pidfile \"\" )\n" ) ;
+ return( 1 ) ;
+ }
+
+ myprint( "PID file is $pid_filename ( to change it, use --pidfile filepath ; to avoid it use --pidfile \"\" )\n" ) ;
+ if ( -e $pid_filename and $lock ) {
+ myprint( "$pid_filename already exists, another imapsync may be curently running. Aborting imapsync.\n" ) ;
+ return ;
+
+ }
+
+ if ( -e $pid_filename ) {
+ myprint( "$pid_filename already exists, overwriting it ( use --pidfilelocking to avoid concurrent runs )\n" ) ;
+ }
+
+ my $pid_string = "$PROCESS_ID\n" ;
+ my $pid_message = "Writing my PID $PROCESS_ID in $pid_filename\n" ;
+
+ # second line of the pidfile is the logfile name, used by tail()
+ if ( $mysync->{ logfile } )
+ {
+ $pid_string .= "$mysync->{ logfile }\n" ;
+ $pid_message .= "Writing also my logfile name in $pid_filename : $mysync->{ logfile }\n" ;
+ }
+
+ if ( open my $FILE_HANDLE, '>', $pid_filename ) {
+ myprint( $pid_message ) ;
+ print $FILE_HANDLE $pid_string ;
+ close $FILE_HANDLE ;
+ return( 1 ) ;
+ }
+ else
+ {
+ myprint( "Could not open $pid_filename for writing. Check permissions or disk space: $OS_ERROR\n" ) ;
+ return ;
+ }
+}
+
+
+sub fix_Inbox_INBOX_mapping
+{
+ # Return a regextrans2-style expression mapping INBOX/Inbox spelling
+ # from host1 to host2 when the two sides disagree, and an empty
+ # string when they agree (or neither spelling is shared).
+ # The two "agree" cases are tested first, so a host2 having both
+ # spellings yields no mapping.
+ my ( $h1_all, $h2_all ) = @_ ;
+
+ my $regex = q{} ;
+ if ( exists $h1_all->{INBOX} and exists $h2_all->{INBOX} ) { }
+ elsif ( exists $h1_all->{Inbox} and exists $h2_all->{Inbox} ) { }
+ elsif ( exists $h1_all->{INBOX} and exists $h2_all->{Inbox} ) { $regex = q{s/^INBOX$/Inbox/x} ; }
+ elsif ( exists $h1_all->{Inbox} and exists $h2_all->{INBOX} ) { $regex = q{s/^Inbox$/INBOX/x} ; }
+
+ return( $regex ) ;
+}
+
+sub tests_fix_Inbox_INBOX_mapping
+{
+ # Unit tests for fix_Inbox_INBOX_mapping(): a regex is produced only
+ # when host1 and host2 disagree on the INBOX/Inbox spelling.
+ note( 'Entering tests_fix_Inbox_INBOX_mapping()' ) ;
+
+
+ my( $h1_all, $h2_all ) ;
+
+ $h1_all = { 'INBOX' => q{} } ;
+ $h2_all = { 'INBOX' => q{} } ;
+ ok( q{} eq fix_Inbox_INBOX_mapping( $h1_all, $h2_all ), 'fix_Inbox_INBOX_mapping: INBOX INBOX' ) ;
+
+ $h1_all = { 'Inbox' => q{} } ;
+ $h2_all = { 'Inbox' => q{} } ;
+ ok( q{} eq fix_Inbox_INBOX_mapping( $h1_all, $h2_all ), 'fix_Inbox_INBOX_mapping: Inbox Inbox' ) ;
+
+ $h1_all = { 'INBOX' => q{} } ;
+ $h2_all = { 'Inbox' => q{} } ;
+ ok( q{s/^INBOX$/Inbox/x} eq fix_Inbox_INBOX_mapping( $h1_all, $h2_all ), 'fix_Inbox_INBOX_mapping: INBOX Inbox' ) ;
+
+ $h1_all = { 'Inbox' => q{} } ;
+ $h2_all = { 'INBOX' => q{} } ;
+ ok( q{s/^Inbox$/INBOX/x} eq fix_Inbox_INBOX_mapping( $h1_all, $h2_all ), 'fix_Inbox_INBOX_mapping: Inbox INBOX' ) ;
+
+ $h1_all = { 'INBOX' => q{} } ;
+ $h2_all = { 'rrrrr' => q{} } ;
+ ok( q{} eq fix_Inbox_INBOX_mapping( $h1_all, $h2_all ), 'fix_Inbox_INBOX_mapping: INBOX rrrrrr' ) ;
+
+ $h1_all = { 'rrrrr' => q{} } ;
+ $h2_all = { 'Inbox' => q{} } ;
+ ok( q{} eq fix_Inbox_INBOX_mapping( $h1_all, $h2_all ), 'fix_Inbox_INBOX_mapping: rrrrr Inbox' ) ;
+
+ note( 'Leaving tests_fix_Inbox_INBOX_mapping()' ) ;
+ return ;
+}
+
+
+sub jux_utf8_list
+{
+ # Render each folder name with jux_utf8(), one per line, and
+ # return the concatenation. Empty input yields the empty string.
+ my @names = @_ ;
+
+ return join q{}, map { jux_utf8( $_ ) . "\n" } @names ;
+}
+
+sub tests_jux_utf8_list
+{
+ # Unit tests for jux_utf8_list().
+ note( 'Entering tests_jux_utf8_list()' ) ;
+
+ use utf8 ;
+ is( q{}, jux_utf8_list( ), 'jux_utf8_list: void' ) ;
+ is( "[]\n", jux_utf8_list( q{} ), 'jux_utf8_list: empty string' ) ;
+ is( "[INBOX]\n", jux_utf8_list( 'INBOX' ), 'jux_utf8_list: INBOX' ) ;
+ is( "[&ANY-] = [Ö]\n", jux_utf8_list( '&ANY-' ), 'jux_utf8_list: [&ANY-] = [Ö]' ) ;
+
+ note( 'Leaving tests_jux_utf8_list()' ) ;
+ # NOTE(review): returns 0 while sibling tests_* subs "return ;" — harmless
+ return( 0 ) ;
+}
+
+# editing utf8 can be tricky without an utf8 editor
+sub tests_jux_utf8_old
+{
+ # Unit tests for jux_utf8_old(), the Unicode::String based decoder.
+ note( 'Entering tests_jux_utf8_old()' ) ;
+
+ no utf8 ;
+
+ is( '[]', jux_utf8_old( q{} ), 'jux_utf8_old: void => []' ) ;
+ is( '[INBOX]', jux_utf8_old( 'INBOX'), 'jux_utf8_old: INBOX => [INBOX]' ) ;
+ is( '[&ZTZO9nux-] = [收件箱]', jux_utf8_old( '&ZTZO9nux-'), 'jux_utf8_old: => [&ZTZO9nux-] = [收件箱]' ) ;
+ is( '[&ANY-] = [Ö]', jux_utf8_old( '&ANY-'), 'jux_utf8_old: &ANY- => [&ANY-] = [Ö]' ) ;
+ # +BD8EQAQ1BDQEOwQ+BDM- SHOULD stay as is!
+ is( '[+BD8EQAQ1BDQEOwQ+BDM-] = [предлог]', jux_utf8_old( '+BD8EQAQ1BDQEOwQ+BDM-' ), 'jux_utf8_old: => [+BD8EQAQ1BDQEOwQ+BDM-] = [предлог]' ) ;
+ is( '[&BB8EQAQ+BDUEOgRC-] = [Проект]', jux_utf8_old( '&BB8EQAQ+BDUEOgRC-' ), 'jux_utf8_old: => [&BB8EQAQ+BDUEOgRC-] = [Проект]' ) ;
+
+ note( 'Leaving tests_jux_utf8_old()' ) ;
+ return ;
+}
+
+sub jux_utf8_old
+{
+ # Show an IMAP UTF-7 name; when its UTF-8 decoding (old decoder)
+ # differs, juxtapose it as "[utf7] = [utf8]", otherwise just "[utf7]".
+ my $s_utf7 = shift ;
+ my $s_utf8 = imap_utf7_decode_old( $s_utf7 ) ;
+
+ return ( $s_utf7 eq $s_utf8 )
+  ? "[$s_utf7]"
+  : "[$s_utf7] = [$s_utf8]" ;
+}
+
+# Copied from http://cpansearch.perl.org/src/FABPOT/Unicode-IMAPUtf7-2.01/lib/Unicode/IMAPUtf7.pm
+# and then fixed with
+# https://rt.cpan.org/Public/Bug/Display.html?id=11172
+sub imap_utf7_decode_old
+{
+ # Decode IMAP modified UTF-7 using Unicode::String, by first turning
+ # the modified encoding back into standard UTF-7.
+ my ( $s ) = shift ;
+
+ # Algorithm ( modified UTF-7 -> standard UTF-7 ) :
+ # Replace , with / inside BASE64 runs ( a , occurring between & and - )
+ # Replace each & not followed by a - with +
+ # Replace &- with &
+ $s =~ s/&([^,&\-]*),([^,\-&]*)\-/&$1\/$2\-/xg ;
+ $s =~ s/&(?!\-)/\+/xg ;
+ $s =~ s/&\-/&/xg ;
+ return( Unicode::String::utf7( $s )->utf8 ) ;
+}
+
+
+
+
+
+sub tests_jux_utf8
+{
+ # Unit tests for jux_utf8(), the Encode::IMAPUTF7 based decoder.
+ note( 'Entering tests_jux_utf8()' ) ;
+ #no utf8 ;
+ use utf8 ;
+
+ #binmode STDOUT, ":encoding(UTF-8)" ;
+ binmode STDERR, ":encoding(UTF-8)" ;
+
+ # This test is because the binary can fail on it, a PAR.pm issue.
+ # The failure was with the underlying Encode::IMAPUTF7 module line 66 release 1.05
+ # Was solved by including Encode in imapsync and using "pp -x".
+ ok( find_encoding( "UTF-16BE"), 'jux_utf8: Encode::find_encoding: UTF-16BE' ) ;
+
+ #
+ is( '[]', jux_utf8( q{} ), 'jux_utf8: void => []' ) ;
+ is( '[INBOX]', jux_utf8( 'INBOX'), 'jux_utf8: INBOX => [INBOX]' ) ;
+ is( '[&ANY-] = [Ö]', jux_utf8( '&ANY-'), 'jux_utf8: &ANY- => [&ANY-] = [Ö]' ) ;
+ # +BD8EQAQ1BDQEOwQ+BDM- must stay as is
+ is( '[+BD8EQAQ1BDQEOwQ+BDM-]', jux_utf8( '+BD8EQAQ1BDQEOwQ+BDM-' ), 'jux_utf8: => [+BD8EQAQ1BDQEOwQ+BDM-] = [+BD8EQAQ1BDQEOwQ+BDM-]' ) ;
+ is( '[&BB8EQAQ+BDUEOgRC-] = [Проект]', jux_utf8( '&BB8EQAQ+BDUEOgRC-' ), 'jux_utf8: => [&BB8EQAQ+BDUEOgRC-] = [Проект]' ) ;
+
+ is( '[R&AOk-ponses 1200+1201+1202] = [Réponses 1200+1201+1202]', jux_utf8( q{R&AOk-ponses 1200+1201+1202} ), 'jux_utf8: [R&AOk-ponses 1200+1201+1202] = [Réponses 1200+1201+1202]' ) ;
+ my $str = Encode::IMAPUTF7::encode("IMAP-UTF-7", 'Réponses 1200+1201+1202' ) ;
+ is( '[R&AOk-ponses 1200+1201+1202] = [Réponses 1200+1201+1202]', jux_utf8( $str ), "jux_utf8: [$str] = [Réponses 1200+1201+1202]" ) ;
+
+ is( '[INBOX.&AOkA4ADnAPk-&-*] = [INBOX.éà çù&*]', jux_utf8( 'INBOX.&AOkA4ADnAPk-&-*' ), "jux_utf8: [INBOX.&AOkA4ADnAPk-&-*] = [INBOX.éà çù&*]" ) ;
+
+ is( '[&ZTZO9nux-] = [收件箱]', jux_utf8( '&ZTZO9nux-'), 'jux_utf8: => [&ZTZO9nux-] = [收件箱]' ) ;
+ #
+ note( 'Leaving tests_jux_utf8()' ) ;
+ return ;
+}
+
+sub jux_utf8
+{
+ # Show an IMAP UTF-7 folder name; when its UTF-8 decoding differs,
+ # juxtapose it as "[utf7] = [utf8]", otherwise just "[utf7]".
+ my $s_utf7 = shift ;
+ my $s_utf8 = imap_utf7_decode( $s_utf7 ) ;
+
+ return ( $s_utf7 eq $s_utf8 )
+  ? "[$s_utf7]"
+  : "[$s_utf7] = [$s_utf8]" ;
+}
+
+sub imap_utf7_decode
+{
+ # IMAP modified UTF-7 -> Perl character string.
+ my $s = shift ;
+ return Encode::IMAPUTF7::decode( "IMAP-UTF-7", $s ) ;
+}
+
+sub imap_utf7_encode
+{
+ # Perl character string -> IMAP modified UTF-7.
+ my $s = shift ;
+ return Encode::IMAPUTF7::encode( "IMAP-UTF-7", $s ) ;
+}
+
+
+
+sub imap_utf7_encode_old
+{
+ # Encode to IMAP modified UTF-7 using Unicode::String: encode to
+ # standard UTF-7 first, then rewrite to the modified form
+ # ( / -> , inside BASE64 runs, literal & -> &-, shift + -> & ).
+ my ( $s ) = @_ ;
+
+ $s = Unicode::String::utf8( $s )->utf7 ;
+
+ $s =~ s/\+([^\/&\-]*)\/([^\/\-&]*)\-/\+$1,$2\-/xg ;
+ $s =~ s/&/&\-/xg ;
+ $s =~ s/\+([^+\-]+)?\-/&$1\-/xg ;
+ return( $s ) ;
+}
+
+
+
+
+sub select_folder
+{
+ # SELECT $folder on $imap. On failure, count and report the error via
+ # errors_incr() and return 0; return 1 on success.
+ my ( $mysync, $imap, $folder, $hostside ) = @_ ;
+
+ return( 1 ) if $imap->select( $folder ) ;
+
+ my $error = "$hostside folder $folder: Could not select: "
+  . $imap->LastError . "\n" ;
+ errors_incr( $mysync, $error ) ;
+ return( 0 ) ;
+}
+
+sub examine_folder
+{
+ # EXAMINE (read-only SELECT) $folder on $imap. On failure, count and
+ # report the error via errors_incr() and return 0; return 1 on success.
+ my ( $mysync, $imap, $folder, $hostside ) = @_ ;
+
+ return( 1 ) if $imap->examine( $folder ) ;
+
+ my $error = "$hostside folder $folder: Could not examine: "
+  . $imap->LastError . "\n" ;
+ errors_incr( $mysync, $error ) ;
+ return( 0 ) ;
+}
+
+
+sub count_from_select
+{
+ # Extract the message count from the untagged "* <n> EXISTS" line of a
+ # SELECT/EXAMINE response. Return the first count found, or undef
+ # when no EXISTS line is present.
+ my @lines = @ARG ;
+
+ foreach my $line ( @lines ) {
+  #myprint( "line = [$line]\n" ) ;
+  return $1 if ( $line =~ m/^\*\s+(\d+)\s+EXISTS/x ) ;
+ }
+ return( undef ) ;
+}
+
+
+
+sub create_folder_old
+{
+ # Old-style folder creation on host2: a single CREATE with no parent
+ # handling. Returns 1 on success (or when the folder already exists),
+ # 0 on failure or in --dry mode.
+ my $mysync = shift @ARG ;
+ my( $imap, $h2_fold, $h1_fold ) = @ARG ;
+
+ myprint( "Creating (old way) folder [$h2_fold] on host2\n" ) ;
+ # INBOX is special-cased: case-insensitive name, usually pre-existing
+ if ( ( 'INBOX' eq uc $h2_fold )
+ and ( $imap->exists( $h2_fold ) ) ) {
+ myprint( "Folder [$h2_fold] already exists\n" ) ;
+ return( 1 ) ;
+ }
+ if ( ! $mysync->{dry} ){
+ if ( ! $imap->create( $h2_fold ) ) {
+ my $error = join q{},
+ "Could not create folder [$h2_fold] from [$h1_fold]: ",
+ $imap->LastError( ), "\n" ;
+ errors_incr( $mysync, $error ) ;
+ # success if folder exists ("already exists" error)
+ return( 1 ) if $imap->exists( $h2_fold ) ;
+ # failure since create failed
+ return( 0 ) ;
+ }else{
+ #create succeeded
+ myprint( "Created ( the old way ) folder [$h2_fold] on host2\n" ) ;
+ return( 1 ) ;
+ }
+ }else{
+ # dry mode, no folder so many imap will fail, assuming failure
+ myprint( "Created ( the old way ) folder [$h2_fold] on host2 $mysync->{dry_message}\n" ) ;
+ return( 0 ) ;
+ }
+}
+
+
+sub create_folder
+{
+ # Create folder $h2_fold on host2, recursively creating missing parent
+ # folders first (split on the host2 separator). Honors --dry,
+ # --nomixfolders and the $create_folder_old fallback.
+ # Returns 1 on success or already-exists, 0 on failure.
+ my $mysync = shift @ARG ;
+ my( $myimap2 , $h2_fold , $h1_fold ) = @ARG ;
+ my( @parts , $parent ) ;
+
+ if ( $myimap2->IsUnconnected( ) ) {
+ myprint( "Host2: Unconnected state\n" ) ;
+ return( 0 ) ;
+ }
+
+ # --create_folder_old bypasses the recursive parent logic below
+ if ( $create_folder_old ) {
+ return( create_folder_old( $mysync, $myimap2 , $h2_fold , $h1_fold ) ) ;
+ }
+ myprint( "Creating folder [$h2_fold] on host2\n" ) ;
+ # INBOX is special-cased: case-insensitive name, usually pre-existing
+ if ( ( 'INBOX' eq uc $h2_fold )
+ and ( $myimap2->exists( $h2_fold ) ) ) {
+ myprint( "Folder [$h2_fold] already exists\n" ) ;
+ return( 1 ) ;
+ }
+
+ if ( $mixfolders and $myimap2->exists( $h2_fold ) ) {
+ myprint( "Folder [$h2_fold] already exists (--nomixfolders is not set)\n" ) ;
+ return( 1 ) ;
+ }
+
+
+ if ( ( not $mixfolders ) and ( $myimap2->exists( $h2_fold ) ) ) {
+ myprint( "Folder [$h2_fold] already exists and --nomixfolders is set\n" ) ;
+ return( 0 ) ;
+ }
+
+ # derive the parent folder name and create it first, recursively
+ @parts = split /\Q$mysync->{ h2_sep }\E/x, $h2_fold ;
+ pop @parts ;
+ $parent = join $mysync->{ h2_sep }, @parts ;
+ $parent =~ s/^\s+|\s+$//xg ;
+ if ( ( $parent ne q{} ) and ( ! $myimap2->exists( $parent ) ) ) {
+ create_folder( $mysync, $myimap2 , $parent , $h1_fold ) ;
+ }
+
+ if ( ! $mysync->{dry} ) {
+ if ( ! $myimap2->create( $h2_fold ) ) {
+ my $error = join q{},
+ "Could not create folder [$h2_fold] from [$h1_fold]: " ,
+ $myimap2->LastError( ), "\n" ;
+ errors_incr( $mysync, $error ) ;
+ # success if folder exists ("already exists" error)
+ return( 1 ) if $myimap2->exists( $h2_fold ) ;
+ # failure since create failed
+ return( 0 ) ;
+ }else{
+ #create succeeded
+ myprint( "Created folder [$h2_fold] on host2\n" ) ;
+ return( 1 ) ;
+ }
+ }else{
+ # dry mode, no folder so many imap will fail, assuming failure
+ myprint( "Created folder [$h2_fold] on host2 $mysync->{dry_message}\n" ) ;
+ if ( ! $mysync->{ justfolders } ) {
+ myprint( "Since --dry mode is on and folder [$h2_fold] on host2 does not exist yet, syncing messages will not be simulated.\n"
+ . "To simulate message syncing, use --justfolders without --dry to first create the missing folders then rerun the --dry sync.\n" ) ;
+ }
+ return( 0 ) ;
+ }
+}
+
+
+
+# tests_folder_routines: unit tests for the requested-folder bookkeeping
+# (add/remove/is_requested/sort). Temporarily sets the @folderfirst and
+# @folderlast globals to exercise sort_requested_folders ordering, then
+# undefs them again.
+sub tests_folder_routines
+{
+ note( 'Entering tests_folder_routines()' ) ;
+
+ ok( !is_requested_folder('folder_foo'), 'is_requested_folder folder_foo 1' );
+ ok( add_to_requested_folders('folder_foo'), 'add_to_requested_folders folder_foo' );
+ ok( is_requested_folder('folder_foo'), 'is_requested_folder folder_foo 2' );
+ ok( !is_requested_folder('folder_NO_EXIST'), 'is_requested_folder folder_NO_EXIST' );
+
+ is_deeply( [ 'folder_foo' ], [ remove_from_requested_folders( 'folder_foo' ) ], 'removed folder_foo => folder_foo' ) ;
+ ok( !is_requested_folder('folder_foo'), 'is_requested_folder folder_foo 3' );
+ my @f ;
+ ok( @f = add_to_requested_folders('folder_bar', 'folder_toto'), "add result: @f" );
+ ok( is_requested_folder('folder_bar'), 'is_requested_folder 4' );
+ ok( is_requested_folder('folder_toto'), 'is_requested_folder 5' );
+ ok( remove_from_requested_folders('folder_toto'), 'remove_from_requested_folders: ' );
+ ok( !is_requested_folder('folder_toto'), 'is_requested_folder 6' );
+
+ is_deeply( [ 'folder_bar' ], [ remove_from_requested_folders('folder_bar') ], 'remove_from_requested_folders: empty' ) ;
+
+ ok( 0 == compare_lists( [ sort_requested_folders( ) ], [] ), 'sort_requested_folders: all empty' ) ;
+ ok( add_to_requested_folders( 'A_99', 'M_55', 'Z_11' ), 'add_to_requested_folders M_55 Z_11' );
+ ok( 0 == compare_lists( [ sort_requested_folders( ) ], [ 'A_99', 'M_55', 'Z_11' ] ), 'sort_requested_folders: middle' ) ;
+
+
+ @folderfirst = ( 'Z_11' ) ;
+
+ ok( 0 == compare_lists( [ sort_requested_folders( ) ], [ 'Z_11', 'A_99', 'M_55' ] ), 'sort_requested_folders: first+middle' ) ;
+
+ is_deeply( [ 'Z_11', 'A_99', 'M_55' ], [ sort_requested_folders( ) ], 'sort_requested_folders: first+middle is_deeply' ) ;
+
+ @folderlast = ( 'A_99' ) ;
+ ok( 0 == compare_lists( [ sort_requested_folders( ) ], [ 'Z_11', 'M_55', 'A_99' ] ), 'sort_requested_folders: first+middle+last 1' ) ;
+
+ ok( add_to_requested_folders('M_55', 'M_44',), 'add_to_requested_folders M_55 M_44' ) ;
+
+ ok( 0 == compare_lists( [ sort_requested_folders( ) ], [ 'Z_11', 'M_44', 'M_55', 'A_99'] ), 'sort_requested_folders: first+middle+last 2' ) ;
+
+
+ ok( add_to_requested_folders('A_88', 'Z_22',), 'add_to_requested_folders A_88 Z_22' ) ;
+ @folderfirst = qw( Z_22 Z_11 ) ;
+ @folderlast = qw( A_99 A_88 ) ;
+ ok( 0 == compare_lists( [ sort_requested_folders( ) ], [ 'Z_22', 'Z_11', 'M_44', 'M_55', 'A_99', 'A_88' ] ), 'sort_requested_folders: first+middle+last 3' ) ;
+ undef @folderfirst ;
+ undef @folderlast ;
+
+ note( 'Leaving tests_folder_routines()' ) ;
+ return ;
+}
+
+
+# sort_requested_folders: order the requested folders as @folderfirst,
+# then the remaining names alphabetically, then @folderlast. The sorted
+# list is re-registered into %requested_folder and returned.
+sub sort_requested_folders
+{
+ my @requested_folders_sorted = () ;
+
+ #myprint "folderfirst: @folderfirst\n" ;
+ # Pull the --folderfirst names out of the requested set; only names that
+ # were actually requested come back, in @folderfirst order.
+ my @folderfirst_requested = remove_from_requested_folders( @folderfirst ) ;
+ #myprint "folderfirst_requested: @folderfirst_requested\n" ;
+
+ my @folderlast_requested = remove_from_requested_folders( @folderlast ) ;
+
+ # What is left in %requested_folder is the middle part, alphabetical.
+ my @middle = sort keys %requested_folder ;
+
+ @requested_folders_sorted = ( @folderfirst_requested, @middle, @folderlast_requested ) ;
+ #myprint "requested_folders_sorted: @requested_folders_sorted\n" ;
+ # Put the first/last entries back so %requested_folder stays complete.
+ add_to_requested_folders( @requested_folders_sorted ) ;
+
+ return( @requested_folders_sorted ) ;
+}
+
+sub is_requested_folder
+{
+ # True when the given folder was registered via add_to_requested_folders().
+ my $wanted = shift ;
+
+ return defined( $requested_folder{ $wanted } ) ;
+}
+
+
+sub add_to_requested_folders
+{
+ # Register each given folder name in %requested_folder and return the
+ # complete set of requested folder names (unordered hash keys).
+ foreach my $name ( @_ ) {
+ $requested_folder{ $name }++ ;
+ }
+ return keys %requested_folder ;
+}
+
+# tests_remove_from_requested_folders: unit tests covering removal of
+# absent names, single names and multiple names, checking both the list
+# of removed names and the state of %requested_folder afterwards.
+sub tests_remove_from_requested_folders
+{
+ note( 'Entering tests_remove_from_requested_folders()' ) ;
+
+ is( undef, undef, 'remove_from_requested_folders: undef is undef' ) ;
+ is_deeply( [], [ remove_from_requested_folders( ) ], 'remove_from_requested_folders: no args' ) ;
+ %requested_folder = (
+ 'F1' => 1,
+ ) ;
+ is_deeply( [], [ remove_from_requested_folders( ) ], 'remove_from_requested_folders: remove nothing among F1 => nothing' ) ;
+ is_deeply( [], [ remove_from_requested_folders( 'Fno' ) ], 'remove_from_requested_folders: remove Fno among F1 => nothing' ) ;
+ is_deeply( [ 'F1' ], [ remove_from_requested_folders( 'F1' ) ], 'remove_from_requested_folders: remove F1 among F1 => F1' ) ;
+ is_deeply( { }, { %requested_folder }, 'remove_from_requested_folders: remove F1 among F1 => %requested_folder emptied' ) ;
+
+ %requested_folder = (
+ 'F1' => 1,
+ 'F2' => 1,
+ ) ;
+ is_deeply( [], [ remove_from_requested_folders( ) ], 'remove_from_requested_folders: remove nothing among F1 F2 => nothing' ) ;
+ is_deeply( [], [ remove_from_requested_folders( 'Fno' ) ], 'remove_from_requested_folders: remove Fno among F1 F2 => nothing' ) ;
+ is_deeply( [ 'F1' ], [ remove_from_requested_folders( 'F1' ) ], 'remove_from_requested_folders: remove F1 among F1 F2 => F1' ) ;
+ is_deeply( { 'F2' => 1 }, { %requested_folder }, 'remove_from_requested_folders: remove F1 among F1 F2 => %requested_folder F2' ) ;
+
+ is_deeply( [], [ remove_from_requested_folders( 'F1' ) ], 'remove_from_requested_folders: remove F1 among F2 => nothing' ) ;
+ is_deeply( [ 'F2' ], [ remove_from_requested_folders( 'F1', 'F2' ) ], 'remove_from_requested_folders: remove F1 F2 among F2 => F2' ) ;
+ is_deeply( {}, { %requested_folder }, 'remove_from_requested_folders: remove F1 among F1 F2 => %requested_folder F2' ) ;
+
+ %requested_folder = (
+ 'F1' => 1,
+ 'F2' => 1,
+ 'F3' => 1,
+ ) ;
+ is_deeply( [ 'F1', 'F2' ], [ remove_from_requested_folders( 'F1', 'F2' ) ], 'remove_from_requested_folders: remove F1 F2 among F1 F2 F3 => F1 F2' ) ;
+ is_deeply( { 'F3' => 1 }, { %requested_folder }, 'remove_from_requested_folders: remove F1 F2 among F1 F2 F3 => %requested_folder F3' ) ;
+
+
+
+ note( 'Leaving tests_remove_from_requested_folders()' ) ;
+ return ;
+}
+
+
+sub remove_from_requested_folders
+{
+ # Delete each given folder from %requested_folder; returns only the
+ # names that were actually present (hence removed), in argument order.
+ my @gone = ( ) ;
+
+ foreach my $name ( @_ )
+ {
+ next unless exists $requested_folder{ $name } ;
+ delete $requested_folder{ $name } ;
+ push @gone, $name ;
+ }
+ return @gone ;
+}
+
+sub compare_lists
+{
+ # Lexicographic string comparison of two array references (bare scalars
+ # are promoted to one-element lists). Returns -1, 0 or +1 like cmp.
+ # An undef list sorts before a defined one; two undefs compare equal.
+ my ( $a_ref, $b_ref ) = @_ ;
+
+ if ( not defined $a_ref ) {
+ # undef vs defined => -1 ; undef vs undef => equal.
+ return defined $b_ref ? $MINUS_ONE : 0 ;
+ }
+ return 1 if ( not defined $b_ref ) ;
+
+ # Promote plain scalars to single-element lists.
+ $a_ref = [ $a_ref ] unless ref $a_ref ;
+ $b_ref = [ $b_ref ] unless ref $b_ref ;
+
+ my $last_index = $MINUS_ONE ;
+ for my $i ( 0 .. $#{ $a_ref } ) {
+ $last_index = $i ;
+ # Second list exhausted first: first list is greater.
+ return 1 if ( $i > $#{ $b_ref } ) ;
+ my $order = $a_ref->[ $i ] cmp $b_ref->[ $i ] ;
+ return $order if $order ;
+ }
+ # First list is a strict prefix of the second: first list is smaller.
+ return $MINUS_ONE if ( $last_index < $#{ $b_ref } ) ;
+ # Same length, every element equal.
+ return 0 ;
+}
+
+# tests_compare_lists: unit tests for compare_lists() covering undef
+# handling, scalar promotion, unequal lengths, numeric-as-string and
+# string element comparison.
+sub tests_compare_lists
+{
+ note( 'Entering tests_compare_lists()' ) ;
+
+ my $empty_list_ref = [];
+
+ ok( 0 == compare_lists() , 'compare_lists, no args');
+ ok( 0 == compare_lists(undef) , 'compare_lists, undef = nothing');
+ ok( 0 == compare_lists(undef, undef) , 'compare_lists, undef = undef');
+ ok($MINUS_ONE == compare_lists(undef , []) , 'compare_lists, undef < []');
+ ok($MINUS_ONE == compare_lists(undef , [1]) , 'compare_lists, undef < [1]');
+ ok($MINUS_ONE == compare_lists(undef , [0]) , 'compare_lists, undef < [0]');
+ ok(+1 == compare_lists([]) , 'compare_lists, [] > nothing');
+ ok(+1 == compare_lists([], undef) , 'compare_lists, [] > undef');
+ ok( 0 == compare_lists([] , []) , 'compare_lists, [] = []');
+
+ ok($MINUS_ONE == compare_lists([] , [1]) , 'compare_lists, [] < [1]');
+ ok(+1 == compare_lists([1] , []) , 'compare_lists, [1] > []');
+
+
+ ok( 0 == compare_lists([1], 1 ) , 'compare_lists, [1] = 1 ') ;
+ ok( 0 == compare_lists( 1 , [1]) , 'compare_lists, 1 = [1]') ;
+ ok( 0 == compare_lists( 1 , 1 ) , 'compare_lists, 1 = 1 ') ;
+ ok($MINUS_ONE == compare_lists( 0 , 1 ) , 'compare_lists, 0 < 1 ') ;
+ ok($MINUS_ONE == compare_lists($MINUS_ONE , 0 ) , 'compare_lists, -1 < 0 ') ;
+ ok($MINUS_ONE == compare_lists( 1 , 2 ) , 'compare_lists, 1 < 2 ') ;
+ ok(+1 == compare_lists( 2 , 1 ) , 'compare_lists, 2 > 1 ') ;
+
+
+ ok( 0 == compare_lists([1,2], [1,2]) , 'compare_lists, [1,2] = [1,2]' ) ;
+ ok($MINUS_ONE == compare_lists([1], [1,2]) , 'compare_lists, [1] < [1,2]' ) ;
+ ok(+1 == compare_lists([2], [1,2]) , 'compare_lists, [2] > [1,2]' ) ;
+ ok($MINUS_ONE == compare_lists([1], [1,1]) , 'compare_lists, [1] < [1,1]' ) ;
+ ok(+1 == compare_lists([1, 1], [1]) , 'compare_lists, [1, 1] > [1]' ) ;
+ ok( 0 == compare_lists([1 .. $NUMBER_20_000] , [1 .. $NUMBER_20_000])
+ , 'compare_lists, [1..20_000] = [1..20_000]' ) ;
+ ok($MINUS_ONE == compare_lists([1], [2]) , 'compare_lists, [1] < [2]') ;
+ ok( 0 == compare_lists([2], [2]) , 'compare_lists, [0] = [2]') ;
+ ok(+1 == compare_lists([2], [1]) , 'compare_lists, [2] > [1]') ;
+
+ ok($MINUS_ONE == compare_lists(['a'], ['b']) , 'compare_lists, ["a"] < ["b"]') ;
+ ok( 0 == compare_lists(['a'], ['a']) , 'compare_lists, ["a"] = ["a"]') ;
+ ok( 0 == compare_lists(['ab'], ['ab']) , 'compare_lists, ["ab"] = ["ab"]') ;
+ ok(+1 == compare_lists(['b'], ['a']) , 'compare_lists, ["b"] > ["a"]') ;
+ ok($MINUS_ONE == compare_lists(['a'], ['aa']) , 'compare_lists, ["a"] < ["aa"]') ;
+ ok($MINUS_ONE == compare_lists(['a'], ['a', 'a']), 'compare_lists, ["a"] < ["a", "a"]') ;
+ ok( 0 == compare_lists([split q{ }, 'a b' ], ['a', 'b']), 'compare_lists, split') ;
+ ok( 0 == compare_lists([sort split q{ }, 'b a' ], ['a', 'b']), 'compare_lists, sort split') ;
+
+ note( 'Leaving tests_compare_lists()' ) ;
+ return ;
+}
+
+
+sub guess_prefix
+{
+ # Guess the IMAP namespace prefix from a folder listing: "INBOX." or
+ # "INBOX/" when every non-INBOX folder lives under INBOX, empty otherwise.
+ my $guess = q{} ;
+
+ foreach my $name ( @_ ) {
+ # The INBOX folder itself tells nothing about the prefix.
+ next if ( $name =~ m{^INBOX$}xi ) ;
+ if ( $name !~ m{^INBOX}xi ) {
+ # Any folder outside INBOX means there is no prefix at all.
+ $guess = q{} ;
+ last ;
+ }
+ if ( $name =~ m{^(INBOX(?:\.|\/))}xi ) {
+ # Remember "INBOX." or "INBOX/" as seen in the listing.
+ $guess = $1 ;
+ }
+ }
+ return $guess ;
+}
+
+# tests_guess_prefix: unit tests for guess_prefix() covering empty input,
+# lone INBOX, INBOX./INBOX/ hierarchies (both cases) and mixed listings
+# that defeat the guess.
+sub tests_guess_prefix
+{
+ note( 'Entering tests_guess_prefix()' ) ;
+
+ is( guess_prefix( ), q{}, 'guess_prefix: no args => empty string' ) ;
+ is( q{} , guess_prefix( 'INBOX' ), 'guess_prefix: INBOX alone' ) ;
+ is( q{} , guess_prefix( 'Inbox' ), 'guess_prefix: Inbox alone' ) ;
+ is( q{} , guess_prefix( 'INBOX' ), 'guess_prefix: INBOX alone' ) ;
+ is( 'INBOX/' , guess_prefix( 'INBOX', 'INBOX/Junk' ), 'guess_prefix: INBOX INBOX/Junk' ) ;
+ is( 'INBOX.' , guess_prefix( 'INBOX', 'INBOX.Junk' ), 'guess_prefix: INBOX INBOX.Junk' ) ;
+ is( 'Inbox/' , guess_prefix( 'Inbox', 'Inbox/Junk' ), 'guess_prefix: Inbox Inbox/Junk' ) ;
+ is( 'Inbox.' , guess_prefix( 'Inbox', 'Inbox.Junk' ), 'guess_prefix: Inbox Inbox.Junk' ) ;
+ is( 'INBOX/' , guess_prefix( 'INBOX', 'INBOX/Junk', 'INBOX/rrr' ), 'guess_prefix: INBOX INBOX/Junk INBOX/rrr' ) ;
+ is( q{} , guess_prefix( 'INBOX', 'INBOX/Junk', 'INBOX/rrr', 'zzz' ), 'guess_prefix: INBOX INBOX/Junk INBOX/rrr zzz' ) ;
+ is( q{} , guess_prefix( 'INBOX', 'Junk' ), 'guess_prefix: INBOX Junk' ) ;
+ is( q{} , guess_prefix( 'INBOX', 'Junk' ), 'guess_prefix: INBOX Junk' ) ;
+
+ note( 'Leaving tests_guess_prefix()' ) ;
+ return ;
+}
+
+# get_prefix: determine the folder-name prefix for one side ($Side is the
+# label "Host1"/"Host2" used in messages). A prefix given on the command
+# line ($prefix_in via option $prefix_opt) always wins; otherwise the
+# NAMESPACE capability is queried, falling back to the prefix guessed
+# from the folder listing in $folders_ref. Returns the chosen prefix.
+sub get_prefix
+{
+ my( $imap, $prefix_in, $prefix_opt, $Side, $folders_ref ) = @_ ;
+ my( $prefix_out, $prefix_guessed ) ;
+
+ ( $sync->{ debug } or $sync->{debugfolders} ) and myprint( "$Side: Getting prefix\n" ) ;
+ $prefix_guessed = guess_prefix( @{ $folders_ref } ) ;
+ myprint( "$Side: guessing prefix from folder listing: [$prefix_guessed]\n" ) ;
+ ( $sync->{ debug } or $sync->{debugfolders} ) and myprint( "$Side: Calling namespace capability\n" ) ;
+ if ( $imap->has_capability( 'namespace' ) ) {
+ my $r_namespace = $imap->namespace( ) ;
+ # First personal-namespace prefix reported by the server.
+ $prefix_out = $r_namespace->[0][0][0] ;
+ myprint( "$Side: prefix given by NAMESPACE: [$prefix_out]\n" ) ;
+ if ( defined $prefix_in ) {
+ # Command-line prefix overrides the server's answer.
+ myprint( "$Side: but using [$prefix_in] given by $prefix_opt\n" ) ;
+ $prefix_out = $prefix_in ;
+ return( $prefix_out ) ;
+ }else{
+ # all good
+ return( $prefix_out ) ;
+ }
+ }
+ else{
+ if ( defined $prefix_in ) {
+ myprint( "$Side: using [$prefix_in] given by $prefix_opt\n" ) ;
+ $prefix_out = $prefix_in ;
+ return( $prefix_out ) ;
+ }else{
+ # No NAMESPACE and no option: fall back to the guess and print a hint.
+ myprint(
+ "$Side: No NAMESPACE capability so using guessed prefix [$prefix_guessed]\n",
+ help_to_guess_prefix( $imap, $prefix_opt ) ) ;
+ return( $prefix_guessed ) ;
+ }
+ }
+ # Not reached: every branch above returns.
+ return ;
+}
+
+
+# guess_separator: guess the hierarchy separator from a folder listing by
+# counting candidate characters ( / . \\ \ ) over all names and keeping
+# the most frequent one; defaults to $LAST_RESSORT_SEPARATOR when no
+# candidate occurs at all.
+sub guess_separator
+{
+ my @foldernames = @_ ;
+
+ #return( undef ) unless ( @foldernames ) ;
+
+ my $sep_guessed ;
+ my %counter ;
+ foreach my $folder ( @foldernames ) {
+ $counter{'/'}++ while ( $folder =~ m{/}xg ) ; # count /
+ $counter{'.'}++ while ( $folder =~ m{\.}xg ) ; # count .
+ $counter{'\\\\'}++ while ( $folder =~ m{(\\){2}}xg ) ; # count \\
+ $counter{'\\'}++ while ( $folder =~ m{[^\\](\\){1}(?=[^\\])}xg ) ; # count \
+ }
+ # Candidates sorted by decreasing occurrence count; winner is first.
+ my @race_sorted = sort { $counter{ $b } <=> $counter{ $a } } keys %counter ;
+ $sync->{ debug } and myprint( "@foldernames\n@race_sorted\n", %counter, "\n" ) ;
+ $sep_guessed = shift @race_sorted || $LAST_RESSORT_SEPARATOR ; # / when nothing found.
+ return( $sep_guessed ) ;
+}
+
+# tests_guess_separator: unit tests for guess_separator() covering the
+# default separator, slash/dot rivalry and backslash counting.
+sub tests_guess_separator
+{
+ note( 'Entering tests_guess_separator()' ) ;
+
+ ok( '/' eq guess_separator( ), 'guess_separator: no args' ) ;
+ ok( '/' eq guess_separator( 'abcd' ), 'guess_separator: abcd' ) ;
+ ok( '/' eq guess_separator( 'a/b/c.d' ), 'guess_separator: a/b/c.d' ) ;
+ ok( '.' eq guess_separator( 'a.b/c.d' ), 'guess_separator: a.b/c.d' ) ;
+ ok( '\\\\' eq guess_separator( 'a\\\\b\\\\c.c\\\\d/e/f' ), 'guess_separator: a\\\\b\\\\c.c\\\\d/e/f' ) ;
+ ok( '\\' eq guess_separator( 'a\\b\\c.c\\d/e/f' ), 'guess_separator: a\\b\\c.c\\d/e/f' ) ;
+ ok( '\\' eq guess_separator( 'a\\b' ), 'guess_separator: a\\b' ) ;
+ ok( '\\' eq guess_separator( 'a\\b\\c' ), 'guess_separator: a\\b\\c' ) ;
+
+ note( 'Leaving tests_guess_separator()' ) ;
+ return ;
+}
+
+# get_separator: determine the hierarchy separator for one side. A
+# separator given on the command line ($sep_in via option $sep_opt)
+# always wins; otherwise the NAMESPACE capability is queried, falling
+# back to the separator guessed from the folder listing in $folders_ref.
+sub get_separator
+{
+ my( $imap, $sep_in, $sep_opt, $Side, $folders_ref ) = @_ ;
+ my( $sep_out, $sep_guessed ) ;
+
+ ( $sync->{ debug } or $sync->{debugfolders} ) and myprint( "$Side: Getting separator\n" ) ;
+ $sep_guessed = guess_separator( @{ $folders_ref } ) ;
+ myprint( "$Side: guessing separator from folder listing: [$sep_guessed]\n" ) ;
+
+ ( $sync->{ debug } or $sync->{debugfolders} ) and myprint( "$Side: calling namespace capability\n" ) ;
+ if ( $imap->has_capability( 'namespace' ) )
+ {
+ $sep_out = $imap->separator( ) ;
+ if ( defined $sep_out ) {
+ myprint( "$Side: separator given by NAMESPACE: [$sep_out]\n" ) ;
+ if ( defined $sep_in ) {
+ # Command-line separator overrides the server's answer.
+ myprint( "$Side: but using [$sep_in] given by $sep_opt\n" ) ;
+ $sep_out = $sep_in ;
+ return( $sep_out ) ;
+ }else{
+ return( $sep_out ) ;
+ }
+ }else{
+ # Server advertises NAMESPACE but the request gave nothing usable.
+ if ( defined $sep_in ) {
+ myprint( "$Side: NAMESPACE request failed but using [$sep_in] given by $sep_opt\n" ) ;
+ $sep_out = $sep_in ;
+ return( $sep_out ) ;
+ }else{
+ myprint(
+ "$Side: NAMESPACE request failed so using guessed separator [$sep_guessed]\n",
+ help_to_guess_sep( $imap, $sep_opt ) ) ;
+ return( $sep_guessed ) ;
+ }
+ }
+ }
+ else
+ {
+ if ( defined $sep_in ) {
+ myprint( "$Side: No NAMESPACE capability but using [$sep_in] given by $sep_opt\n" ) ;
+ $sep_out = $sep_in ;
+ return( $sep_out ) ;
+ }else{
+ # No NAMESPACE and no option: fall back to the guess and print a hint.
+ myprint(
+ "$Side: No NAMESPACE capability, so using guessed separator [$sep_guessed]\n",
+ help_to_guess_sep( $imap, $sep_opt ) ) ;
+ return( $sep_guessed ) ;
+ }
+ }
+ # Not reached: every branch above returns.
+ return ;
+}
+
+sub help_to_guess_sep
+{
+ # Build the hint text shown when the separator could not be determined,
+ # naming the $sep_opt option and appending the full folder listing.
+ my ( $imap, $sep_opt ) = @_ ;
+
+ my $help = "You can set the separator character with the $sep_opt option,\n"
+ . "the complete listing of folders may help you to find it\n"
+ . folders_list_to_help( $imap ) ;
+
+ return $help ;
+}
+
+# help_to_guess_prefix: build the hint text shown when the prefix could
+# not be determined, naming the $prefix_opt option and appending the full
+# folder listing of $imap.
+# Fix: corrected the typo "folowing" -> "following" in the user-visible text.
+sub help_to_guess_prefix
+{
+ my( $imap, $prefix_opt ) = @_ ;
+
+ my $help_to_guess_prefix = "You can set the prefix namespace with the $prefix_opt option,\n"
+ . "the following listing of folders may help you to find it:\n"
+ . folders_list_to_help( $imap ) ;
+
+ return( $help_to_guess_prefix ) ;
+}
+
+
+sub folders_list_to_help
+{
+ # Return every folder of $imap, one per line, each wrapped in brackets.
+ my $imap = shift ;
+
+ my $listing = q{} ;
+ foreach my $folder ( $imap->folders ) {
+ $listing .= "[$folder]\n" ;
+ }
+ return $listing ;
+}
+
+# private_folders_separators_and_prefixes: resolve and store in $sync the
+# separator and prefix of each side (h1_sep/h2_sep, h1_prefix/h2_prefix),
+# honoring the --sep1/--sep2 and --prefix1/--prefix2 options, then print
+# the result for both hosts.
+sub private_folders_separators_and_prefixes
+{
+# what are the private folders separators and prefixes for each server ?
+
+ ( $sync->{ debug } or $sync->{debugfolders} ) and myprint( "Getting separators\n" ) ;
+ $sync->{ h1_sep } = get_separator( $sync->{imap1}, $sync->{ sep1 }, '--sep1', 'Host1', \@h1_folders_all ) ;
+ $sync->{ h2_sep } = get_separator( $sync->{imap2}, $sync->{ sep2 }, '--sep2', 'Host2', \@h2_folders_all ) ;
+
+
+ $sync->{ h1_prefix } = get_prefix( $sync->{imap1}, $prefix1, '--prefix1', 'Host1', \@h1_folders_all ) ;
+ $sync->{ h2_prefix } = get_prefix( $sync->{imap2}, $prefix2, '--prefix2', 'Host2', \@h2_folders_all ) ;
+
+ myprint( "Host1: separator and prefix: [$sync->{ h1_sep }][$sync->{ h1_prefix }]\n" ) ;
+ myprint( "Host2: separator and prefix: [$sync->{ h2_sep }][$sync->{ h2_prefix }]\n" ) ;
+ return ;
+}
+
+
+# subfolder1: process the --subfolder1 option. Sanitizes the value,
+# disables automapping, and restricts the sync to that folder subtree via
+# add_subfolder1_to_folderrec(); exits with $EXIT_SUBFOLDER1_NO_EXISTS
+# when the subfolder does not exist on host1. An unusable value resets
+# $mysync->{ subfolder1 } to undef.
+sub subfolder1
+{
+ my $mysync = shift ;
+ my $subfolder1 = sanitize_subfolder( $mysync->{ subfolder1 } ) ;
+
+ if ( $subfolder1 )
+ {
+ # turns off automap
+ myprint( "Turning off automapping folders because of --subfolder1\n" ) ;
+ $mysync->{ automap } = undef ;
+ myprint( "Sanitizing subfolder1: [$mysync->{ subfolder1 }] => [$subfolder1]\n" ) ;
+ $mysync->{ subfolder1 } = $subfolder1 ;
+ if ( ! add_subfolder1_to_folderrec( $mysync ) )
+ {
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EXIT_SUBFOLDER1_NO_EXISTS, "subfolder1 $subfolder1 does not exist\n" ) ;
+ }
+ }
+ else
+ {
+ # Nothing left after sanitizing: behave as if the option was not given.
+ $mysync->{ subfolder1 } = undef ;
+ }
+
+ return ;
+}
+
+# subfolder2: process the --subfolder2 option. Sanitizes the value,
+# disables automapping, and installs the regextrans2 rules that place all
+# host2 folders under the subfolder. An unusable value resets
+# $mysync->{ subfolder2 } to undef.
+sub subfolder2
+{
+ my $mysync = shift ;
+ my $subfolder2 = sanitize_subfolder( $mysync->{ subfolder2 } ) ;
+ if ( $subfolder2 )
+ {
+ # turns off automap
+ myprint( "Turning off automapping folders because of --subfolder2\n" ) ;
+ $mysync->{ automap } = undef ;
+ myprint( "Sanitizing subfolder2: [$mysync->{ subfolder2 }] => [$subfolder2]\n" ) ;
+ $mysync->{ subfolder2 } = $subfolder2 ;
+ set_regextrans2_for_subfolder2( $mysync ) ;
+ }
+ else
+ {
+ # Nothing left after sanitizing: behave as if the option was not given.
+ $mysync->{ subfolder2 } = undef ;
+ }
+
+ return ;
+}
+
+# tests_sanitize_subfolder: unit tests for sanitize_subfolder() covering
+# empty/blank input, edge-blank trimming and forbidden-character removal.
+sub tests_sanitize_subfolder
+{
+ note( 'Entering tests_sanitize_subfolder()' ) ;
+
+ is( undef, sanitize_subfolder( ), 'sanitize_subfolder: no args => undef' ) ;
+ is( undef, sanitize_subfolder( q{} ), 'sanitize_subfolder: empty => undef' ) ;
+ is( undef, sanitize_subfolder( ' ' ), 'sanitize_subfolder: blank => undef' ) ;
+ is( undef, sanitize_subfolder( ' ' ), 'sanitize_subfolder: blanks => undef' ) ;
+ is( 'abcd', sanitize_subfolder( 'abcd' ), 'sanitize_subfolder: abcd => abcd' ) ;
+ is( 'ab cd', sanitize_subfolder( ' ab cd ' ), 'sanitize_subfolder: " ab cd " => "ab cd"' ) ;
+ is( 'abcd', sanitize_subfolder( q{a&~b#\\c[]=d;} ), 'sanitize_subfolder: "a&~b#\\c[]=d;" => "abcd"' ) ;
+ is( 'aA.b-_ 8c/dD', sanitize_subfolder( 'aA.b-_ 8c/dD' ), 'sanitize_subfolder: aA.b-_ 8c/dD => aA.b-_ 8c/dD' ) ;
+ note( 'Leaving tests_sanitize_subfolder()' ) ;
+ return ;
+}
+
+
+sub sanitize_subfolder
+{
+ # Clean a --subfolder1/--subfolder2 value: trim spaces at both ends and
+ # drop every character outside [-_a-zA-Z0-9./ ]. Returns the cleaned
+ # name, or nothing when the input is empty or nothing usable remains.
+ my $name = shift ;
+
+ return if ( ! $name ) ;
+
+ # Trim leading and trailing spaces.
+ $name =~ s,^ +| +$,,g ;
+ # Whitelist of allowed characters; everything else is deleted.
+ $name =~ tr,-_a-zA-Z0-9./ ,,cd ;
+
+ # Everything was filtered out: not a usable subfolder name.
+ return if ( ! $name ) ;
+ return $name ;
+}
+
+
+
+
+
+# tests_add_subfolder1_to_folderrec: unit tests for
+# add_subfolder1_to_folderrec(), driving it through the test globals
+# @folderrec and %h1_folders_all (bare name match, prefixed match, and
+# no match at all).
+sub tests_add_subfolder1_to_folderrec
+{
+ note( 'Entering tests_add_subfolder1_to_folderrec()' ) ;
+
+ is( undef, add_subfolder1_to_folderrec( ), 'add_subfolder1_to_folderrec: undef => undef' ) ;
+ is_deeply( [], [ add_subfolder1_to_folderrec( ) ], 'add_subfolder1_to_folderrec: no args => empty array' ) ;
+ @folderrec = () ;
+ my $mysync = {} ;
+ is_deeply( [ ], [ add_subfolder1_to_folderrec( $mysync ) ], 'add_subfolder1_to_folderrec: empty => empty array' ) ;
+ is_deeply( [ ], [ @folderrec ], 'add_subfolder1_to_folderrec: empty => empty folderrec' ) ;
+ $mysync->{ subfolder1 } = 'SUBI' ;
+ $h1_folders_all{ 'SUBI' } = 1 ;
+ $mysync->{ h1_prefix } = 'INBOX/' ;
+ is_deeply( [ 'SUBI' ], [ add_subfolder1_to_folderrec( $mysync ) ], 'add_subfolder1_to_folderrec: SUBI => SUBI' ) ;
+ is_deeply( [ 'SUBI' ], [ @folderrec ], 'add_subfolder1_to_folderrec: SUBI => folderrec SUBI ' ) ;
+
+ @folderrec = () ;
+ $mysync->{ subfolder1 } = 'SUBO' ;
+ is_deeply( [ ], [ add_subfolder1_to_folderrec( $mysync ) ], 'add_subfolder1_to_folderrec: SUBO no exists => empty array' ) ;
+ is_deeply( [ ], [ @folderrec ], 'add_subfolder1_to_folderrec: SUBO no exists => empty folderrec' ) ;
+ $h1_folders_all{ 'INBOX/SUBO' } = 1 ;
+ is_deeply( [ 'INBOX/SUBO' ], [ add_subfolder1_to_folderrec( $mysync ) ], 'add_subfolder1_to_folderrec: SUBO + INBOX/SUBO exists => INBOX/SUBO' ) ;
+ is_deeply( [ 'INBOX/SUBO' ], [ @folderrec ], 'add_subfolder1_to_folderrec: SUBO + INBOX/SUBO exists => INBOX/SUBO folderrec' ) ;
+
+ note( 'Leaving tests_add_subfolder1_to_folderrec()' ) ;
+ return ;
+}
+
+
+# add_subfolder1_to_folderrec: when --subfolder1 is set, act like
+# --folderrec on that subfolder. Tries the name as given, then with the
+# host1 prefix prepended, and pushes the first match onto @folderrec.
+# Returns the (possibly unchanged) @folderrec, or nothing when $mysync
+# or its subfolder1 is missing.
+# Fix: corrected the user-visible message "Nor folder ... nor ..." to
+# proper English "Neither folder ... nor ...".
+sub add_subfolder1_to_folderrec
+{
+ my $mysync = shift ;
+ if ( ! $mysync || ! $mysync->{ subfolder1 } )
+ {
+ return ;
+ }
+
+ my $subfolder1 = $mysync->{ subfolder1 } ;
+ my $subfolder1_extended = $mysync->{ h1_prefix } . $subfolder1 ;
+
+ if ( exists $h1_folders_all{ $subfolder1 } )
+ {
+ myprint( qq{Acting like --folderrec "$subfolder1"\n} ) ;
+ push @folderrec, $subfolder1 ;
+ }
+ elsif ( exists $h1_folders_all{ $subfolder1_extended } )
+ {
+ myprint( qq{Acting like --folderrec "$subfolder1_extended"\n} ) ;
+ push @folderrec, $subfolder1_extended ;
+ }
+ else
+ {
+ myprint( qq{Neither folder "$subfolder1" nor "$subfolder1_extended" exists on host1\n} ) ;
+ }
+ return @folderrec ;
+}
+
+# set_regextrans2_for_subfolder2: prepend three --regextrans2 rules that
+# move every host2 folder under $mysync->{ subfolder2 }.
+# NOTE(review): the rules are single-quoted q(...) strings, so the
+# $mysync->{...} parts are kept literal here and appear to be interpolated
+# later when the regexes are applied -- confirm against the regextrans2
+# evaluation code.
+sub set_regextrans2_for_subfolder2
+{
+ my $mysync = shift ;
+
+
+ unshift @{ $mysync->{ regextrans2 } },
+ q(s,^$mysync->{ h2_prefix }(.*),$mysync->{ h2_prefix }$mysync->{ subfolder2 }$mysync->{ h2_sep }$1,),
+ q(s,^INBOX$,$mysync->{ h2_prefix }$mysync->{ subfolder2 }$mysync->{ h2_sep }INBOX,),
+ q(s,^($mysync->{ h2_prefix }){2},$mysync->{ h2_prefix },);
+
+ #myprint( "@{ $mysync->{ regextrans2 } }\n" ) ;
+ return ;
+}
+
+
+
+# Looks like no globals here
+
+# tests_imap2_folder_name: unit tests for imap2_folder_name() covering
+# separator translation, f1f2 explicit mappings, --regextrans2 rules,
+# --fixslash2, prefix handling, and the --subfolder1/--subfolder2
+# scenarios (including the Gmail-like same-separator case).
+sub tests_imap2_folder_name
+{
+ note( 'Entering tests_imap2_folder_name()' ) ;
+
+ my $mysync = {} ;
+ $mysync->{ h1_prefix } = q{} ;
+ $mysync->{ h2_prefix } = q{} ;
+ $mysync->{ h1_sep } = '/';
+ $mysync->{ h2_sep } = '.';
+
+ $mysync->{ debug } and myprint( <<"EOS"
+prefix1: [$mysync->{ h1_prefix }]
+prefix2: [$mysync->{ h2_prefix }]
+sep1: [$sync->{ h1_sep }]
+sep2: [$sync->{ h2_sep }]
+EOS
+) ;
+
+ $mysync->{ fixslash2 } = 0 ;
+ is( q{INBOX}, imap2_folder_name( $mysync, q{} ), 'imap2_folder_name: empty string' ) ;
+ is( 'blabla', imap2_folder_name( $mysync, 'blabla' ), 'imap2_folder_name: blabla' ) ;
+ is('spam.spam', imap2_folder_name( $mysync, 'spam/spam' ), 'imap2_folder_name: spam/spam' ) ;
+
+ is( 'spam/spam', imap2_folder_name( $mysync, 'spam.spam' ), 'imap2_folder_name: spam.spam') ;
+ is( 'spam.spam/spam', imap2_folder_name( $mysync, 'spam/spam.spam' ), 'imap2_folder_name: spam/spam.spam' ) ;
+ is( 's pam.spam/sp am', imap2_folder_name( $mysync, 's pam/spam.sp am' ), 'imap2_folder_name: s pam/spam.sp am' ) ;
+
+ $mysync->{f1f2h}{ 'auto' } = 'moto' ;
+ is( 'moto', imap2_folder_name( $mysync, 'auto' ), 'imap2_folder_name: auto' ) ;
+ $mysync->{f1f2h}{ 'auto/auto' } = 'moto x 2' ;
+ is( 'moto x 2', imap2_folder_name( $mysync, 'auto/auto' ), 'imap2_folder_name: auto/auto' ) ;
+
+ @{ $mysync->{ regextrans2 } } = ( 's,/,X,g' ) ;
+ is( q{INBOX}, imap2_folder_name( $mysync, q{} ), 'imap2_folder_name: empty string [s,/,X,g]' ) ;
+ is( 'blabla', imap2_folder_name( $mysync, 'blabla' ), 'imap2_folder_name: blabla [s,/,X,g]' ) ;
+ is('spam.spam', imap2_folder_name( $mysync, 'spam/spam'), 'imap2_folder_name: spam/spam [s,/,X,g]');
+ is('spamXspam', imap2_folder_name( $mysync, 'spam.spam'), 'imap2_folder_name: spam.spam [s,/,X,g]');
+ is('spam.spamXspam', imap2_folder_name( $mysync, 'spam/spam.spam'), 'imap2_folder_name: spam/spam.spam [s,/,X,g]');
+
+ @{ $mysync->{ regextrans2 } } = ( 's, ,_,g' ) ;
+ is('blabla', imap2_folder_name( $mysync, 'blabla'), 'imap2_folder_name: blabla [s, ,_,g]');
+ is('bla_bla', imap2_folder_name( $mysync, 'bla bla'), 'imap2_folder_name: blabla [s, ,_,g]');
+
+ @{ $mysync->{ regextrans2 } } = ( q{s,(.*),\U$1,} ) ;
+ is( 'BLABLA', imap2_folder_name( $mysync, 'blabla' ), q{imap2_folder_name: blabla [s,\U(.*)\E,$1,]} ) ;
+
+ $mysync->{ fixslash2 } = 1 ;
+ @{ $mysync->{ regextrans2 } } = ( ) ;
+ is(q{INBOX}, imap2_folder_name( $mysync, q{}), 'imap2_folder_name: empty string');
+ is('blabla', imap2_folder_name( $mysync, 'blabla'), 'imap2_folder_name: blabla');
+ is('spam.spam', imap2_folder_name( $mysync, 'spam/spam'), 'imap2_folder_name: spam/spam -> spam.spam');
+ is('spam_spam', imap2_folder_name( $mysync, 'spam.spam'), 'imap2_folder_name: spam.spam -> spam_spam');
+ is('spam.spam_spam', imap2_folder_name( $mysync, 'spam/spam.spam'), 'imap2_folder_name: spam/spam.spam -> spam.spam_spam');
+ is('s pam.spam_spa m', imap2_folder_name( $mysync, 's pam/spam.spa m'), 'imap2_folder_name: s pam/spam.spa m -> s pam.spam_spa m');
+
+ $mysync->{ h1_sep } = '.';
+ $mysync->{ h2_sep } = '/';
+ is( q{INBOX}, imap2_folder_name( $mysync, q{}), 'imap2_folder_name: empty string');
+ is('blabla', imap2_folder_name( $mysync, 'blabla'), 'imap2_folder_name: blabla');
+ is('spam.spam', imap2_folder_name( $mysync, 'spam/spam'), 'imap2_folder_name: spam/spam -> spam.spam');
+ is('spam/spam', imap2_folder_name( $mysync, 'spam.spam'), 'imap2_folder_name: spam.spam -> spam/spam');
+ is('spam.spam/spam', imap2_folder_name( $mysync, 'spam/spam.spam'), 'imap2_folder_name: spam/spam.spam -> spam.spam/spam');
+
+
+
+ $mysync->{ fixslash2 } = 0 ;
+ $mysync->{ h1_prefix } = q{ };
+
+ is( 'spam.spam/spam', imap2_folder_name( $mysync, 'spam/spam.spam' ), 'imap2_folder_name: spam/spam.spam -> spam.spam/spam' ) ;
+ is( 'spam.spam/spam', imap2_folder_name( $mysync, ' spam/spam.spam' ), 'imap2_folder_name: spam/spam.spam -> spam.spam/spam' ) ;
+
+ $mysync->{ h1_sep } = '.' ;
+ $mysync->{ h2_sep } = '/' ;
+ $mysync->{ h1_prefix } = 'INBOX.' ;
+ $mysync->{ h2_prefix } = q{} ;
+ @{ $mysync->{ regextrans2 } } = ( q{s,(.*),\U$1,} ) ;
+ is( 'BLABLA', imap2_folder_name( $mysync, 'blabla' ), 'imap2_folder_name: blabla' ) ;
+ is( 'TEST/TEST/TEST/TEST', imap2_folder_name( $mysync, 'INBOX.TEST.test.Test.tesT' ), 'imap2_folder_name: INBOX.TEST.test.Test.tesT' ) ;
+ @{ $mysync->{ regextrans2 } } = ( q{s,(.*),\L$1,} ) ;
+ is( 'test/test/test/test', imap2_folder_name( $mysync, 'INBOX.TEST.test.Test.tesT' ), 'imap2_folder_name: INBOX.TEST.test.Test.tesT' ) ;
+
+ # INBOX
+ $mysync = {} ;
+ $mysync->{ h1_prefix } = q{Pf1.} ;
+ $mysync->{ h2_prefix } = q{Pf2/} ;
+ $mysync->{ h1_sep } = '.';
+ $mysync->{ h2_sep } = '/';
+
+ #
+ #$mysync->{ debug } = 1 ;
+ is( 'Pf2/F1/F2/F3', imap2_folder_name( $mysync, 'F1.F2.F3' ), 'imap2_folder_name: F1.F2.F3 -> Pf2/F1/F2/F3' ) ;
+ is( 'Pf2/F1/INBOX', imap2_folder_name( $mysync, 'F1.INBOX' ), 'imap2_folder_name: F1.INBOX -> Pf2/F1/INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'INBOX' ), 'imap2_folder_name: INBOX -> INBOX' ) ;
+
+ is( 'Pf2/F1/F2/F3', imap2_folder_name( $mysync, 'Pf1.F1.F2.F3' ), 'imap2_folder_name: Pf1.F1.F2.F3 -> Pf2/F1/F2/F3' ) ;
+ is( 'Pf2/F1/INBOX', imap2_folder_name( $mysync, 'Pf1.F1.INBOX' ), 'imap2_folder_name: Pf1.F1.INBOX -> Pf2/F1/INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'Pf1.INBOX' ), 'imap2_folder_name: Pf1.INBOX -> INBOX' ) ; # not Pf2/INBOX: Yes I can!
+
+
+
+ # subfolder2
+ $mysync = {} ;
+ $mysync->{ h1_prefix } = q{} ;
+ $mysync->{ h2_prefix } = q{} ;
+ $mysync->{ h1_sep } = '/';
+ $mysync->{ h2_sep } = '.';
+
+
+ set_regextrans2_for_subfolder2( $mysync ) ;
+ $mysync->{ subfolder2 } = 'S1.S2' ;
+ is( 'S1.S2.F1.F2.F3', imap2_folder_name( $mysync, 'F1/F2/F3' ), 'imap2_folder_name: F1/F2/F3 -> S1.S2.F1.F2.F3' ) ;
+ is( 'S1.S2.INBOX', imap2_folder_name( $mysync, 'INBOX' ), 'imap2_folder_name: F1/F2/F3 -> S1.S2.INBOX' ) ;
+
+ $mysync = {} ;
+ $mysync->{ h1_prefix } = q{Pf1/} ;
+ $mysync->{ h2_prefix } = q{Pf2.} ;
+ $mysync->{ h1_sep } = '/';
+ $mysync->{ h2_sep } = '.';
+ #$mysync->{ debug } = 1 ;
+
+ set_regextrans2_for_subfolder2( $mysync ) ;
+ $mysync->{ subfolder2 } = 'Pf2.S1.S2' ;
+ is( 'Pf2.S1.S2.F1.F2.F3', imap2_folder_name( $mysync, 'F1/F2/F3' ), 'imap2_folder_name: F1/F2/F3 -> Pf2.S1.S2.F1.F2.F3' ) ;
+ is( 'Pf2.S1.S2.INBOX', imap2_folder_name( $mysync, 'INBOX' ), 'imap2_folder_name: INBOX -> Pf2.S1.S2.INBOX' ) ;
+ is( 'Pf2.S1.S2.F1.F2.F3', imap2_folder_name( $mysync, 'Pf1/F1/F2/F3' ), 'imap2_folder_name: F1/F2/F3 -> Pf2.S1.S2.F1.F2.F3' ) ;
+ is( 'Pf2.S1.S2.INBOX', imap2_folder_name( $mysync, 'Pf1/INBOX' ), 'imap2_folder_name: INBOX -> Pf2.S1.S2.INBOX' ) ;
+
+ # subfolder1
+ # scenario as the reverse of the previous tests, separators point of vue
+ $mysync = {} ;
+ $mysync->{ h1_prefix } = q{Pf1.} ;
+ $mysync->{ h2_prefix } = q{Pf2/} ;
+ $mysync->{ h1_sep } = '.';
+ $mysync->{ h2_sep } = '/';
+ #$mysync->{ debug } = 1 ;
+
+ $mysync->{ subfolder1 } = 'S1.S2' ;
+ is( 'Pf2/F1/F2/F3', imap2_folder_name( $mysync, 'S1.S2.F1.F2.F3' ), 'imap2_folder_name: S1.S2.F1.F2.F3 -> Pf2/F1/F2/F3' ) ;
+ is( 'Pf2/F1/F2/F3', imap2_folder_name( $mysync, 'Pf1.S1.S2.F1.F2.F3' ), 'imap2_folder_name: Pf1.S1.S2.F1.F2.F3 -> Pf2/F1/F2/F3' ) ;
+
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1.S2.INBOX' ), 'imap2_folder_name: S1.S2.INBOX -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1.S2' ), 'imap2_folder_name: S1.S2 -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1.S2.' ), 'imap2_folder_name: S1.S2. -> INBOX' ) ;
+
+ is( 'INBOX', imap2_folder_name( $mysync, 'Pf1.S1.S2.INBOX' ), 'imap2_folder_name: Pf1.S1.S2.INBOX -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'Pf1.S1.S2' ), 'imap2_folder_name: Pf1.S1.S2 -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'Pf1.S1.S2.' ), 'imap2_folder_name: Pf1.S1.S2. -> INBOX' ) ;
+
+
+ $mysync->{ subfolder1 } = 'S1.S2.' ;
+ is( 'Pf2/F1/F2/F3', imap2_folder_name( $mysync, 'S1.S2.F1.F2.F3' ), 'imap2_folder_name: S1.S2.F1.F2.F3 -> Pf2/F1/F2/F3' ) ;
+ is( 'Pf2/F1/F2/F3', imap2_folder_name( $mysync, 'Pf1.S1.S2.F1.F2.F3' ), 'imap2_folder_name: Pf1.S1.S2.F1.F2.F3 -> Pf2/F1/F2/F3' ) ;
+
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1.S2.INBOX' ), 'imap2_folder_name: S1.S2.INBOX -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1.S2' ), 'imap2_folder_name: S1.S2 -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1.S2.' ), 'imap2_folder_name: S1.S2. -> INBOX' ) ;
+
+ is( 'INBOX', imap2_folder_name( $mysync, 'Pf1.S1.S2.INBOX' ), 'imap2_folder_name: Pf1.S1.S2.INBOX -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'Pf1.S1.S2' ), 'imap2_folder_name: Pf1.S1.S2 -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'Pf1.S1.S2.' ), 'imap2_folder_name: Pf1.S1.S2. -> INBOX' ) ;
+
+
+ # subfolder1
+ # scenario as Gmail
+ $mysync = {} ;
+ $mysync->{ h1_prefix } = q{} ;
+ $mysync->{ h2_prefix } = q{} ;
+ $mysync->{ h1_sep } = '/';
+ $mysync->{ h2_sep } = '/';
+ #$mysync->{ debug } = 1 ;
+
+ $mysync->{ subfolder1 } = 'S1/S2' ;
+ is( 'F1/F2/F3', imap2_folder_name( $mysync, 'S1/S2/F1/F2/F3' ), 'imap2_folder_name: S1/S2/F1/F2/F3 -> F1/F2/F3' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1/S2/INBOX' ), 'imap2_folder_name: S1/S2/INBOX -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1/S2' ), 'imap2_folder_name: S1/S2 -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1/S2/' ), 'imap2_folder_name: S1/S2/ -> INBOX' ) ;
+
+ $mysync->{ subfolder1 } = 'S1/S2/' ;
+ is( 'F1/F2/F3', imap2_folder_name( $mysync, 'S1/S2/F1/F2/F3' ), 'imap2_folder_name: S1/S2/F1/F2/F3 -> F1/F2/F3' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1/S2/INBOX' ), 'imap2_folder_name: S1/S2/INBOX -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1/S2' ), 'imap2_folder_name: S1/S2 -> INBOX' ) ;
+ is( 'INBOX', imap2_folder_name( $mysync, 'S1/S2/' ), 'imap2_folder_name: S1/S2/ -> INBOX' ) ;
+
+
+ note( 'Leaving tests_imap2_folder_name()' ) ;
+ return ;
+}
+
+
+# Global variables to remove:
+# None?
+
+
+# imap2_folder_name( $mysync, $h1_fold ) -> host2 folder name
+# Maps a host1 folder name to its host2 counterpart, applying in order:
+# 1) an explicit --f1f2 mapping, 2) an automap mapping, 3) --subfolder1
+# stripping, then prefix/separator inversion and --regextrans2 rules.
+sub imap2_folder_name
+{
+ my $mysync = shift ;
+ my ( $h1_fold ) = shift ;
+ my ( $h2_fold ) ;
+ # An explicit user mapping always wins
+ if ( $mysync->{f1f2h}{ $h1_fold } ) {
+ $h2_fold = $mysync->{f1f2h}{ $h1_fold } ;
+ ( $mysync->{ debug } or $mysync->{debugfolders} ) and myprint( "f1f2 [$h1_fold] -> [$h2_fold]\n" ) ;
+ return( $h2_fold ) ;
+ }
+ # Then an automap mapping, if any
+ if ( $mysync->{f1f2auto}{ $h1_fold } ) {
+ $h2_fold = $mysync->{f1f2auto}{ $h1_fold } ;
+ ( $mysync->{ debug } or $mysync->{debugfolders} ) and myprint( "automap [$h1_fold] -> [$h2_fold]\n" ) ;
+ return( $h2_fold ) ;
+ }
+
+ if ( $mysync->{ subfolder1 } )
+ {
+ my $esc_h1_sep = "\\" . $mysync->{ h1_sep } ;
+ # case where subfolder1 has the sep1 at the end, then remove it
+ my $part_to_removed = remove_last_char_if_is( $mysync->{ subfolder1 }, $mysync->{ h1_sep } ) ;
+ # remove the subfolder1 part and the sep1 if present after.
+ # \Q...\E: the folder name is data, not a pattern; without it a
+ # name containing regex metacharacters ( ( [ + * ... ) would
+ # mis-match or abort with a regex compile error.
+ $h1_fold =~ s{\Q$part_to_removed\E($esc_h1_sep)?}{} ;
+ #myprint( "h1_fold=$h1_fold\n" ) ;
+ }
+
+ # An empty or prefix-only leftover means the subfolder root itself
+ if ( ( q{} eq $h1_fold ) or ( $mysync->{ h1_prefix } eq $h1_fold ) )
+ {
+ $h1_fold = 'INBOX' ;
+ }
+
+ $h2_fold = prefix_seperator_invertion( $mysync, $h1_fold ) ;
+ $h2_fold = regextrans2( $mysync, $h2_fold ) ;
+ return( $h2_fold ) ;
+}
+
+
+# Unit tests for remove_last_char_if_is(): undef/empty argument handling
+# and removal only when the last character equals the given one.
+sub tests_remove_last_char_if_is
+{
+ note( 'Entering tests_remove_last_char_if_is()' ) ;
+
+ is( undef, remove_last_char_if_is( ), 'remove_last_char_if_is: no args => undef' ) ;
+ is( q{}, remove_last_char_if_is( q{} ), 'remove_last_char_if_is: empty => empty' ) ;
+ is( q{}, remove_last_char_if_is( q{}, 'Z' ), 'remove_last_char_if_is: empty Z => empty' ) ;
+ is( q{}, remove_last_char_if_is( 'Z', 'Z' ), 'remove_last_char_if_is: Z Z => empty' ) ;
+ is( 'abc', remove_last_char_if_is( 'abcZ', 'Z' ), 'remove_last_char_if_is: abcZ Z => abc' ) ;
+ is( 'abcY', remove_last_char_if_is( 'abcY', 'Z' ), 'remove_last_char_if_is: abcY Z => abcY' ) ;
+ note( 'Leaving tests_remove_last_char_if_is()' ) ;
+ return ;
+}
+
+
+
+
+# remove_last_char_if_is( $string, $char ) -> $string, with its final
+# character chopped off when that character equals $char.
+# Returns nothing when $string is undef; returns $string unchanged when
+# $char is undef or does not match the last character.
+sub remove_last_char_if_is
+{
+ my ( $string, $char ) = @_ ;
+
+ return if ! defined $string ;
+ return $string if ! defined $char ;
+
+ if ( $char eq substr( $string, -1 ) )
+ {
+ chop $string ;
+ }
+ return $string ;
+}
+
+# Unit tests for prefix_seperator_invertion(): no-op without a $mysync,
+# no-op with empty prefixes and identical separators, then prefix swap
+# (PPP stripped, QQQ added) and separator swap (s <-> t) combined.
+sub tests_prefix_seperator_invertion
+{
+ note( 'Entering tests_prefix_seperator_invertion()' ) ;
+
+ is( undef, prefix_seperator_invertion( ), 'prefix_seperator_invertion: no args => undef' ) ;
+ is( q{}, prefix_seperator_invertion( undef, q{} ), 'prefix_seperator_invertion: empty string => empty string' ) ;
+ is( 'lalala', prefix_seperator_invertion( undef, 'lalala' ), 'prefix_seperator_invertion: lalala => lalala' ) ;
+ is( 'lal/ala', prefix_seperator_invertion( undef, 'lal/ala' ), 'prefix_seperator_invertion: lal/ala => lal/ala' ) ;
+ is( 'lal.ala', prefix_seperator_invertion( undef, 'lal.ala' ), 'prefix_seperator_invertion: lal.ala => lal.ala' ) ;
+ is( '////', prefix_seperator_invertion( undef, '////' ), 'prefix_seperator_invertion: //// => ////' ) ;
+ is( '.....', prefix_seperator_invertion( undef, '.....' ), 'prefix_seperator_invertion: ..... => .....' ) ;
+
+ my $mysync = {
+ h1_prefix => q{},
+ h2_prefix => q{},
+ h1_sep => '/',
+ h2_sep => '/',
+ } ;
+
+ is( q{}, prefix_seperator_invertion( $mysync, q{} ), 'prefix_seperator_invertion: $mysync empty string => empty string' ) ;
+ is( 'lalala', prefix_seperator_invertion( $mysync, 'lalala' ), 'prefix_seperator_invertion: $mysync lalala => lalala' ) ;
+ is( 'lal/ala', prefix_seperator_invertion( $mysync, 'lal/ala' ), 'prefix_seperator_invertion: $mysync lal/ala => lal/ala' ) ;
+ is( 'lal.ala', prefix_seperator_invertion( $mysync, 'lal.ala' ), 'prefix_seperator_invertion: $mysync lal.ala => lal.ala' ) ;
+ is( '////', prefix_seperator_invertion( $mysync, '////' ), 'prefix_seperator_invertion: $mysync //// => ////' ) ;
+ is( '.....', prefix_seperator_invertion( $mysync, '.....' ), 'prefix_seperator_invertion: $mysync ..... => .....' ) ;
+
+ $mysync = {
+ h1_prefix => 'PPP',
+ h2_prefix => 'QQQ',
+ h1_sep => 's',
+ h2_sep => 't',
+ } ;
+
+ is( q{QQQ}, prefix_seperator_invertion( $mysync, q{} ), 'prefix_seperator_invertion: PPPQQQst empty string => QQQ' ) ;
+ is( 'QQQlalala', prefix_seperator_invertion( $mysync, 'lalala' ), 'prefix_seperator_invertion: PPPQQQst lalala => QQQlalala' ) ;
+ is( 'QQQlal/ala', prefix_seperator_invertion( $mysync, 'lal/ala' ), 'prefix_seperator_invertion: PPPQQQst lal/ala => QQQlal/ala' ) ;
+ is( 'QQQlal.ala', prefix_seperator_invertion( $mysync, 'lal.ala' ), 'prefix_seperator_invertion: PPPQQQst lal.ala => QQQlal.ala' ) ;
+ is( 'QQQ////', prefix_seperator_invertion( $mysync, '////' ), 'prefix_seperator_invertion: PPPQQQst //// => QQQ////' ) ;
+ is( 'QQQ.....', prefix_seperator_invertion( $mysync, '.....' ), 'prefix_seperator_invertion: PPPQQQst ..... => QQQ.....' ) ;
+
+ is( 'QQQPlalala', prefix_seperator_invertion( $mysync, 'PPPPlalala' ), 'prefix_seperator_invertion: PPPQQQst PPPPlalala => QQQPlalala' ) ;
+ is( 'QQQ', prefix_seperator_invertion( $mysync, 'PPP' ), 'prefix_seperator_invertion: PPPQQQst PPP => QQQ' ) ;
+ is( 'QQQttt', prefix_seperator_invertion( $mysync, 'sss' ), 'prefix_seperator_invertion: PPPQQQst sss => QQQttt' ) ;
+ is( 'QQQt', prefix_seperator_invertion( $mysync, 's' ), 'prefix_seperator_invertion: PPPQQQst s => QQQt' ) ;
+ is( 'QQQtAAAtBBB', prefix_seperator_invertion( $mysync, 'PPPsAAAsBBB' ), 'prefix_seperator_invertion: PPPQQQst PPPsAAAsBBB => QQQtAAAtBBB' ) ;
+
+ note( 'Leaving tests_prefix_seperator_invertion()' ) ;
+ return ;
+}
+
+# Global variables to remove:
+
+
+# prefix_seperator_invertion( $mysync, $h1_fold ) -> host2 folder name
+# Strips the host1 prefix from $h1_fold, swaps host1/host2 separators,
+# then prepends the host2 prefix -- except for INBOX, which is matched
+# case-insensitively and never gets a prefix.
+# NOTE(review): a prefix or separator equal to the string '0' would be
+# treated as absent by the || defaults below -- presumably never happens
+# with real IMAP namespaces; confirm.
+sub prefix_seperator_invertion
+{
+ my $mysync = shift ;
+ my $h1_fold = shift ;
+ my $h2_fold ;
+
+ if ( not defined $h1_fold ) { return ; }
+
+ my $my_h1_prefix = $mysync->{ h1_prefix } || q{} ;
+ my $my_h2_prefix = $mysync->{ h2_prefix } || q{} ;
+ my $my_h1_sep = $mysync->{ h1_sep } || '/' ;
+ my $my_h2_sep = $mysync->{ h2_sep } || '/' ;
+
+ # first we remove the prefix
+ $h1_fold =~ s/^\Q$my_h1_prefix\E//x ;
+ ( $mysync->{ debug } or $mysync->{debugfolders} ) and myprint( "removed host1 prefix: [$h1_fold]\n" ) ;
+ $h2_fold = separator_invert( $mysync, $h1_fold, $my_h1_sep, $my_h2_sep ) ;
+ ( $mysync->{ debug } or $mysync->{debugfolders} ) and myprint( "inverted separators: [$h2_fold]\n" ) ;
+
+ # Adding the prefix supplied by namespace or the --prefix2 option
+ # except for INBOX or Inbox
+ if ( $h2_fold !~ m/^INBOX$/xi )
+ {
+ $h2_fold = $my_h2_prefix . $h2_fold ;
+ }
+
+ ( $mysync->{ debug } or $mysync->{debugfolders} ) and myprint( "added host2 prefix: [$h2_fold]\n" ) ;
+ return( $h2_fold ) ;
+}
+
+# Unit tests for separator_invert(): argument checking, identical and
+# differing separators, and the --fixslash2 '/'-to-'_' replacement.
+sub tests_separator_invert
+{
+ note( 'Entering tests_separator_invert()' ) ;
+
+ my $mysync = {} ;
+ $mysync->{ fixslash2 } = 0 ;
+ ok( not( defined separator_invert( ) ), 'separator_invert: no args' ) ;
+ ok( not( defined separator_invert( q{} ) ), 'separator_invert: not enough args' ) ;
+ ok( not( defined separator_invert( q{}, q{} ) ), 'separator_invert: not enough args' ) ;
+
+ ok( q{} eq separator_invert( $mysync, q{}, q{}, q{} ), 'separator_invert: 3 empty strings' ) ;
+ ok( 'lalala' eq separator_invert( $mysync, 'lalala', q{}, q{} ), 'separator_invert: empty separator' ) ;
+ ok( 'lalala' eq separator_invert( $mysync, 'lalala', '/', '/' ), 'separator_invert: same separator /' ) ;
+ ok( 'lal/ala' eq separator_invert( $mysync, 'lal/ala', '/', '/' ), 'separator_invert: same separator / 2' ) ;
+ ok( 'lal.ala' eq separator_invert( $mysync, 'lal/ala', '/', '.' ), 'separator_invert: separators /.' ) ;
+ ok( 'lal/ala' eq separator_invert( $mysync, 'lal.ala', '.', '/' ), 'separator_invert: separators ./' ) ;
+ ok( 'la.l/ala' eq separator_invert( $mysync, 'la/l.ala', '.', '/' ), 'separator_invert: separators ./' ) ;
+
+ ok( 'l/al.ala' eq separator_invert( $mysync, 'l.al/ala', '/', '.' ), 'separator_invert: separators /.' ) ;
+ $mysync->{ fixslash2 } = 1 ;
+ ok( 'l_al.ala' eq separator_invert( $mysync, 'l.al/ala', '/', '.' ), 'separator_invert: separators /.' ) ;
+
+ note( 'Leaving tests_separator_invert()' ) ;
+ return ;
+}
+
+# Global variables to remove:
+#
+# separator_invert( $mysync, $h1_fold, $h1_separator, $h2_separator )
+# Swaps every occurrence of $h1_separator for $h2_separator and vice
+# versa, via a NUL placeholder so the two passes cannot collide.
+# With --fixslash2, any '/' still left is replaced by '_' when host1 used
+# '/' but host2 does not. Returns undef on any missing argument.
+sub separator_invert
+{
+ my( $mysync, $h1_fold, $h1_separator, $h2_separator ) = @_ ;
+
+ return( undef ) if ( not all_defined( $mysync, $h1_fold, $h1_separator, $h2_separator ) ) ;
+ # The separator we hope we'll never encounter: 00000000 == 0x00
+ my $o_sep = "\000" ;
+
+ my $h2_fold = $h1_fold ;
+ # three-step swap: h2_sep -> NUL, h1_sep -> h2_sep, NUL -> h1_sep
+ $h2_fold =~ s,\Q$h2_separator,$o_sep,xg ;
+ $h2_fold =~ s,\Q$h1_separator,$h2_separator,xg ;
+ $h2_fold =~ s,\Q$o_sep,$h1_separator,xg ;
+ $h2_fold =~ s,/,_,xg if( $mysync->{ fixslash2 } and '/' ne $h2_separator and '/' eq $h1_separator ) ;
+ return( $h2_fold ) ;
+}
+
+
+# regextrans2( $mysync, $h2_fold ) -> transformed host2 folder name
+# Applies each --regextrans2 substitution in order via string eval.
+# A malformed user regex increments nb_errors and aborts the whole run.
+sub regextrans2
+{
+ my( $mysync, $h2_fold ) = @_ ;
+ # Transforming the folder name by the --regextrans2 option(s)
+ foreach my $regextrans2 ( @{ $mysync->{ regextrans2 } } ) {
+ my $h2_fold_before = $h2_fold ;
+ # string eval is required: the substitution expression is user input
+ my $ret = eval "\$h2_fold =~ $regextrans2 ; 1 " ;
+ ( $mysync->{ debug } or $mysync->{debugfolders} ) and myprint( "[$h2_fold_before] -> [$h2_fold] using regextrans2 [$regextrans2]\n" ) ;
+ if ( not ( defined $ret ) or $EVAL_ERROR ) {
+ $mysync->{nb_errors}++ ;
+ exit_clean( $mysync, $EX_USAGE,
+ "error: eval regextrans2 '$regextrans2': $EVAL_ERROR\n"
+ ) ;
+ }
+ }
+ return( $h2_fold ) ;
+}
+
+
+# Unit tests for decompose_regex(): empty input and a simple s/LEFT/RIGHT/.
+sub tests_decompose_regex
+{
+ note( 'Entering tests_decompose_regex()' ) ;
+
+ ok( 1, 'decompose_regex 1' ) ;
+ ok( 0 == compare_lists( [ q{}, q{} ], [ decompose_regex( q{} ) ] ), 'decompose_regex empty string' ) ;
+ ok( 0 == compare_lists( [ '.*', 'lala' ], [ decompose_regex( 's/.*/lala/' ) ] ), 'decompose_regex s/.*/lala/' ) ;
+
+ note( 'Leaving tests_decompose_regex()' ) ;
+ return ;
+}
+
+# decompose_regex( $regex ) -> ( $left_part, $right_part )
+# Splits a substitution of the shape s/LEFT/RIGHT/ into its two sides,
+# allowing escaped slashes inside each side. Returns a pair of empty
+# strings when the input does not have that shape.
+sub decompose_regex
+{
+ my $regex = shift ;
+
+ if ( my ( $left_part, $right_part ) = $regex =~ m{^s/((?:[^/]|\\/)+)/((?:[^/]|\\/)+)/}x )
+ {
+ return ( $left_part, $right_part ) if $left_part ;
+ }
+ return ( q{}, q{} ) ;
+}
+
+
+
+# Unit tests for timenext(): undef handling, first call returns roughly
+# the current epoch (timebefore starts at 0), later calls the delta.
+sub tests_timenext
+{
+ note( 'Entering tests_timenext()' ) ;
+
+ is( undef, timenext( ), 'timenext: no args => undef' ) ;
+ my $mysync ;
+ is( undef, timenext( $mysync ), 'timenext: undef => undef' ) ;
+ $mysync = {} ;
+ ok( time - timenext( $mysync ) <= 1e-02, 'timenext: defined first time => ~ time' ) ;
+ ok( timenext( $mysync ) <= 1e-02, 'timenext: second time => less than 1e-02' ) ;
+ ok( timenext( $mysync ) <= 1e-02, 'timenext: third time => less than 1e-02' ) ;
+
+ note( 'Leaving tests_timenext()' ) ;
+ return ;
+}
+
+
+# timenext( $mysync ) -> seconds elapsed since the previous timenext()
+# call on the same $mysync. The first call returns the seconds since the
+# epoch, because timebefore starts at 0. Returns nothing when $mysync is
+# undef. Side effect: stores the current time in $mysync->{ timebefore }.
+sub timenext
+{
+ my $mysync = shift ;
+
+ return if ! defined $mysync ;
+
+ my $timenow = time ;
+ my $timebefore = $mysync->{ timebefore } || 0 ; # epoch on first call
+ $mysync->{ timebefore } = $timenow ;
+ return( $timenow - $timebefore ) ;
+}
+
+
+# Unit tests for timesince(): one-second-ago, now, and missing argument
+# (which counts from epoch 0, hence roughly `time` itself).
+sub tests_timesince
+{
+ note( 'Entering tests_timesince()' ) ;
+
+ ok( timesince( time - 1 ) - 1 <= 1e-02, 'timesince: time - 1 => <= 1 + 1e-02' ) ;
+ ok( timesince( time ) <= 1e-02, 'timesince: time => <= 1e-02' ) ;
+ ok( timesince( ) - time <= 1e-02, 'timesince: no args => <= time + 1e-02' ) ;
+ note( 'Leaving tests_timesince()' ) ;
+ return ;
+}
+
+
+
+# timesince( $timeinit ) -> seconds elapsed since epoch time $timeinit
+# (0 when omitted). Guaranteed strictly positive: the result is often
+# used as a divisor, so a zero delta is clamped to one nanosecond.
+sub timesince
+{
+ my $timeinit = shift || 0 ;
+ my $timediff = time - $timeinit ;
+ # Often used in a division so no 0 but a nano seconde.
+ # The former expression max( $timediff, min( 1e-09, $timediff ) )
+ # always evaluated to $timediff itself, letting 0 leak through.
+ return( max( $timediff, 1e-09 ) ) ;
+}
+
+
+
+
+# Unit tests for flags_regex(). Each group assigns the file-scoped
+# @regexflag list (normally filled by --regexflag options) and checks the
+# resulting flag-string transformation, including a deliberately broken
+# regex (undef result) and the multi-step "keep only" pipelines.
+sub tests_flags_regex
+{
+ note( 'Entering tests_flags_regex()' ) ;
+
+ ok( q{} eq flags_regex(q{} ), 'flags_regex, null string q{}' ) ;
+ ok( q{\Seen NonJunk $Spam} eq flags_regex( q{\Seen NonJunk $Spam} ), q{flags_regex, nothing to do} ) ;
+
+ @regexflag = ('I am BAD' ) ;
+ ok( not ( defined flags_regex( q{} ) ), 'flags_regex, bad regex' ) ;
+
+ @regexflag = ( 's/NonJunk//g' ) ;
+ ok( q{\Seen $Spam} eq flags_regex( q{\Seen NonJunk $Spam} ), q{flags_regex, remove NonJunk: 's/NonJunk//g'} ) ;
+ @regexflag = ( q{s/\$Spam//g} ) ;
+ ok( q{\Seen NonJunk } eq flags_regex( q{\Seen NonJunk $Spam} ), q{flags_regex, remove $Spam: 's/\$Spam//g'} ) ;
+
+ @regexflag = ( 's/\\\\Seen//g' ) ;
+
+ ok( q{ NonJunk $Spam} eq flags_regex( q{\Seen NonJunk $Spam} ), q{flags_regex, remove \Seen: 's/\\\\\\\\Seen//g'} ) ;
+
+ @regexflag = ( 's/(\s|^)[^\\\\]\w+//g' ) ;
+ ok( q{\Seen \Middle \End} eq flags_regex( q{\Seen NonJunk \Middle $Spam \End} ), q{flags_regex: only \word among \Seen NonJunk \Middle $Spam \End} ) ;
+ ok( q{ \Seen \Middle \End1} eq flags_regex( q{Begin \Seen NonJunk \Middle $Spam \End1 End} ),
+ q{flags_regex: only \word among Begin \Seen NonJunk \Middle $Spam \End1 End} ) ;
+
+ @regexflag = ( q{s/.*?(Keep1|Keep2|Keep3)/$1 /g} ) ;
+ ok( 'Keep1 Keep2 ReB' eq flags_regex('ReA Keep1 REM Keep2 ReB'), 'Keep only regex' ) ;
+
+ ok( 'Keep1 Keep2 ' eq flags_regex( 'REM REM Keep1 Keep2'), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex( 'Keep1 REM REM Keep2'), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex( 'REM Keep1 REM REM Keep2'), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex( 'Keep1 Keep2'), 'Keep only regex' ) ;
+ ok( 'Keep1 ' eq flags_regex( 'REM Keep1'), 'Keep only regex' ) ;
+
+ @regexflag = ( q{s/(Keep1|Keep2|Keep3) (?!(Keep1|Keep2|Keep3)).*/$1 /g} ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex( 'Keep1 Keep2 ReB'), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex( 'Keep1 Keep2 REM REM REM'), 'Keep only regex' ) ;
+ ok( 'Keep2 ' eq flags_regex('Keep2 REM REM REM'), 'Keep only regex' ) ;
+
+
+ # the two previous regexes combined: strip before and after the keepers
+ @regexflag = ( q{s/.*?(Keep1|Keep2|Keep3)/$1 /g},
+ 's/(Keep1|Keep2|Keep3) (?!(Keep1|Keep2|Keep3)).*/$1 /g' ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex('REM Keep1 REM Keep2 REM'), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex('Keep1 REM Keep2 REM'), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex('REM Keep1 Keep2 REM'), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep2 ' eq flags_regex('REM Keep1 REM Keep2'), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep2 Keep3 ' eq flags_regex('REM Keep1 REM Keep2 REM REM Keep3 REM'), 'Keep only regex' ) ;
+ ok( 'Keep1 ' eq flags_regex('REM REM Keep1 REM REM REM '), 'Keep only regex' ) ;
+ ok( 'Keep1 Keep3 ' eq flags_regex('RE1 Keep1 RE2 Keep3 RE3 RE4 RE5 '), 'Keep only regex' ) ;
+
+ @regexflag = ( 's/(.*)/$1 jrdH8u/' ) ;
+ ok('REM REM REM REM REM jrdH8u' eq flags_regex('REM REM REM REM REM'), q{Add jrdH8u 's/(.*)/\$1 jrdH8u/'} ) ;
+ @regexflag = ('s/jrdH8u *//');
+ ok('REM REM REM REM REM ' eq flags_regex('REM REM REM REM REM jrdH8u'), q{Remove jrdH8u s/jrdH8u *//} ) ;
+
+ # keep only the standard RFC 3501 system flags, drop anything else
+ @regexflag = (
+ 's/.*?(?:(\\\\(?:Answered|Flagged|Deleted|Seen|Draft)\s?)|$)/defined($1)?$1:q()/eg'
+ );
+
+ ok( '\\Deleted \\Answered '
+ eq flags_regex('Blabla \$Junk \\Deleted machin \\Answered truc'),
+ 'Keep only regex: Exchange case (Phil)' ) ;
+
+ ok( q{} eq flags_regex( q{} ), 'Keep only regex: Exchange case, null string (Phil)' ) ;
+
+ ok( q{}
+ eq flags_regex('Blabla $Junk machin truc'),
+ 'Keep only regex: Exchange case, no accepted flags (Phil)' ) ;
+
+ ok('\\Deleted \\Answered \\Draft \\Flagged '
+ eq flags_regex('\\Deleted \\Answered \\Draft \\Flagged '),
+ 'Keep only regex: Exchange case (Phil)' ) ;
+
+ @regexflag = ( 's/\\\\Flagged//g' ) ;
+
+ is('\Deleted \Answered \Draft ',
+ flags_regex('\\Deleted \\Answered \\Draft \\Flagged '),
+ 'flags_regex: remove \Flagged 1' ) ;
+ is('\\Deleted \\Answered \\Draft',
+ flags_regex('\\Deleted \\Flagged \\Answered \\Draft'),
+ 'flags_regex: remove \Flagged 2' ) ;
+
+ # I didn't understand why it gives \F
+ # https://perldoc.perl.org/perlrebackslash.html
+ # \F Foldcase till \E. Not in [].
+ # https://perldoc.perl.org/functions/fc.html
+
+ # \F Not available in old Perl so I comment the test
+
+ # @regexflag = ( 's/\\Flagged/X/g' ) ;
+ #is('\Deleted FX \Answered \FX \Draft \FX',
+ #flags_regex( '\Deleted Flagged \Answered \Flagged \Draft \Flagged' ),
+ # 'flags_regex: remove \Flagged 3 mistery...' ) ;
+
+ note( 'Leaving tests_flags_regex()' ) ;
+ return ;
+}
+
+# flags_regex( $h1_flags ) -> transformed flag string, or undef when a
+# user-supplied regex is malformed. Applies each substitution from the
+# file-scoped @regexflag list (--regexflag options) in order.
+sub flags_regex
+{
+ my ( $h1_flags ) = @_ ;
+ foreach my $regexflag ( @regexflag ) {
+ my $h1_flags_orig = $h1_flags ;
+ $debugflags and myprint( "eval \$h1_flags =~ $regexflag\n" ) ;
+ # string eval is required: the substitution expression is user input
+ my $ret = eval "\$h1_flags =~ $regexflag ; 1 " ;
+ $debugflags and myprint( "regexflag $regexflag [$h1_flags_orig] -> [$h1_flags]\n" ) ;
+ if( not ( defined $ret ) or $EVAL_ERROR ) {
+ myprint( "Error: eval regexflag '$regexflag': $EVAL_ERROR\n" ) ;
+ return( undef ) ;
+ }
+ }
+ return( $h1_flags ) ;
+}
+
+# acls_sync( $h1_fold, $h2_fold )
+# When --syncacls is on, copies every differing ACL entry from the host1
+# folder to the host2 folder; a user absent on host1 gets the ACL 'none'.
+# Honors --dry (no setacl is issued). Uses file-scoped $syncacls/$sync.
+# NOTE(review): $EVAL_ERROR ($@) in the messages below is only set by a
+# die inside eval, not by getacl/setacl failures -- the reported error
+# text may be empty or stale; LastError would likely be the right source.
+sub acls_sync
+{
+ my($h1_fold, $h2_fold) = @_ ;
+ if ( $syncacls ) {
+ my $h1_hash = $sync->{imap1}->getacl($h1_fold)
+ or myprint( "Could not getacl for $h1_fold: $EVAL_ERROR\n" ) ;
+ my $h2_hash = $sync->{imap2}->getacl($h2_fold)
+ or myprint( "Could not getacl for $h2_fold: $EVAL_ERROR\n" ) ;
+ # union of the users seen on either side
+ my %users = map { ($_, 1) } ( keys %{ $h1_hash} , keys %{ $h2_hash } ) ;
+ foreach my $user (sort keys %users ) {
+ my $acl = $h1_hash->{$user} || 'none' ;
+ myprint( "acl $user: [$acl]\n" ) ;
+ # skip entries already identical on both sides
+ next if ($h1_hash->{$user} && $h2_hash->{$user} &&
+ $h1_hash->{$user} eq $h2_hash->{$user});
+ unless ($sync->{dry}) {
+ myprint( "setting acl $h2_fold $user $acl\n" ) ;
+ $sync->{imap2}->setacl($h2_fold, $user, $acl)
+ or myprint( "Could not set acl: $EVAL_ERROR\n" ) ;
+ }
+ }
+ }
+ return ;
+}
+
+
+# Unit tests for permanentflags(): \* yields q{} (no restriction), an
+# explicit list is returned verbatim, multi-line input is scanned, and
+# input without PERMANENTFLAGS yields q{}.
+sub tests_permanentflags
+{
+ note( 'Entering tests_permanentflags()' ) ;
+
+ my $string;
+ ok(q{} eq permanentflags(' * OK [PERMANENTFLAGS (\* \Draft \Answered)] Limited'),
+ 'permanentflags \*');
+ ok('\Draft \Answered' eq permanentflags(' * OK [PERMANENTFLAGS (\Draft \Answered)] Limited'),
+ 'permanentflags \Draft \Answered');
+ ok('\Draft \Answered'
+ eq permanentflags('Blabla',
+ ' * OK [PERMANENTFLAGS (\Draft \Answered)] Limited',
+ 'Blabla'),
+ 'permanentflags \Draft \Answered'
+ );
+ ok(q{} eq permanentflags('Blabla'), 'permanentflags nothing');
+
+ note( 'Leaving tests_permanentflags()' ) ;
+ return ;
+}
+
+# permanentflags( @lines ) -> the PERMANENTFLAGS list found in IMAP
+# SELECT/EXAMINE response lines, or q{} when absent. A list containing
+# \* means the server accepts any flag, so q{} (no restriction) is
+# returned in that case too. Only the first matching line is used.
+sub permanentflags
+{
+ my @lines = @_ ;
+
+ foreach my $line (@lines) {
+ if ( $line =~ m{\[PERMANENTFLAGS\s\(([^)]+?)\)\]}x ) {
+ ( $debugflags or $sync->{ debug } ) and myprint( "permanentflags: $line" ) ;
+ my $permanentflags = $1 ;
+ # \* == "client may store any flag": report no restriction
+ if ( $permanentflags =~ m{\\\*}x ) {
+ $permanentflags = q{} ;
+ }
+ return($permanentflags) ;
+ } ;
+ }
+ return( q{} ) ;
+}
+
+# Unit tests for flags_filter(): keeps only flags present in the allowed
+# list, preserving order and tolerating extra whitespace.
+sub tests_flags_filter
+{
+ note( 'Entering tests_flags_filter()' ) ;
+
+ ok( '\Seen' eq flags_filter('\Seen', '\Draft \Seen \Answered'), 'flags_filter ' );
+ ok( q{} eq flags_filter('\Seen', '\Draft \Answered'), 'flags_filter ' );
+ ok( '\Seen' eq flags_filter('\Seen', '\Seen'), 'flags_filter ' );
+ ok( '\Seen' eq flags_filter('\Seen', ' \Seen '), 'flags_filter ' );
+ ok( '\Seen \Draft'
+ eq flags_filter('\Seen \Draft', '\Draft \Seen \Answered'), 'flags_filter ' );
+ ok( '\Seen \Draft'
+ eq flags_filter('\Seen \Draft', ' \Draft \Seen \Answered '), 'flags_filter ' );
+
+ note( 'Leaving tests_flags_filter()' ) ;
+ return ;
+}
+
+# flags_filter( $flags, $allowed_flags ) -> space-joined subset of $flags
+# Keeps, in their original order, only the flags that also appear in the
+# space-separated $allowed_flags list.
+sub flags_filter
+{
+ my( $flags, $allowed_flags ) = @_ ;
+
+ my %allowed = map { $_ => 1 } split q{ }, $allowed_flags ;
+ my @kept = grep { exists $allowed{ $_ } } split /\s+/x, $flags ;
+
+ return( join q{ }, @kept ) ;
+}
+
+# flagscase( $flags ) -> $flags with each RFC 3501 system flag normalized
+# to its canonical capitalization (\Seen, \Draft, ...). Tokens that are
+# not system flags are left untouched; order is preserved.
+sub flagscase
+{
+ my $flags = shift ;
+
+ my %rfc_flags = map { $_ => 1 } split q{ }, '\Answered \Flagged \Deleted \Seen \Draft' ;
+
+ my @flags_out ;
+ foreach my $flag ( split /\s+/x, $flags ) {
+ my $candidate = ucsecond( lc $flag ) ;
+ push @flags_out, ( exists $rfc_flags{ $candidate } ? $candidate : $flag ) ;
+ }
+
+ return( join q{ }, @flags_out ) ;
+}
+
+# Unit tests for flagscase(): system flags are canonicalized regardless
+# of input case, non-system tokens (LALA, lala) pass through untouched.
+sub tests_flagscase
+{
+ note( 'Entering tests_flagscase()' ) ;
+
+ ok( '\Seen' eq flagscase( '\Seen' ), 'flagscase: \Seen -> \Seen' ) ;
+ ok( '\Seen' eq flagscase( '\SEEN' ), 'flagscase: \SEEN -> \Seen' ) ;
+
+ ok( '\Seen \Draft' eq flagscase( '\SEEN \DRAFT' ), 'flagscase: \SEEN \DRAFT -> \Seen \Draft' ) ;
+ ok( '\Draft \Seen' eq flagscase( '\DRAFT \SEEN' ), 'flagscase: \DRAFT \SEEN -> \Draft \Seen' ) ;
+
+ ok( '\Draft LALA \Seen' eq flagscase( '\DRAFT LALA \SEEN' ), 'flagscase: \DRAFT LALA \SEEN -> \Draft LALA \Seen' ) ;
+ ok( '\Draft lala \Seen' eq flagscase( '\DRAFT lala \SEEN' ), 'flagscase: \DRAFT lala \SEEN -> \Draft lala \Seen' ) ;
+
+ note( 'Leaving tests_flagscase()' ) ;
+ return ;
+}
+
+
+
+# ucsecond( $string ) -> $string with its SECOND character uppercased.
+# Used to canonicalize backslash-prefixed IMAP flags ('\seen' -> '\Seen').
+# Strings of length 0 or 1 are returned unchanged.
+sub ucsecond
+{
+ my $string = shift ;
+
+ # nothing to do without a second character
+ return( $string ) if ( 1 >= length $string ) ;
+
+ # lvalue substr: uppercase the character at offset 1 in place
+ substr( $string, 1, 1 ) = uc( substr( $string, 1, 1 ) ) ;
+ return( $string ) ;
+}
+
+
+# Unit tests for ucsecond(): second character uppercased, short strings
+# untouched, backslash-prefixed flags handled.
+sub tests_ucsecond
+{
+ note( 'Entering tests_ucsecond()' ) ;
+
+ ok( 'aBcde' eq ucsecond( 'abcde' ), 'ucsecond: abcde -> aBcde' ) ;
+ ok( 'ABCDE' eq ucsecond( 'ABCDE' ), 'ucsecond: ABCDE -> ABCDE' ) ;
+ ok( 'ABCDE' eq ucsecond( 'AbCDE' ), 'ucsecond: AbCDE -> ABCDE' ) ;
+ ok( 'ABCde' eq ucsecond( 'AbCde' ), 'ucsecond: AbCde -> ABCde' ) ;
+ ok( 'A' eq ucsecond( 'A' ), 'ucsecond: A -> A' ) ;
+ ok( 'AB' eq ucsecond( 'Ab' ), 'ucsecond: Ab -> AB' ) ;
+ ok( '\B' eq ucsecond( '\b' ), 'ucsecond: \b -> \B' ) ;
+ ok( '\Bcde' eq ucsecond( '\bcde' ), 'ucsecond: \bcde -> \Bcde' ) ;
+
+ note( 'Leaving tests_ucsecond()' ) ;
+ return ;
+}
+
+
+# select_msgs( $imap, $msgs_all_hash_ref, $search_cmd, $abletosearch, $folder )
+# Dispatcher: picks the SEARCH-based UID selection when the server is
+# able to search, otherwise the FETCH-based fallback. Returns the list
+# of selected UIDs.
+sub select_msgs
+{
+ my ( $imap, $msgs_all_hash_ref, $search_cmd, $abletosearch, $folder ) = @_ ;
+
+ my $selector = $abletosearch ? \&select_msgs_by_search : \&select_msgs_by_fetch ;
+ return( $selector->( $imap, $msgs_all_hash_ref, $search_cmd, $folder ) ) ;
+}
+
+# select_msgs_by_search( $imap, $msgs_all_hash_ref, $search_cmd, $folder )
+# Returns the list of UIDs to consider, using IMAP SEARCH. When caching
+# is on, first records every UID of the folder into $msgs_all_hash_ref so
+# the cache is not pruned wrongly when --search/--minage/--maxage later
+# restrict the result. Reads file-scoped $usecache/$maxage/$minage.
+sub select_msgs_by_search
+{
+ my ( $imap, $msgs_all_hash_ref, $search_cmd, $folder ) = @_ ;
+ my ( @msgs, @msgs_all ) ;
+
+ # Need to have the whole list in msgs_all_hash_ref
+ # without calling messages() several times.
+ # Need all messages list to avoid deleting useful cache part
+ # in case of --search or --minage or --maxage
+
+ if ( ( defined $msgs_all_hash_ref and $usecache )
+ or ( not defined $maxage and not defined $minage and not defined $search_cmd )
+ ) {
+
+ $debugdev and myprint( "Calling messages()\n" ) ;
+ @msgs_all = $imap->messages( ) ;
+
+ # NOTE(review): only catches the one-element-undef failure shape of
+ # messages(); confirm other failure modes against Mail::IMAPClient.
+ return if ( $#msgs_all == 0 && !defined $msgs_all[0] ) ;
+
+ if ( defined $msgs_all_hash_ref ) {
+ @{ $msgs_all_hash_ref }{ @msgs_all } = () ;
+ }
+ # return all messages
+ if ( not defined $maxage and not defined $minage and not defined $search_cmd ) {
+ return( @msgs_all ) ;
+ }
+ }
+
+ if ( defined $search_cmd ) {
+ @msgs = $imap->search( $search_cmd ) ;
+ return( @msgs ) ;
+ }
+
+ # we are here only if $maxage or $minage is defined
+ @msgs = select_msgs_by_age( $imap ) ;
+ return( @msgs );
+}
+
+
+# select_msgs_by_fetch( $imap, $msgs_all_hash_ref, $search_cmd, $folder )
+# Fallback UID selection for servers where SEARCH is unusable
+# (--noabletosearch): fetches INTERNALDATE for the UID range 1:UIDNEXT
+# and applies the --maxage/--minage filters client-side. Also records
+# every UID into $msgs_all_hash_ref to protect the cache, like the
+# SEARCH-based variant.
+sub select_msgs_by_fetch
+{
+ my ( $imap, $msgs_all_hash_ref, $search_cmd, $folder ) = @_ ;
+ my ( @msgs, @msgs_all, %fetch ) ;
+
+ # Need to have the whole list in msgs_all_hash_ref
+ # without calling messages() several times.
+ # Need all messages list to avoid deleting useful cache part
+ # in case of --search or --minage or --maxage
+
+
+ $debugdev and myprint( "Calling fetch_hash()\n" ) ;
+ # fall back to $uidnext_default when the server reports no UIDNEXT
+ my $uidnext = $imap->uidnext( $folder ) || $uidnext_default ;
+ my $fetch_hash_uids = $fetch_hash_set || "1:$uidnext" ;
+ %fetch = %{$imap->fetch_hash( $fetch_hash_uids, 'INTERNALDATE' ) } ;
+
+ @msgs_all = sort { $a <=> $b } keys %fetch ;
+ $debugdev and myprint( "Done fetch_hash()\n" ) ;
+
+ return if ( $#msgs_all == 0 && !defined $msgs_all[0] ) ;
+
+ if ( defined $msgs_all_hash_ref ) {
+ @{ $msgs_all_hash_ref }{ @msgs_all } = () ;
+ }
+ # return all messages
+ if ( not defined $maxage and not defined $minage and not defined $search_cmd ) {
+ return( @msgs_all ) ;
+ }
+
+ if ( defined $search_cmd ) {
+ myprint( "Warning: strange to see --search with --noabletosearch, an error can happen\n" ) ;
+ @msgs = $imap->search( $search_cmd ) ;
+ return( @msgs ) ;
+ }
+
+ # we are here only if $maxage or $minage is defined
+ my( @max, @min, $maxage_epoch, $minage_epoch ) ;
+ if ( defined $maxage ) { $maxage_epoch = $timestart_int - $NB_SECONDS_IN_A_DAY * $maxage ; }
+ if ( defined $minage ) { $minage_epoch = $timestart_int - $NB_SECONDS_IN_A_DAY * $minage ; }
+ foreach my $msg ( @msgs_all ) {
+ my $idate = $fetch{ $msg }->{'INTERNALDATE'} ;
+ #myprint( "$idate\n" ) ;
+ # @max: messages newer than maxage; @min: messages older than minage
+ if ( defined $maxage and ( epoch( $idate ) >= $maxage_epoch ) ) {
+ push @max, $msg ;
+ }
+ if ( defined $minage and ( epoch( $idate ) <= $minage_epoch ) ) {
+ push @min, $msg ;
+ }
+ }
+ @msgs = msgs_from_maxmin( \@max, \@min ) ;
+ return( @msgs ) ;
+}
+
+# select_msgs_by_age( $imap ) -> UIDs selected by --maxage/--minage using
+# server-side SENTSINCE/SENTBEFORE searches. Reads the file-scoped
+# $maxage, $minage, $timestart_int and $NB_SECONDS_IN_A_DAY.
+sub select_msgs_by_age
+{
+ my( $imap ) = @_ ;
+
+ my( @max, @min, @msgs, @inter, @union ) ;
+
+ if ( defined $maxage ) {
+ @max = $imap->sentsince( $timestart_int - $NB_SECONDS_IN_A_DAY * $maxage ) ;
+ }
+ if ( defined $minage ) {
+ @min = $imap->sentbefore( $timestart_int - $NB_SECONDS_IN_A_DAY * $minage ) ;
+ }
+
+ @msgs = msgs_from_maxmin( \@max, \@min ) ;
+ return( @msgs ) ;
+}
+
+# msgs_from_maxmin( $max_ref, $min_ref ) -> combined UID list.
+# @max = UIDs passing --maxage, @min = UIDs passing --minage. Reads the
+# file-scoped $minage/$maxage to decide how to combine:
+# only one bound defined -> that list as-is;
+# minage <= maxage -> intersection (messages inside the window);
+# minage > maxage -> union (messages outside the window).
+sub msgs_from_maxmin
+{
+ my( $max_ref, $min_ref ) = @_ ;
+ my( @max, @min, @msgs, @inter, @union ) ;
+
+ @max = @{ $max_ref } ;
+ @min = @{ $min_ref } ;
+
+ SWITCH: {
+ unless( defined $minage ) { @msgs = @max ; last SWITCH } ;
+ unless( defined $maxage ) { @msgs = @min ; last SWITCH } ;
+ my ( %union, %inter ) ;
+ # a UID seen twice (in both lists) lands in %inter
+ foreach my $m ( @min, @max ) { $union{ $m }++ && $inter{ $m }++ }
+ @inter = sort { $a <=> $b } keys %inter ;
+ @union = sort { $a <=> $b } keys %union ;
+ # normal case
+ if ( $minage <= $maxage ) { @msgs = @inter ; last SWITCH } ;
+ # just exclude messages between
+ if ( $minage > $maxage ) { @msgs = @union ; last SWITCH } ;
+
+ }
+ return( @msgs ) ;
+}
+
+# Unit tests for msgs_from_maxmin(): drives the file-scoped $maxage and
+# $minage through the four combinations (max only, intersection, union,
+# min only) against fixed @max/@min lists.
+sub tests_msgs_from_maxmin
+{
+ note( 'Entering tests_msgs_from_maxmin()' ) ;
+
+ my @msgs ;
+ $maxage = $NUMBER_200 ;
+ @msgs = msgs_from_maxmin( [ '1', '2' ], [ '2', '3' ] ) ;
+ ok( 0 == compare_lists( [ '1', '2' ], \@msgs ), 'msgs_from_maxmin: maxage++' ) ;
+ $minage = $NUMBER_100 ;
+ @msgs = msgs_from_maxmin( [ '1', '2' ], [ '2', '3' ] ) ;
+ ok( 0 == compare_lists( [ '2' ], \@msgs ), 'msgs_from_maxmin: -maxage++minage-' ) ;
+ $minage = $NUMBER_300 ;
+ @msgs = msgs_from_maxmin( [ '1', '2' ], [ '2', '3' ] ) ;
+ ok( 0 == compare_lists( [ '1', '2', '3' ], \@msgs ), 'msgs_from_maxmin: ++maxage-minage++' ) ;
+ $maxage = undef ;
+ @msgs = msgs_from_maxmin( [ '1', '2' ], [ '2', '3' ] ) ;
+ ok( 0 == compare_lists( [ '2', '3' ], \@msgs ), 'msgs_from_maxmin: ++minage-' ) ;
+
+ note( 'Leaving tests_msgs_from_maxmin()' ) ;
+ return ;
+}
+
+# Placeholder test for info_date_from_uid(); no assertions yet.
+sub tests_info_date_from_uid
+{
+ note( 'Entering tests_info_date_from_uid()' ) ;
+ note( 'Leaving tests_info_date_from_uid()' ) ;
+
+ return ;
+}
+
+# Stub: intended to report the INTERNALDATE/age of the oldest UID, but
+# the whole body is commented out, so calling it is a no-op.
+sub info_date_from_uid
+{
+
+ #my $first_uid = $msgs_all[ 0 ] ;
+ #my $first_idate = $fetch{ $first_uid }->{'INTERNALDATE'} ;
+ #my $first_epoch = epoch( $first_idate ) ;
+ #my $first_days = ( $timestart_int - $first_epoch ) / $NB_SECONDS_IN_A_DAY ;
+ #myprint( "\nOldest msg has UID $first_uid INTERNALDATE $first_idate EPOCH $first_epoch DAYS AGO $first_days\n" ) ;
+}
+
+
+# lastuid( $imap, $folder, $lastuid_guess ) -> last UID to use.
+# Intended to correct the guess with the highest \Recent UID, but the
+# SEARCH RECENT call is commented out below (too expensive per transfer),
+# so @recent_messages stays empty, max() returns undef, and the function
+# currently always returns $lastuid_guess unchanged.
+sub lastuid
+{
+ my $imap = shift ;
+ my $folder = shift ;
+ my $lastuid_guess = shift ;
+ my $lastuid ;
+
+ # rfc3501: The only reliable way to identify recent messages is to
+ # look at message flags to see which have the \Recent flag
+ # set, or to do a SEARCH RECENT.
+ # SEARCH RECENT doesn't work this way on courrier.
+
+ my @recent_messages ;
+ # SEARCH RECENT for each transfer can be expensive with a big folder
+ # Call commented for now
+ #@recent_messages = $imap->recent( ) ;
+ #myprint( "Recent: @recent_messages\n" ) ;
+
+ my $max_recent ;
+ $max_recent = max( @recent_messages ) ;
+
+ # only trust the recent maximum when it is at least the guess
+ if ( defined $max_recent and ($lastuid_guess <= $max_recent ) ) {
+ $lastuid = $max_recent ;
+ }else{
+ $lastuid = $lastuid_guess
+ }
+ return( $lastuid ) ;
+}
+
+# size_filtered( $h1_size, $h1_msg, $h1_fold, $h2_fold ) -> 1 when the
+# message must be skipped because of --maxsize/--minsize, 0 otherwise.
+# Side effects on skip: prints a message and updates the file-scoped
+# $sync skip counters. $h2_fold is currently unused here.
+sub size_filtered
+{
+ my( $h1_size, $h1_msg, $h1_fold, $h2_fold ) = @_ ;
+
+ $h1_size = 0 if ( ! $h1_size ) ; # null if empty or undef
+ if ( defined $sync->{ maxsize } and $h1_size > $sync->{ maxsize } ) {
+ myprint( "msg $h1_fold/$h1_msg skipped ($h1_size exceeds maxsize limit $sync->{ maxsize } bytes)\n" ) ;
+ $sync->{ total_bytes_skipped } += $h1_size;
+ $sync->{ nb_msg_skipped } += 1;
+ return( 1 ) ;
+ }
+ if ( defined $minsize and $h1_size <= $minsize ) {
+ myprint( "msg $h1_fold/$h1_msg skipped ($h1_size smaller than minsize $minsize bytes)\n" ) ;
+ $sync->{ total_bytes_skipped } += $h1_size;
+ $sync->{ nb_msg_skipped } += 1;
+ return( 1 ) ;
+ }
+ return( 0 ) ;
+}
+
+# message_exists( $imap, $msg ) -> 1 when UID $msg exists on $imap, 0
+# otherwise. When the connection is not in UID mode the check cannot be
+# done by UID, so existence is assumed.
+sub message_exists
+{
+ my( $imap, $msg ) = @_ ;
+ return( 1 ) if not $imap->Uid( ) ;
+
+ my $search_uid ;
+ ( $search_uid ) = $imap->search( "UID $msg" ) ;
+ #myprint( "$search ? $msg\n" ) ;
+ # search() returns an empty list when the UID is gone: guard against
+ # comparing an undefined value (was an uninitialized-value warning)
+ return( 1 ) if ( defined $search_uid and $search_uid eq $msg ) ;
+ return( 0 ) ;
+}
+
+
+# Globals
+# $sync->{ total_bytes_skipped }
+# $sync->{ nb_msg_skipped }
+# $mysync->{ h1_nb_msg_processed }
+# stats_update_skip_message( $mysync, $h1_size )
+# Accounts one skipped message of $h1_size bytes: bumps the skipped-bytes
+# and skipped-messages totals and marks one more host1 message processed.
+sub stats_update_skip_message
+{
+ my ( $mysync, $h1_size ) = @_ ;
+
+ $mysync->{ total_bytes_skipped } += $h1_size ;
+ $mysync->{ nb_msg_skipped } += 1 ;
+ $mysync->{ h1_nb_msg_processed } += 1 ;
+ return ;
+}
+
+# copy_message( $mysync, $h1_msg, $h1_fold, $h2_fold, $h1_fir_ref,
+# $permanentflags2, $cache_dir ) -> new host2 UID, or nothing on skip.
+# Full pipeline for one message: size filtering, optional existence
+# check, body retrieval, line-length filtering/fixing, date and flag
+# normalization, APPEND on host2, optional post-copy flag resync.
+# $h1_fir_ref holds the prefetched FETCH results keyed by host1 UID.
+sub copy_message
+{
+ # copy
+
+ my ( $mysync, $h1_msg, $h1_fold, $h2_fold, $h1_fir_ref, $permanentflags2, $cache_dir ) = @_ ;
+ ( $mysync->{ debug } or $mysync->{dry} )
+ and myprint( "msg $h1_fold/$h1_msg copying to $h2_fold $mysync->{dry_message} " . eta( $mysync ) . "\n" ) ;
+
+ my $h1_size = $h1_fir_ref->{$h1_msg}->{'RFC822.SIZE'} || 0 ;
+ my $h1_flags = $h1_fir_ref->{$h1_msg}->{'FLAGS'} || q{} ;
+ my $h1_idate = $h1_fir_ref->{$h1_msg}->{'INTERNALDATE'} || q{} ;
+
+
+ # skip messages outside --minsize/--maxsize
+ if ( size_filtered( $h1_size, $h1_msg, $h1_fold, $h2_fold ) ) {
+ $mysync->{ h1_nb_msg_processed } +=1 ;
+ return ;
+ }
+
+ debugsleep( $mysync ) ;
+ myprint( "- msg $h1_fold/$h1_msg S[$h1_size] F[$h1_flags] I[$h1_idate] has RFC822.SIZE null!\n" ) if ( ! $h1_size ) ;
+
+ # --checkmessageexists: re-verify the UID before fetching the body
+ if ( $checkmessageexists and not message_exists( $mysync->{imap1}, $h1_msg ) ) {
+ stats_update_skip_message( $mysync, $h1_size ) ;
+ return ;
+ }
+ myprint( debugmemory( $mysync, " at C1" ) ) ;
+
+ my ( $string, $string_len ) ;
+ ( $string_len ) = message_for_host2( $mysync, $h1_msg, $h1_fold, $h1_size, $h1_flags, $h1_idate, $h1_fir_ref, \$string ) ;
+
+ myprint( debugmemory( $mysync, " at C2" ) ) ;
+
+ # not defined or empty $string
+ if ( ( not $string ) or ( not $string_len ) ) {
+ myprint( "- msg $h1_fold/$h1_msg skipped.\n" ) ;
+ stats_update_skip_message( $mysync, $h1_size ) ;
+ return ;
+ }
+
+ # Lines too long (or not enough) => do no copy or fix
+ if ( ( defined $maxlinelength ) or ( defined $minmaxlinelength ) ) {
+ $string = linelengthstuff( $string, $h1_fold, $h1_msg, $string_len, $h1_size, $h1_flags, $h1_idate ) ;
+ if ( not defined $string ) {
+ stats_update_skip_message( $mysync, $h1_size ) ;
+ return ;
+ }
+ }
+
+ my $h1_date = date_for_host2( $h1_msg, $h1_idate ) ;
+
+ ( $mysync->{ debug } or $debugflags ) and
+ myprint( "Host1: flags init msg $h1_fold/$h1_msg date [$h1_date] flags [$h1_flags] size [$h1_size]\n" ) ;
+
+ # keep only flags host2 will accept ( PERMANENTFLAGS )
+ $h1_flags = flags_for_host2( $h1_flags, $permanentflags2 ) ;
+
+ ( $mysync->{ debug } or $debugflags ) and
+ myprint( "Host1: flags filt msg $h1_fold/$h1_msg date [$h1_date] flags [$h1_flags] size [$h1_size]\n" ) ;
+
+ # an empty date means "let the server set it"
+ $h1_date = undef if ( $h1_date eq q{} ) ;
+
+ my $new_id = append_message_on_host2( $mysync, \$string, $h1_fold, $h1_msg, $string_len, $h2_fold, $h1_size, $h1_flags, $h1_date, $cache_dir ) ;
+
+
+
+ # --syncflagsaftercopy: re-apply flags on the freshly appended copy
+ if ( $new_id and $syncflagsaftercopy ) {
+ sync_flags_after_copy( $mysync, $h1_fold, $h1_msg, $h1_flags, $h2_fold, $new_id, $permanentflags2 ) ;
+ }
+
+ myprint( debugmemory( $mysync, " at C3" ) ) ;
+
+ return $new_id ;
+}
+
+
+
+sub linelengthstuff
+{
+        # Apply the --minmaxlinelength / --maxlinelength policies to one message.
+        # Returns the message string (possibly rewritten by --maxlinelengthcmd),
+        # or undef when the message must be skipped.
+        my( $string, $h1_fold, $h1_msg, $string_len, $h1_size, $h1_flags, $h1_idate ) = @_ ;
+
+        my $maxlinelength_string = max_line_length( $string ) ;
+        if ( $debugmaxlinelength )
+        {
+                myprint( "msg $h1_fold/$h1_msg maxlinelength: $maxlinelength_string\n" ) ;
+        }
+
+        # Longest line too short => skip the message (tester/debug feature).
+        if ( ( defined $minmaxlinelength ) and ( $maxlinelength_string <= $minmaxlinelength ) )
+        {
+                my $subject = subject( $string ) ;
+                $debugdev and myprint( "- msg $h1_fold/$h1_msg skipped S[$h1_size] F[$h1_flags] I[$h1_idate] "
+                . "(Subject:[$subject]) (max line length under minmaxlinelength $minmaxlinelength bytes)\n" ) ;
+                return ;
+        }
+
+        # Under the maximum (or no maximum set) => keep the message untouched.
+        if ( ( not defined $maxlinelength ) or ( $maxlinelength_string <= $maxlinelength ) )
+        {
+                return $string ;
+        }
+
+        # Over the maximum: either repair via --maxlinelengthcmd or skip.
+        my $subject = subject( $string ) ;
+        if ( $maxlinelengthcmd )
+        {
+                $string = pipemess( $string, $maxlinelengthcmd ) ;
+                # undef means the external command failed.
+                if ( defined $string )
+                {
+                        return $string ;
+                }
+                myprint( "- msg $h1_fold/$h1_msg {$string_len} S[$h1_size] F[$h1_flags] I[$h1_idate] "
+                . "(Subject:[$subject]) could not be successfully transformed by --maxlinelengthcmd option\n" ) ;
+                return ;
+        }
+        myprint( "- msg $h1_fold/$h1_msg skipped S[$h1_size] F[$h1_flags] I[$h1_idate] "
+        . "(Subject:[$subject]) (line length exceeds maxlinelength $maxlinelength bytes)\n" ) ;
+        return ;
+}
+
+
+sub message_for_host2
+{
+
+# Fetch message $h1_msg from host1 into ${ $string_ref } and run the
+# --skipmess / --regexmess / --pipemess / --addheader / --truncmess
+# transformations on it, in that order.
+#
+# global variable list:
+# @skipmess
+# @regexmess
+# @pipemess
+# $debugcontent
+# $debug
+#
+# API current
+#
+# at failure:
+# * return nothing ( will then be undef or () )
+# * $string_ref content is undef or empty
+# at success:
+# * return string length ($string_ref content length)
+# * $string_ref content filled with message
+
+# API future
+#
+#
+        my ( $mysync, $h1_msg, $h1_fold, $h1_size, $h1_flags, $h1_idate, $h1_fir_ref, $string_ref ) = @_ ;
+
+        # abort when missing a parameter
+        if ( ( ! $mysync ) or ( ! $h1_msg ) or ( ! $h1_fold ) or ( ! defined $h1_size )
+          or ( ! defined $h1_flags) or ( ! defined $h1_idate )
+          or ( ! $h1_fir_ref) or ( ! $string_ref ) )
+        {
+                return ;
+        }
+
+        myprint( debugmemory( $mysync, " at M1" ) ) ;
+
+        # Download the raw message from host1 into the caller-provided scalar.
+        my $string_ok = $mysync->{imap1}->message_to_file( $string_ref, $h1_msg ) ;
+
+        myprint( debugmemory( $mysync, " at M2" ) ) ;
+
+        my $string_len = length_ref( $string_ref ) ;
+
+        # Fetch failure or zero-length message: count as an error and give up.
+        unless ( defined $string_ok and $string_len ) {
+        # undef or 0 length
+                my $error = join q{},
+                "- msg $h1_fold/$h1_msg {$string_len} S[$h1_size] F[$h1_flags] I[$h1_idate] could not be fetched: ",
+                $mysync->{imap1}->LastError || q{}, "\n" ;
+                errors_incr( $mysync, $error ) ;
+                $mysync->{ h1_nb_msg_processed } +=1 ;
+                return ;
+        }
+
+        # --skipmess: skip the message when one of the regexes matches.
+        if ( @skipmess ) {
+                my $match = skipmess( ${ $string_ref } ) ;
+                # string undef means the eval regex was bad.
+                if ( not ( defined $match ) ) {
+                        myprint(
+                        "- msg $h1_fold/$h1_msg {$string_len} S[$h1_size] F[$h1_flags] I[$h1_idate]"
+                        . " could not be skipped by --skipmess option, bad regex\n" ) ;
+                        return ;
+                }
+                if ( $match ) {
+                        my $subject = subject( ${ $string_ref } ) ;
+                        myprint( "- msg $h1_fold/$h1_msg {$string_len} S[$h1_size] F[$h1_flags] I[$h1_idate]"
+                        . " (Subject:[$subject]) skipped by --skipmess\n" ) ;
+                        return ;
+                }
+        }
+
+        # --regexmess: rewrite the message in place via eval'ed regexes.
+        if ( @regexmess ) {
+                ${ $string_ref } = regexmess( ${ $string_ref } ) ;
+                # string undef means the eval regex was bad.
+                if ( not ( defined ${ $string_ref } ) ) {
+                        myprint(
+                        "- msg $h1_fold/$h1_msg {$string_len} S[$h1_size] F[$h1_flags] I[$h1_idate]"
+                        . " could not be transformed by --regexmess\n" ) ;
+                        return ;
+                }
+        }
+
+        # --pipemess: filter the message through external commands.
+        if ( @pipemess ) {
+                ${ $string_ref } = pipemess( ${ $string_ref }, @pipemess ) ;
+                # string undef means something was bad.
+                if ( not ( defined ${ $string_ref } ) ) {
+                        myprint(
+                        "- msg $h1_fold/$h1_msg {$string_len} S[$h1_size] F[$h1_flags] I[$h1_idate]"
+                        . " could not be successfully transformed by --pipemess option\n" ) ;
+                        return ;
+                }
+        }
+
+        # --addheader: prepend a synthetic header when the message has none.
+        if ( $mysync->{addheader} and defined $h1_fir_ref->{$h1_msg}->{'NO_HEADER'} ) {
+                my $header = add_header( $h1_msg ) ;
+                $mysync->{ debug } and myprint( "msg $h1_fold/$h1_msg adding custom header [$header]\n" ) ;
+                ${ $string_ref } = $header . "\r\n" . ${ $string_ref } ;
+        }
+
+        # --truncmess: cap the message size (integer number of bytes).
+        if ( ( defined $mysync->{ truncmess } ) and is_an_integer( $mysync->{ truncmess } ) )
+        {
+                ${ $string_ref } = truncmess( ${ $string_ref }, $mysync->{ truncmess } ) ;
+        }
+
+        # Recompute: the transformations above may have changed the length.
+        $string_len = length_ref( $string_ref ) ;
+
+        $debugcontent and myprint(
+                q{=} x $STD_CHAR_PER_LINE, "\n",
+                "F message content begin next line ($string_len characters long)\n",
+                ${ $string_ref },
+                "\nF message content ended on previous line\n", q{=} x $STD_CHAR_PER_LINE, "\n" ) ;
+
+        myprint( debugmemory( $mysync, " at M3" ) ) ;
+
+        return $string_len ;
+}
+
+sub tests_truncmess
+{
+        # Unit tests for truncmess(): undef passthrough, exact/over/under
+        # lengths, and a large-buffer truncation.
+        note( 'Entering tests_truncmess()' ) ;
+
+        is( undef, truncmess( ), 'truncmess: no args => undef' ) ;
+        is( 'abc', truncmess( 'abc' ), 'truncmess: abc => abc' ) ;
+        is( 'ab', truncmess( 'abc', 2 ), 'truncmess: abc 2 => ab' ) ;
+        is( 'abc', truncmess( 'abc', 3 ), 'truncmess: abc 3 => abc' ) ;
+        is( 'abc', truncmess( 'abc', 4 ), 'truncmess: abc 4 => abc' ) ;
+        is( '12345', truncmess( "123456789\n", 5 ), 'truncmess: "123456789\n", 5 => 12345' ) ;
+        is( "123456789\n" x 5000, truncmess( "123456789\n" x 100000, 50000 ), 'truncmess: "123456789\n" x 100000, 50000 => "123456789\n" x 5000' ) ;
+        note( 'Leaving tests_truncmess()' ) ;
+        return ;
+}
+
+sub truncmess
+{
+        # Truncate $string to at most $length characters.
+        # undef string => undef; undef length => string returned unchanged.
+        my ( $string, $length ) = @_ ;
+
+        return if not defined $string ;
+        return $string if not defined $length ;
+
+        return substr( $string, 0, $length ) ;
+}
+
+sub tests_message_for_host2
+{
+        # Unit tests for message_for_host2(): argument validation, a mocked
+        # imap1->message_to_file fetch, then --pipemess behavior on Unix
+        # ('cat' passthrough, 'false' failure, 'true' empty output failure).
+        note( 'Entering tests_message_for_host2()' ) ;
+
+
+        my ( $mysync, $h1_msg, $h1_fold, $h1_size, $h1_flags, $h1_idate, $h1_fir_ref, $string_ref ) ;
+
+        is( undef, message_for_host2( ), q{message_for_host2: no args} ) ;
+        is( undef, message_for_host2( $mysync, $h1_msg, $h1_fold, $h1_size, $h1_flags, $h1_idate, $h1_fir_ref, $string_ref ), q{message_for_host2: undef args} ) ;
+
+        require_ok( "Test::MockObject" ) ;
+        my $imapT = Test::MockObject->new(  ) ;
+        $mysync->{imap1} = $imapT ;
+        my $string ;
+
+        $h1_msg  = 1 ;
+        $h1_fold = 'FoldFoo';
+        $h1_size = 9 ;
+        $h1_flags = q{} ;
+        $h1_idate = '10-Jul-2015 09:00:00 +0200' ;
+        $h1_fir_ref = {} ;
+        $string_ref = \$string ;
+        # Mock the fetch: always deliver the 9-byte message 'blablabla'.
+        $imapT->mock( 'message_to_file',
+                sub {
+                        my ( $imap, $mystring_ref, $msg ) = @_ ;
+                        ${$mystring_ref} = 'blablabla' ;
+                        return length ${$mystring_ref} ;
+                }
+        ) ;
+        is( 9, message_for_host2( $mysync, $h1_msg, $h1_fold, $h1_size, $h1_flags, $h1_idate, $h1_fir_ref, $string_ref ),
+            q{message_for_host2: msg 1 == "blablabla", length} ) ;
+        is( 'blablabla', $string, q{message_for_host2: msg 1 == "blablabla", value} ) ;
+
+        # so far so good
+        # now the --pipemess stuff
+
+        SKIP: {
+                Readonly my $NB_WIN_tests_message_for_host2 => 0 ;
+                skip( 'Not on MSWin32', $NB_WIN_tests_message_for_host2 ) if ('MSWin32' ne $OSNAME) ;
+                # Windows
+                # "type" command does not accept redirection of STDIN with <
+                # "sort" does
+
+        } ;
+
+        SKIP: {
+                Readonly my $NB_UNX_tests_message_for_host2 => 6 ;
+                skip( 'Not on Unix', $NB_UNX_tests_message_for_host2 ) if ('MSWin32' eq $OSNAME) ;
+                # Unix
+
+                # no change by cat
+                @pipemess = ( 'cat' ) ;
+                is( 9, message_for_host2( $mysync, $h1_msg, $h1_fold, $h1_size, $h1_flags, $h1_idate, $h1_fir_ref, $string_ref ),
+                    q{message_for_host2: --pipemess 'cat', length} ) ;
+                is( 'blablabla', $string, q{message_for_host2: --pipemess 'cat', value} ) ;
+
+
+                # failure by false
+                @pipemess = ( 'false' ) ;
+                is( undef, message_for_host2( $mysync, $h1_msg, $h1_fold, $h1_size, $h1_flags, $h1_idate, $h1_fir_ref, $string_ref ),
+                    q{message_for_host2: --pipemess 'false', length} ) ;
+                is( undef, $string, q{message_for_host2: --pipemess 'false', value} ) ;
+
+                # failure by true since no output
+                @pipemess = ( 'true' ) ;
+                is( undef, message_for_host2( $mysync, $h1_msg, $h1_fold, $h1_size, $h1_flags, $h1_idate, $h1_fir_ref, $string_ref ),
+                    q{message_for_host2: --pipemess 'true', length} ) ;
+                is( undef, $string, q{message_for_host2: --pipemess 'true', value} ) ;
+        }
+
+        note( 'Leaving tests_message_for_host2()' ) ;
+        return ;
+}
+
+sub tests_labels_remove_subfolder1
+{
+        # Unit tests for labels_remove_subfolder1(): stripping the --subfolder1
+        # prefix from Gmail labels, quoting of labels with spaces/backslashes,
+        # and the INBOX => "\\Inbox" special case.
+        note( 'Entering tests_labels_remove_subfolder1()' ) ;
+        is( undef, labels_remove_subfolder1(  ), 'labels_remove_subfolder1: no parameters => undef' ) ;
+        is( 'Blabla', labels_remove_subfolder1( 'Blabla' ), 'labels_remove_subfolder1: one parameter Blabla => Blabla' ) ;
+        is( 'Blan blue', labels_remove_subfolder1( 'Blan blue' ), 'labels_remove_subfolder1: one parameter Blan blue => Blan blue' ) ;
+        is( '\Bla "Blan blan" Blabla', labels_remove_subfolder1( '\Bla "Blan blan" Blabla' ),
+            'labels_remove_subfolder1: one parameter \Bla "Blan blan" Blabla => \Bla "Blan blan" Blabla' ) ;
+
+        is( 'Bla', labels_remove_subfolder1( 'Subf/Bla', 'Subf' ), 'labels_remove_subfolder1: Subf/Bla Subf => "Bla"' ) ;
+
+
+        is( '"\\\\Bla"', labels_remove_subfolder1( '"\\\\Bla"', 'Subf' ), 'labels_remove_subfolder1: "\\\\Bla" Subf => "\\\\Bla"' ) ;
+
+        is( 'Bla Kii', labels_remove_subfolder1( 'Subf/Bla Subf/Kii', 'Subf' ),
+            'labels_remove_subfolder1: Subf/Bla Subf/Kii, Subf => "Bla" "Kii"' ) ;
+
+        is( '"\\\\Bla" Kii', labels_remove_subfolder1( '"\\\\Bla" Subf/Kii', 'Subf' ),
+            'labels_remove_subfolder1: "\\\\Bla" Subf/Kii Subf => "\\\\Bla" Kii' ) ;
+
+        is( '"Blan blan"', labels_remove_subfolder1( '"Subf/Blan blan"', 'Subf' ),
+            'labels_remove_subfolder1: "Subf/Blan blan" Subf => "Blan blan"' ) ;
+
+        is( '"\\\\Loo" "Blan blan" Kii', labels_remove_subfolder1( '"\\\\Loo" "Subf/Blan blan" Subf/Kii', 'Subf' ),
+            'labels_remove_subfolder1: "\\\\Loo" "Subf/Blan blan" Subf/Kii + Subf => "\\\\Loo" "Blan blan" Kii' ) ;
+
+        is( '"\\\\Inbox"', labels_remove_subfolder1( 'Subf/INBOX', 'Subf' ),
+            'labels_remove_subfolder1: Subf/INBOX + Subf => "\\\\Inbox"' ) ;
+
+        is( '"\\\\Loo" "Blan blan" Kii "\\\\Inbox"', labels_remove_subfolder1( '"\\\\Loo" "Subf/Blan blan" Subf/Kii Subf/INBOX', 'Subf' ),
+            'labels_remove_subfolder1: "\\\\Loo" "Subf/Blan blan" Subf/Kii Subf/INBOX + Subf => "\\\\Loo" "Blan blan" Kii "\\\\Inbox"' ) ;
+
+
+        note( 'Leaving tests_labels_remove_subfolder1()' ) ;
+        return ;
+}
+
+
+
+sub labels_remove_subfolder1
+{
+        # Strip the --subfolder1 prefix from each Gmail label in the
+        # space-separated $labels string and return the labels re-quoted,
+        # sorted and deduplicated. A bare "INBOX" becomes the special label
+        # "\\Inbox". undef labels => undef; undef subfolder => labels as-is.
+        my $labels     = shift ;
+        my $subfolder1 = shift ;
+
+        if ( not defined $labels ) { return ; }
+        if ( not defined $subfolder1 ) { return $labels ; }
+
+        # Split on whitespace but keep surrounding quotes (keep => 1).
+        my @labels = quotewords('\s+', 1, $labels ) ;
+        #myprint( "@labels\n" ) ;
+        my @labels_subfolder2 ;
+
+        foreach my $label ( @labels )
+        {
+                # NOTE(review): this placeholder regex never matches a real
+                # label, so every label goes through the else branch below;
+                # the "\Seen \Deleted stay the same" intent looks disabled.
+                if ( $label =~ m{zzzzzzzzzz} )
+                {
+                        # \Seen \Deleted ... stay the same
+                        push @labels_subfolder2, $label ;
+                }
+                else
+                {
+                        # Remove surrounding quotes if any, to add them again in case of space
+                        $label = join( q{}, quotewords('\s+', 0, $label ) ) ;
+                        # NOTE(review): $subfolder1 is interpolated unescaped
+                        # and unanchored here — regex metacharacters in the
+                        # subfolder name would change the match; confirm callers
+                        # only pass plain folder names.
+                        $label =~ s{$subfolder1/?}{} ;
+                        if ( 'INBOX' eq $label )
+                        {
+                                push @labels_subfolder2, q{"\\\\Inbox"} ;
+                        }
+                        elsif ( $label =~ m{\\} )
+                        {
+                                # Label contains a backslash => re-quote with a doubled backslash.
+                                push @labels_subfolder2, qq{"\\$label"} ;
+                        }
+                        elsif ( $label =~ m{ } )
+                        {
+                                # Label contains a space => needs surrounding quotes.
+                                push @labels_subfolder2, qq{"$label"} ;
+                        }
+                        else
+                        {
+                                push @labels_subfolder2, $label ;
+                        }
+                }
+        }
+
+        my $labels_subfolder2 = join( ' ', sort uniq( @labels_subfolder2 ) ) ;
+
+        return $labels_subfolder2 ;
+}
+
+sub tests_labels_remove_special
+{
+        # Unit tests for labels_remove_special(): special '"\\..."' labels are
+        # dropped, ordinary labels are kept and sorted.
+        note( 'Entering tests_labels_remove_special()' ) ;
+
+        is( undef, labels_remove_special(  ), 'labels_remove_special: no parameters => undef' ) ;
+        is( q{}, labels_remove_special( q{} ), 'labels_remove_special: empty string => empty string' ) ;
+        is( q{}, labels_remove_special( '"\\\\Inbox"' ), 'labels_remove_special:"\\\\Inbox" => empty string' ) ;
+        is( q{}, labels_remove_special( '"\\\\Inbox" "\\\\Starred"' ), 'labels_remove_special:"\\\\Inbox" "\\\\Starred" => empty string' ) ;
+        is( 'Bar Foo', labels_remove_special( 'Foo Bar' ), 'labels_remove_special:Foo Bar => Bar Foo' ) ;
+        is( 'Bar Foo', labels_remove_special( 'Foo Bar "\\\\Inbox"' ), 'labels_remove_special:Foo Bar "\\\\Inbox" => Bar Foo' ) ;
+        note( 'Leaving tests_labels_remove_special()' ) ;
+        return ;
+}
+
+
+
+
+sub labels_remove_special
+{
+        # Drop the special Gmail labels — those starting with a literal "\\ ,
+        # like "\\Inbox" or "\\Starred" — and return the remaining labels
+        # sorted and space-joined. undef in => undef out.
+        my $labels = shift ;
+
+        return if not defined $labels ;
+
+        # Split on whitespace, keeping surrounding quotes (keep => 1).
+        my @labels = quotewords('\s+', 1, $labels ) ;
+        myprint( "labels before remove_non_folded: @labels\n" ) ;
+
+        # Keep only the non-special labels.
+        my @labels_remove_special = grep { ! m{^\"\\\\} } @labels ;
+
+        return join( ' ', sort @labels_remove_special ) ;
+}
+
+
+sub tests_labels_add_subfolder2
+{
+        # Unit tests for labels_add_subfolder2(): prefixing labels with the
+        # --subfolder2 folder, quoting rules, and the "\\Inbox" special case
+        # (duplicated into Subf/INBOX except when syncing the INBOX folder).
+        note( 'Entering tests_labels_add_subfolder2()' ) ;
+        is( undef, labels_add_subfolder2(  ), 'labels_add_subfolder2: no parameters => undef' ) ;
+        is( 'Blabla', labels_add_subfolder2( 'Blabla' ), 'labels_add_subfolder2: one parameter Blabla => Blabla' ) ;
+        is( 'Blan blue', labels_add_subfolder2( 'Blan blue' ), 'labels_add_subfolder2: one parameter Blan blue => Blan blue' ) ;
+        is( '\Bla "Blan blan" Blabla', labels_add_subfolder2( '\Bla "Blan blan" Blabla' ),
+            'labels_add_subfolder2: one parameter \Bla "Blan blan" Blabla => \Bla "Blan blan" Blabla' ) ;
+
+        is( 'Subf/Bla', labels_add_subfolder2( 'Bla', 'Subf' ), 'labels_add_subfolder2: Bla Subf => "Subf/Bla"' ) ;
+
+
+        is( 'Subf/\Bla', labels_add_subfolder2( '\\\\Bla', 'Subf' ), 'labels_add_subfolder2: \Bla Subf => \Bla' ) ;
+
+        is( 'Subf/Bla Subf/Kii', labels_add_subfolder2( 'Bla Kii', 'Subf' ),
+            'labels_add_subfolder2: Bla Kii Subf => "Subf/Bla" "Subf/Kii"' ) ;
+
+        is( 'Subf/Kii Subf/\Bla', labels_add_subfolder2( '\\\\Bla Kii', 'Subf' ),
+            'labels_add_subfolder2: \Bla Kii Subf => \Bla Subf/Kii' ) ;
+
+        is( '"Subf/Blan blan"', labels_add_subfolder2( '"Blan blan"', 'Subf' ),
+            'labels_add_subfolder2: "Blan blan" Subf => "Subf/Blan blan"' ) ;
+
+        is( '"Subf/Blan blan" Subf/Kii Subf/\Loo', labels_add_subfolder2( '\\\\Loo "Blan blan" Kii', 'Subf' ),
+            'labels_add_subfolder2: \Loo "Blan blan" Kii + Subf => "Subf/Blan blan" Subf/Kii Subf/\Loo' ) ;
+
+        # "\\Inbox" is special, add to subfolder INBOX also because Gmail will but ...
+        is( '"Subf/\\\\Inbox" Subf/INBOX', labels_add_subfolder2( '"\\\\Inbox"', 'Subf' ),
+            'labels_add_subfolder2: "\\\\Inbox" Subf => "Subf/\\\\Inbox" Subf/INBOX' ) ;
+
+        # but not with INBOX folder
+        is( '"Subf/\\\\Inbox"', labels_add_subfolder2( '"\\\\Inbox"', 'Subf', 'INBOX' ),
+            'labels_add_subfolder2: "\\\\Inbox" Subf INBOX => "Subf/\\\\Inbox"' ) ;
+
+        # two times => one time
+        is( '"Subf/\\\\Inbox" Subf/INBOX', labels_add_subfolder2( '"\\\\Inbox" "\\\\Inbox"', 'Subf' ),
+            'labels_add_subfolder2: "\\\\Inbox" "\\\\Inbox" Subf => "Subf/\\\\Inbox"' ) ;
+
+        is( '"Subf/\\\\Starred"', labels_add_subfolder2( '"\\\\Starred"', 'Subf' ),
+            'labels_add_subfolder2: "\\\\Starred" Subf => "Subf/\\\\Starred"' ) ;
+
+        note( 'Leaving tests_labels_add_subfolder2()' ) ;
+        return ;
+}
+
+sub labels_add_subfolder2
+{
+        # Prefix each Gmail label in $labels with the --subfolder2 folder name
+        # and return the result sorted and space-joined. Special labels like
+        # "\\Inbox" keep their backslashes under the new prefix, and "\\Inbox"
+        # is additionally duplicated as "$subfolder2/INBOX" — except when the
+        # host1 folder being synced is INBOX itself.
+        # undef labels => undef; undef subfolder2 => labels unchanged.
+        my $labels     = shift ;
+        my $subfolder2 = shift ;
+        my $h1_folder  = shift || q{} ;
+
+        if ( not defined $labels ) { return ; }
+        if ( not defined $subfolder2 ) { return $labels ; }
+
+        # Isn't it messy?
+        # Syncing the INBOX folder itself: force the "\\Inbox" label in.
+        if ( 'INBOX' eq $h1_folder )
+        {
+                $labels .= ' "\\\\Inbox"' ;
+        }
+
+        # Split on whitespace keeping quotes, dropping duplicates.
+        my @labels = uniq( quotewords('\s+', 1, $labels ) ) ;
+        myprint( "labels before subfolder2: @labels\n" ) ;
+        my @labels_subfolder2 ;
+
+
+        foreach my $label ( @labels )
+        {
+                # Isn't it more messy?
+                # Gmail puts "\\Inbox"-labelled mail in INBOX, so mirror that
+                # under the subfolder (unless we are syncing INBOX itself).
+                if ( ( q{"\\\\Inbox"} eq $label ) and ( 'INBOX' ne $h1_folder ) )
+                {
+                        if ( $subfolder2 =~ m{ } )
+                        {
+                                push @labels_subfolder2, qq{"$subfolder2/INBOX"} ;
+                        }
+                        else
+                        {
+                                push @labels_subfolder2, "$subfolder2/INBOX" ;
+                        }
+                }
+                if ( $label =~ m{^\"\\\\} )
+                {
+                        # \Seen \Deleted ... stay the same
+                        #push @labels_subfolder2, $label ;
+                        # Remove surrounding quotes if any, to add them again
+                        $label = join( q{}, quotewords('\s+', 0, $label ) ) ;
+                        push @labels_subfolder2, qq{"$subfolder2/\\$label"} ;
+
+                }
+                else
+                {
+                        # Remove surrounding quotes if any, to add them again in case of space
+                        $label = join( q{}, quotewords('\s+', 0, $label ) ) ;
+                        if ( $label =~ m{ } )
+                        {
+                                push @labels_subfolder2, qq{"$subfolder2/$label"} ;
+                        }
+                        else
+                        {
+                                push @labels_subfolder2, "$subfolder2/$label" ;
+                        }
+                }
+        }
+
+        my $labels_subfolder2 = join( ' ', sort @labels_subfolder2 ) ;
+
+        return $labels_subfolder2 ;
+}
+
+sub tests_labels
+{
+        # Unit tests for labels(): argument validation plus a mocked
+        # fetch_hash returning a fixed X-GM-LABELS value.
+        note( 'Entering tests_labels()' ) ;
+
+        is( undef, labels(  ), 'labels: no parameters => undef' ) ;
+        is( undef, labels( undef ), 'labels: undef => undef' ) ;
+        require_ok( "Test::MockObject" ) ;
+        my $myimap = Test::MockObject->new(  ) ;
+
+        # Mocked fetch: UID 1 carries the labels '\Seen Blabla'.
+        $myimap->mock( 'fetch_hash',
+                sub {
+                        return(
+                                { '1' => {
+                                        'X-GM-LABELS' => '\Seen Blabla'
+                                        }
+                                }
+                        ) ;
+                }
+        ) ;
+        $myimap->mock( 'Debug' , sub { } ) ;
+        $myimap->mock( 'Unescape', sub { return Mail::IMAPClient::Unescape( @_ ) } ) ; # real one
+
+        is( undef, labels( $myimap ), 'labels: one parameter => undef' ) ;
+        is( '\Seen Blabla', labels( $myimap, '1' ), 'labels: $mysync UID_1 => \Seen Blabla' ) ;
+
+        note( 'Leaving tests_labels()' ) ;
+        return ;
+}
+
+sub labels
+{
+        # Fetch the Gmail X-GM-LABELS attribute of message $uid through
+        # the $myimap connection. Returns the raw labels string, or undef
+        # when an argument is missing.
+        my ( $myimap, $uid ) = @ARG ;
+
+        all_defined( $myimap, $uid ) or return ;
+
+        my $fetched = $myimap->fetch_hash( [ $uid ], 'X-GM-LABELS' ) ;
+        #$labels = $myimap->Unescape( $labels ) ;
+        return $fetched->{ $uid }->{ 'X-GM-LABELS' } ;
+}
+
+sub tests_synclabels
+{
+        # Unit tests for synclabels(): argument validation, then a mocked
+        # imap1 (fetch_hash) and imap2 (store) pair to verify the copy.
+        note( 'Entering tests_synclabels()' ) ;
+
+        is( undef, synclabels(  ), 'synclabels: no parameters => undef' ) ;
+        is( undef, synclabels( undef ), 'synclabels: undef => undef' ) ;
+        my $mysync ;
+        is( undef, synclabels( $mysync ), 'synclabels: var undef => undef' ) ;
+
+        require_ok( "Test::MockObject" ) ;
+        $mysync = {} ;
+
+        my $myimap1 = Test::MockObject->new(  ) ;
+        # Mocked fetch on host1: UID 1 carries '\Seen Blabla'.
+        $myimap1->mock( 'fetch_hash',
+                sub {
+                        return(
+                                { '1' => {
+                                        'X-GM-LABELS' => '\Seen Blabla'
+                                        }
+                                }
+                        ) ;
+                }
+        ) ;
+        $myimap1->mock( 'Debug', sub { } ) ;
+        $myimap1->mock( 'Unescape', sub { return Mail::IMAPClient::Unescape( @_ ) } ) ; # real one
+
+        my $myimap2 = Test::MockObject->new(  ) ;
+
+        # Mocked store on host2: always succeeds.
+        $myimap2->mock( 'store',
+                sub {
+                        return 1 ;
+                }
+        ) ;
+
+
+        $mysync->{imap1} = $myimap1 ;
+        $mysync->{imap2} = $myimap2 ;
+
+        is( undef, synclabels( $mysync ), 'synclabels: fresh $mysync => undef' ) ;
+
+        is( undef, synclabels( $mysync, '1' ), 'synclabels: $mysync UID_1 alone => undef' ) ;
+        is( 1, synclabels( $mysync, '1', '2' ), 'synclabels: $mysync UID_1 UID_2 => 1' ) ;
+
+        note( 'Leaving tests_synclabels()' ) ;
+        return ;
+}
+
+
+sub synclabels
+{
+        # Copy the Gmail labels of host1 message $uid1 onto host2 message
+        # $uid2, adjusting them for --subfolder1/--subfolder2 first.
+        # Returns the store() result, or undef when arguments/connections
+        # are missing, there are no labels, or --dry is on.
+        my( $mysync, $uid1, $uid2 ) = @ARG ;
+
+        if ( not all_defined( $mysync, $uid1, $uid2 ) ) {
+                return ;
+        }
+        my $myimap1 = $mysync->{ 'imap1' } || return ;
+        my $myimap2 = $mysync->{ 'imap2' } || return ;
+
+        # --debuglabels turns on IMAP tracing only around the label calls.
+        $mysync->{debuglabels} and $myimap1->Debug( 1 ) ;
+        my $labels1 = labels( $myimap1, $uid1 ) ;
+        $mysync->{debuglabels} and $myimap1->Debug( 0 ) ;
+        $mysync->{debuglabels} and myprint( "Host1 labels: $labels1\n" ) ;
+
+
+
+        # --subfolder1: strip the subfolder prefix from the host1 labels.
+        if ( $mysync->{ subfolder1 } and $labels1 )
+        {
+                $labels1 = labels_remove_subfolder1( $labels1, $mysync->{ subfolder1 } ) ;
+                $mysync->{debuglabels} and myprint( "Host1 labels with subfolder1: $labels1\n" ) ;
+        }
+
+        # --subfolder2: add the subfolder prefix for the host2 labels.
+        if ( $mysync->{ subfolder2 } and $labels1 )
+        {
+                $labels1 = labels_add_subfolder2( $labels1, $mysync->{ subfolder2 } ) ;
+                $mysync->{debuglabels} and myprint( "Host1 labels with subfolder2: $labels1\n" ) ;
+        }
+
+        my $store ;
+        # No store when there is nothing to set or in --dry mode.
+        if ( $labels1 and not $mysync->{ dry } )
+        {
+                $mysync->{ debuglabels } and $myimap2->Debug( 1 ) ;
+                $store = $myimap2->store( $uid2, "X-GM-LABELS ($labels1)" ) ;
+                $mysync->{ debuglabels } and $myimap2->Debug( 0 ) ;
+        }
+        return $store ;
+}
+
+
+sub tests_resynclabels
+{
+        # Unit tests for resynclabels(): argument validation, equal-labels
+        # short-circuit, then a mocked host2 store when labels differ.
+        note( 'Entering tests_resynclabels()' ) ;
+
+        is( undef, resynclabels(  ), 'resynclabels: no parameters => undef' ) ;
+        is( undef, resynclabels( undef ), 'resynclabels: undef => undef' ) ;
+        my $mysync ;
+        is( undef, resynclabels( $mysync ), 'resynclabels: var undef => undef' ) ;
+
+        my ( $h1_fir_ref, $h2_fir_ref ) ;
+
+        $mysync->{ debuglabels } = 1 ;
+        $h1_fir_ref->{ 11 }->{ 'X-GM-LABELS' } = '\Seen Baa Kii' ;
+        $h2_fir_ref->{ 22 }->{ 'X-GM-LABELS' } = '\Seen Baa Kii' ;
+
+        # labels are equal
+        is( 1, resynclabels( $mysync, 11, 22, $h1_fir_ref, $h2_fir_ref ),
+            'resynclabels: $mysync UID_1 UID_2 labels are equal => 1' ) ;
+
+        # labels are different
+        $h2_fir_ref->{ 22 }->{ 'X-GM-LABELS' } = '\Seen Zuu' ;
+        require_ok( "Test::MockObject" ) ;
+        my $myimap2 = Test::MockObject->new(  ) ;
+        $myimap2->mock( 'store',
+                sub {
+                        return 1 ;
+                }
+        ) ;
+        $myimap2->mock( 'Debug', sub { } ) ;
+        $mysync->{imap2} = $myimap2 ;
+
+        is( 1, resynclabels( $mysync, 11, 22, $h1_fir_ref, $h2_fir_ref ),
+            'resynclabels: $mysync UID_1 UID_2 labels are not equal => store => 1' ) ;
+
+        note( 'Leaving tests_resynclabels()' ) ;
+        return ;
+}
+
+
+
+sub resynclabels
+{
+        # During --resync: compare the Gmail labels already fetched for host1
+        # message $uid1 and host2 message $uid2, and store host1's labels on
+        # host2 when they differ. Returns 1 when already equal, the store()
+        # result when a sync was done, undef on missing args or --dry.
+        my( $mysync, $uid1, $uid2, $h1_fir_ref, $h2_fir_ref, $h1_folder ) = @ARG ;
+
+        if ( not all_defined( $mysync, $uid1, $uid2, $h1_fir_ref, $h2_fir_ref ) ) {
+                return ;
+        }
+
+        my $labels1 = $h1_fir_ref->{ $uid1 }->{ 'X-GM-LABELS' } || q{} ;
+        my $labels2 = $h2_fir_ref->{ $uid2 }->{ 'X-GM-LABELS' } || q{} ;
+
+        # --subfolder1: strip the subfolder prefix from host1 labels.
+        if ( $mysync->{ subfolder1 } and $labels1 )
+        {
+                $labels1 = labels_remove_subfolder1( $labels1, $mysync->{ subfolder1 } ) ;
+        }
+
+        # --subfolder2: prefix host1 labels; drop host2 special labels so the
+        # comparison below is apples-to-apples.
+        if ( $mysync->{ subfolder2 } and $labels1 )
+        {
+                $labels1 = labels_add_subfolder2( $labels1, $mysync->{ subfolder2 }, $h1_folder ) ;
+                $labels2 = labels_remove_special( $labels2 ) ;
+        }
+        $mysync->{ debuglabels } and myprint( "Host1 labels fixed: $labels1\n" ) ;
+        $mysync->{ debuglabels } and myprint( "Host2 labels      : $labels2\n" ) ;
+
+        my $store ;
+        if ( $labels1 eq $labels2 )
+        {
+                # no sync needed
+                $mysync->{ debuglabels } and myprint( "Labels are already equal\n" ) ;
+                return 1 ;
+        }
+        elsif ( not $mysync->{ dry } )
+        {
+                # sync needed
+                $mysync->{debuglabels} and $mysync->{imap2}->Debug( 1 ) ;
+                $store = $mysync->{imap2}->store( $uid2, "X-GM-LABELS ($labels1)" ) ;
+                $mysync->{debuglabels} and $mysync->{imap2}->Debug( 0 ) ;
+        }
+
+        return $store ;
+}
+
+sub tests_uniq
+{
+        # Unit tests for uniq(): empty list, single item, duplicates.
+        note( 'Entering tests_uniq()' ) ;
+
+        is( 0, uniq( ), 'uniq: undef => 0' ) ;
+        is_deeply( [ 'one' ], [ uniq( 'one' ) ], 'uniq: one => one' ) ;
+        is_deeply( [ 'one' ], [ uniq( 'one', 'one' ) ], 'uniq: one one => one' ) ;
+        is_deeply( [ 'one', 'two' ], [ uniq( 'one', 'one', 'two', 'one', 'two' ) ], 'uniq: one one two one two => one two' ) ;
+        note( 'Leaving tests_uniq()' ) ;
+        return ;
+}
+
+sub uniq
+{
+        # Return the argument list with duplicates removed, keeping the
+        # first occurrence of each item in its original order.
+        my %seen ;
+        return grep { not $seen{ $_ }++ } @ARG ;
+}
+
+
+sub length_ref
+{
+        # Length of the scalar pointed to by $string_ref, or the empty string
+        # when the referenced value is undef (callers rely on q{}, not 0).
+        my $string_ref = shift ;
+
+        if ( defined ${ $string_ref } )
+        {
+                return length( ${ $string_ref } ) ;
+        }
+        return q{} ; # length or empty string
+}
+
+sub tests_length_ref
+{
+        # Unit tests for length_ref(): undef value, non-ref param, and
+        # plain 4-character strings.
+        note( 'Entering tests_length_ref()' ) ;
+
+        my $notdefined ;
+        is( q{}, length_ref( \$notdefined ), q{length_ref: value not defined} ) ;
+        my $notref ;
+        is( q{}, length_ref( $notref ), q{length_ref: param not a ref} ) ;
+
+        my $lala = 'lala' ;
+        is( 4, length_ref( \$lala ), q{length_ref: lala length == 4} ) ;
+        is( 4, length_ref( \'lili' ), q{length_ref: lili length == 4} ) ;
+
+        note( 'Leaving tests_length_ref()' ) ;
+        return ;
+}
+
+sub date_for_host2
+{
+        # Compute the internal date to use when appending message $h1_msg on
+        # host2, driven by the globals $syncinternaldates and $idatefromheader
+        # (and $sync for debug/imap1 access). When both options are set, the
+        # header date wins because it is computed last. Returns q{} when
+        # neither option is active.
+        my( $h1_msg, $h1_idate ) = @_ ;
+
+        my $h1_date = q{} ;
+
+        if ( $syncinternaldates ) {
+                $h1_date = $h1_idate ;
+                $sync->{ debug } and myprint( "internal date from host1: [$h1_date]\n" ) ;
+                # good_date() normalizes to an IMAP-acceptable date string.
+                $h1_date = good_date( $h1_date ) ;
+                $sync->{ debug } and myprint( "internal date from host1: [$h1_date] (fixed)\n" ) ;
+        }
+
+        if ( $idatefromheader ) {
+                $h1_date = $sync->{imap1}->get_header( $h1_msg, 'Date' ) ;
+                $sync->{ debug } and myprint( "header date from host1: [$h1_date]\n" ) ;
+                $h1_date = good_date( $h1_date ) ;
+                $sync->{ debug } and myprint( "header date from host1: [$h1_date] (fixed)\n" ) ;
+        }
+
+        return( $h1_date ) ;
+}
+
+sub flags_for_host2
+{
+        # Prepare a host1 flags string for the append on host2: drop \Recent,
+        # apply --regexflag rewrites, --flagscase normalization, and filter
+        # against host2's PERMANENTFLAGS when --filterflags is on.
+        my( $h1_flags, $permanentflags2 ) = @_ ;
+        # RFC 2060: This flag can not be altered by any client
+        $h1_flags =~ s@\\Recent\s?@@xgi ;
+        my $h1_flags_re ;
+        # --regexflag: only keep the rewrite when every regex eval'ed cleanly.
+        if ( @regexflag and defined( $h1_flags_re = flags_regex( $h1_flags ) ) ) {
+                $h1_flags = $h1_flags_re ;
+        }
+        $h1_flags = flagscase( $h1_flags ) if $flagscase ;
+        $h1_flags = flags_filter( $h1_flags, $permanentflags2) if ( $permanentflags2 and $filterflags ) ;
+
+        return( $h1_flags ) ;
+}
+
+sub subject
+{
+        # Extract the Subject: header value from a raw message string.
+        # Only the header part of the message is searched; returns the
+        # empty string when no Subject header is present there.
+        my $string = shift ;
+
+        my $header  = extract_header( $string ) ;
+        my $subject = q{} ;
+
+        if ( $header =~ m/^Subject:\s*([^\n\r]*)\r?$/msx )
+        {
+                #myprint( "MMM[$1]\n" ) ;
+                $subject = $1 ;
+        }
+
+        return( $subject ) ;
+}
+
+sub tests_subject
+{
+        # Unit tests for subject(): header placement variants, CRLF endings,
+        # and Subject lines appearing only in the body (must be ignored).
+        note( 'Entering tests_subject()' ) ;
+
+        ok( q{} eq subject( q{} ), 'subject: null') ;
+        ok( 'toto le hero' eq subject( 'Subject: toto le hero' ), 'subject: toto le hero') ;
+        ok( 'toto le hero' eq subject( 'Subject:toto le hero' ), 'subject: toto le hero blank') ;
+        ok( 'toto le hero' eq subject( "Subject:toto le hero\r\n" ), 'subject: toto le hero\r\n') ;
+
+        my $MESS ;
+        $MESS = <<'EOF';
+From: lalala
+Subject: toto le hero
+Date: zzzzzz
+
+Boogie boogie
+EOF
+        ok( 'toto le hero' eq subject( $MESS ), 'subject: toto le hero 2') ;
+
+        $MESS = <<'EOF';
+Subject: toto le hero
+From: lalala
+Date: zzzzzz
+
+Boogie boogie
+EOF
+        ok( 'toto le hero' eq subject( $MESS ), 'subject: toto le hero 3') ;
+
+
+        $MESS = <<'EOF';
+From: lalala
+Subject: cuicui
+Date: zzzzzz
+
+Subject: toto le hero
+EOF
+        ok( 'cuicui' eq subject( $MESS ), 'subject: cuicui') ;
+
+        $MESS = <<'EOF';
+From: lalala
+Date: zzzzzz
+
+Subject: toto le hero
+EOF
+        ok( q{} eq subject( $MESS ), 'subject: null but body could') ;
+
+        note( 'Leaving tests_subject()' ) ;
+        return ;
+}
+
+
+# GlobVar
+# $max_msg_size_in_bytes
+# $h2_uidguess
+# ...
+#
+#
+sub append_message_on_host2
+{
+        # Append one message (already fetched and transformed) to $h2_fold on
+        # host2, then do the per-message bookkeeping: UID discovery, label
+        # sync, counters, progress line, throttling, cache touch, optional
+        # --delete1. Returns the new host2 UID on success, undef on failure
+        # or in --dry mode.
+        my( $mysync, $string_ref, $h1_fold, $h1_msg, $string_len, $h2_fold, $h1_size, $h1_flags, $h1_date, $cache_dir ) = @_ ;
+        myprint( debugmemory( $mysync, " at A1" ) ) ;
+
+        my $new_id ;
+        if ( ! $mysync->{dry} ) {
+                $max_msg_size_in_bytes = max( $string_len, $max_msg_size_in_bytes ) ;
+                $new_id = $mysync->{imap2}->append_string( $h2_fold, ${ $string_ref }, $h1_flags, $h1_date ) ;
+                myprint( debugmemory( $mysync, " at A2" ) ) ;
+                if ( ! $new_id){
+                        my $subject = subject( ${ $string_ref } ) ;
+                        my $error_imap = $mysync->{imap2}->LastError || q{} ;
+                        my $error = "- msg $h1_fold/$h1_msg {$string_len} could not append ( Subject:[$subject], Date:[$h1_date], Size:[$h1_size], Flags:[$h1_flags] ) to folder $h2_fold: $error_imap\n" ;
+                        errors_incr( $mysync, $error ) ;
+                        $mysync->{ h1_nb_msg_processed } +=1 ;
+                        return ;
+                }
+                else{
+                        # good
+                        # $new_id is an id if the IMAP server has the
+                        # UIDPLUS capability else just a ref
+                        if ( $new_id !~ m{^\d+$}x ) {
+                                # No UIDPLUS: guess the UID from the folder state.
+                                $new_id = lastuid( $mysync->{imap2}, $h2_fold, $h2_uidguess ) ;
+                        }
+                        if ( $mysync->{ synclabels } ) { synclabels( $mysync, $h1_msg, $new_id ) }
+                        $h2_uidguess += 1 ;
+                        $mysync->{ total_bytes_transferred } += $string_len ;
+                        $mysync->{ nb_msg_transferred } += 1 ;
+                        $mysync->{ h1_nb_msg_processed } +=1 ;
+
+                        # Progress line: rate, amount copied and ETA so far.
+                        my $time_spent = timesince( $mysync->{begin_transfer_time} ) ;
+                        my $rate = bytes_display_string( $mysync->{total_bytes_transferred} / $time_spent ) ;
+                        my $eta = eta( $mysync ) ;
+                        my $amount_transferred = bytes_display_string( $mysync->{total_bytes_transferred} ) ;
+                        myprintf( "msg %s/%-19s copied to %s/%-10s %.2f msgs/s %s/s %s copied %s\n",
+                        $h1_fold, "$h1_msg {$string_len}", $h2_fold, $new_id, $mysync->{nb_msg_transferred}/$time_spent, $rate,
+                        $amount_transferred,
+                        $eta );
+                        sleep_if_needed( $mysync ) ;
+                        # --usecache + --cacheaftercopy: record the UID pair on disk.
+                        if ( $usecache and $cacheaftercopy and $new_id =~ m{^\d+$}x ) {
+                                $debugcache and myprint( "touch $cache_dir/${h1_msg}_$new_id\n" ) ;
+                                touch( "$cache_dir/${h1_msg}_$new_id" )
+                                or croak( "Couldn't touch $cache_dir/${h1_msg}_$new_id" ) ;
+                        }
+                        if ( $mysync->{ delete1 } ) {
+                                delete_message_on_host1( $mysync, $h1_fold, $mysync->{ expungeaftereach }, $h1_msg ) ;
+                        }
+                        #myprint( "PRESS ENTER" ) and my $a = <> ;
+
+                        return( $new_id ) ;
+                }
+        }
+        else{
+                # --dry: count the message as processed but copy nothing.
+                $nb_msg_skipped_dry_mode += 1 ;
+                $mysync->{ h1_nb_msg_processed } += 1 ;
+        }
+
+        return ;
+}
+
+
+sub tests_sleep_if_needed
+{
+        # Unit tests for sleep_if_needed(): no-limit and no-transfer cases,
+        # a computed 0.2 s sleep, the --maxsleep cap and --maxbytesafter.
+        note( 'Entering tests_sleep_if_needed()' ) ;
+
+        is( undef, sleep_if_needed(  ), 'sleep_if_needed: no args => undef' ) ;
+        my $mysync ;
+        is( undef, sleep_if_needed( $mysync ), 'sleep_if_needed: arg undef => undef' ) ;
+
+        $mysync->{maxbytespersecond} = 1000 ;
+        is( 0, sleep_if_needed( $mysync ), 'sleep_if_needed: maxbytespersecond only => no sleep => 0' ) ;
+        $mysync->{begin_transfer_time} = time ; # now
+        is( 0, sleep_if_needed( $mysync ), 'sleep_if_needed: begin_transfer_time now => no sleep => 0' ) ;
+        $mysync->{begin_transfer_time} = time - 2 ; # 2 s before
+        is( 0, sleep_if_needed( $mysync ), 'sleep_if_needed: total_bytes_transferred == 0 => no sleep => 0' ) ;
+
+        $mysync->{total_bytes_transferred} = 2200 ;
+        $mysync->{begin_transfer_time} = time - 2 ; # 2 s before
+        is( '0.20', sleep_if_needed( $mysync ), 'sleep_if_needed: total_bytes_transferred == 2200 since 2s => sleep 0.2s' ) ;
+        is( '0', sleep_if_needed( $mysync ), 'sleep_if_needed: total_bytes_transferred == 2200 since 2+2 == 4s => no sleep' ) ;
+
+        $mysync->{maxsleep} = 0.1 ;
+        $mysync->{begin_transfer_time} = time - 2 ; # 2 s before again
+        is( '0.10', sleep_if_needed( $mysync ), 'sleep_if_needed: total_bytes_transferred == 4000 since 2s but maxsleep 0.1s => sleep 0.1s' ) ;
+
+        $mysync->{maxbytesafter} = 4000 ;
+        $mysync->{begin_transfer_time} = time - 2 ; # 2 s before again
+        is( 0, sleep_if_needed( $mysync ), 'sleep_if_needed: maxbytesafter == total_bytes_transferred => no sleep => 0' ) ;
+
+        note( 'Leaving tests_sleep_if_needed()' ) ;
+        return ;
+}
+
+
+sub sleep_if_needed
+{
+        # Throttle the transfer according to --maxmessagespersecond and
+        # --maxbytespersecond (bytes counted only above --maxbytesafter),
+        # capped by --maxsleep. Sleeps when needed and returns the slept
+        # duration (2-decimal string), 0 when no sleep, undef on no args.
+        my( $mysync ) = shift ;
+
+        if ( ! $mysync ) {
+                return ;
+        }
+        # No need to go further if there is no limit set
+        if ( not ( $mysync->{maxmessagespersecond}
+                   or $mysync->{maxbytespersecond} )
+        ) {
+                return ;
+        }
+
+        # Default and sanitize the sleep cap.
+        $mysync->{maxsleep} = defined  $mysync->{maxsleep} ? $mysync->{maxsleep} : $MAX_SLEEP ;
+        # Must be positive
+        $mysync->{maxsleep} = max( 0, $mysync->{maxsleep} ) ;
+
+        my $time_spent = timesince( $mysync->{begin_transfer_time} ) ;
+        my $sleep_max_messages = sleep_max_messages( $mysync->{nb_msg_transferred}, $time_spent, $mysync->{maxmessagespersecond} ) ;
+
+        # --maxbytesafter: the first N bytes are exempt from the byte limit.
+        my $maxbytesafter = $mysync->{maxbytesafter} || 0 ;
+        my $total_bytes_transferred = $mysync->{total_bytes_transferred} || 0 ;
+        my $total_bytes_to_consider = $total_bytes_transferred - $maxbytesafter ;
+
+        #myprint( "maxbytesafter:$maxbytesafter\n" ) ;
+        #myprint( "total_bytes_to_consider:$total_bytes_to_consider\n" ) ;
+
+        my $sleep_max_bytes = sleep_max_bytes( $total_bytes_to_consider, $time_spent, $mysync->{maxbytespersecond} ) ;
+        # Sleep enough to satisfy the strictest limit, but never over maxsleep.
+        my $sleep_max = min( $mysync->{maxsleep}, max( $sleep_max_messages, $sleep_max_bytes ) ) ;
+        $sleep_max = mysprintf( "%.2f", $sleep_max ) ; # round with 2 decimals.
+        if ( $sleep_max > 0 ) {
+                myprint( "sleeping $sleep_max s\n" ) ;
+                sleep $sleep_max ;
+                # Slept
+                return $sleep_max ;
+        }
+        # No sleep
+        return 0 ;
+}
+
+sub sleep_max_messages
+{
+        # how long we have to sleep to go under max_messages_per_second
+        # No limit set (undef or <= 0) => 0. The result is never negative.
+        my( $nb_msg_transferred, $time_spent, $maxmessagespersecond ) = @_ ;
+
+        return( 0 ) if ( not defined $maxmessagespersecond ) or ( $maxmessagespersecond <= 0 ) ;
+
+        # Time the transfer *should* have taken, minus the time it did take.
+        my $sleep = $nb_msg_transferred / $maxmessagespersecond - $time_spent ;
+        # the sleep must be positive
+        return( max( 0, $sleep ) ) ;
+}
+
+
+sub tests_sleep_max_messages
+{
+        # Unit tests for sleep_max_messages(): disabled limit, limit reached
+        # exactly, exceeded, and not reached.
+        note( 'Entering tests_sleep_max_messages()' ) ;
+
+        ok( 0 == sleep_max_messages( 4, 2, undef ), 'sleep_max_messages: maxmessagespersecond = undef') ;
+        ok( 0 == sleep_max_messages( 4, 2, 0 ), 'sleep_max_messages: maxmessagespersecond = 0') ;
+        ok( 0 == sleep_max_messages( 4, 2, $MINUS_ONE ), 'sleep_max_messages: maxmessagespersecond = -1') ;
+        ok( 0 == sleep_max_messages( 4, 2, 2 ), 'sleep_max_messages: maxmessagespersecond = 2 max reached') ;
+        ok( 2 == sleep_max_messages( 8, 2, 2 ), 'sleep_max_messages: maxmessagespersecond = 2 max over') ;
+        ok( 0 == sleep_max_messages( 2, 2, 2 ), 'sleep_max_messages: maxmessagespersecond = 2 max not reached') ;
+
+        note( 'Leaving tests_sleep_max_messages()' ) ;
+        return ;
+}
+
+
+# sleep_max_bytes
+# Returns the number of seconds to pause so that the byte rate stays
+# at or below maxbytespersecond. Returns 0 when no limit is set or
+# the current rate is already under the limit.
+sub sleep_max_bytes
+{
+ my( $bytes_count, $elapsed, $bytes_per_second_limit ) = @_ ;
+
+ # Default undefined counters to zero.
+ $bytes_count = $bytes_count || 0 ;
+ $elapsed = $elapsed || 0 ;
+
+ # No limit configured, or a non-positive one: never sleep.
+ if ( ( ! defined $bytes_per_second_limit ) || ( $bytes_per_second_limit <= 0 ) ) {
+ return( 0 ) ;
+ }
+ #myprint( "total_bytes_to_consider:$bytes_count\n" ) ;
+ # Elapsed time required to respect the limit, minus time already spent.
+ my $wait = ( $bytes_count / $bytes_per_second_limit ) - $elapsed ;
+ # Clamp to a positive value.
+ return( max( 0, $wait ) ) ;
+}
+
+
+# Unit tests for sleep_max_bytes().
+sub tests_sleep_max_bytes
+{
+ note( 'Entering tests_sleep_max_bytes()' ) ;
+
+ # Undefined or non-positive limits mean "no throttling".
+ ok( 0 == sleep_max_bytes( 4000, 2, undef ), 'sleep_max_bytes: maxbytespersecond == undef => sleep 0' ) ;
+ ok( 0 == sleep_max_bytes( 4000, 2, 0 ), 'sleep_max_bytes: maxbytespersecond = 0 => sleep 0') ;
+ ok( 0 == sleep_max_bytes( 4000, 2, $MINUS_ONE ), 'sleep_max_bytes: maxbytespersecond = -1 => sleep 0') ;
+ ok( 0 == sleep_max_bytes( 4000, 2, 2000 ), 'sleep_max_bytes: maxbytespersecond = 2k max reached sharp => sleep 0') ;
+ ok( 2 == sleep_max_bytes( 8000, 2, 2000 ), 'sleep_max_bytes: maxbytespersecond = 2k max over => sleep a little') ;
+ # Negative byte counts (transfer below the maxbytesafter threshold) never sleep.
+ ok( 0 == sleep_max_bytes( -8000, 2, 2000 ), 'sleep_max_bytes: maxbytespersecond = 2k max not reached => sleep 0') ;
+ ok( 0 == sleep_max_bytes( 2000, 2, 2000 ), 'sleep_max_bytes: maxbytespersecond = 2k max not reached => sleep 0') ;
+ ok( 0 == sleep_max_bytes( -2000, 2, 1000 ), 'sleep_max_bytes: maxbytespersecond = 1k max not reached => sleep 0') ;
+
+ note( 'Leaving tests_sleep_max_bytes()' ) ;
+ return ;
+}
+
+
+# delete_message_on_host1
+# Deletes the given message uids in a host1 folder, but only when the
+# --delete1 option is active and the uid list is not empty. The batch
+# size comes from the global $split1.
+sub delete_message_on_host1
+{
+ my( $mysync, $h1_fold, $expunge, @h1_msg ) = @_ ;
+ if ( ! $mysync->{ delete1 } ) { return ; }
+ if ( ! @h1_msg ) { return ; }
+ delete_messages_on_any(
+ $mysync,
+ $mysync->{imap1},
+ "Host1: $h1_fold",
+ $expunge,
+ $split1,
+ @h1_msg ) ;
+ return ;
+}
+
+# Sanity tests documenting the behavior of ! with and/&& operators,
+# written while investigating a perlcritic warning (see comment below).
+sub tests_operators_and_exclam_precedence
+{
+ note( 'Entering tests_operators_and_exclam_precedence()' ) ;
+
+ # In Perl, boolean true is 1 and boolean false is the empty string.
+ is( 1, ! 0, 'tests_operators_and_exclam_precedence: ! 0 => 1' ) ;
+ is( "", ! 1, 'tests_operators_and_exclam_precedence: ! 1 => ""' ) ;
+ is( 1, not( 0 ), 'tests_operators_and_exclam_precedence: not( 0 ) => 1' ) ;
+ is( "", not( 1 ), 'tests_operators_and_exclam_precedence: not( 1 ) => ""' ) ;
+
+ # I wrote those tests to avoid perlcrit "Mixed high and low-precedence booleans"
+ # and change sub delete_messages_on_any() but got 4 more warnings... So now commented.
+
+ #is( 0, ( ! 0 and 0 ), 'tests_operators_and_exclam_precedence: ! 0 and 0 ) => 0' ) ;
+ #is( 1, ( ! 0 and 1 ), 'tests_operators_and_exclam_precedence: ! 0 and 1 ) => 1' ) ;
+ #is( "", ( ! 1 and 0 ), 'tests_operators_and_exclam_precedence: ! 1 and 0 ) => ""' ) ;
+ #is( "", ( ! 1 and 1 ), 'tests_operators_and_exclam_precedence: ! 1 and 1 ) => ""' ) ;
+
+ is( 0, ( ! 0 && 0 ), 'tests_operators_and_exclam_precedence: ! 0 && 0 ) => 0' ) ;
+ is( 1, ( ! 0 && 1 ), 'tests_operators_and_exclam_precedence: ! 0 && 1 ) => 1' ) ;
+ is( "", ( ! 1 && 0 ), 'tests_operators_and_exclam_precedence: ! 1 && 0 ) => ""' ) ;
+ is( "", ( ! 1 && 1 ), 'tests_operators_and_exclam_precedence: ! 1 && 1 ) => ""' ) ;
+
+ # && returns its last evaluated operand, not a normalized boolean.
+ is( 2, ( ! 0 && 2 ), 'tests_operators_and_exclam_precedence: ! 0 && 2 ) => 1' ) ;
+
+ note( 'Leaving tests_operators_and_exclam_precedence()' ) ;
+ return ;
+}
+
+# delete_messages_on_any
+# Marks the given message uids as deleted on one side, in batches of
+# $split messages, then optionally expunges them. Honors dry mode
+# (prints what would be done without touching the server).
+sub delete_messages_on_any
+{
+ my( $mysync, $imap, $hostX_folder, $expunge, $split, @messages ) = @_ ;
+ my $expunge_message = q{} ;
+
+ my $dry_message = $mysync->{ dry_message } ;
+ $expunge_message = 'and expunged' if ( $expunge ) ;
+ # "Host1: msg "
+
+ # NOTE(review): IMAP protocol debugging is switched on for the whole
+ # deletion -- presumably leftover debugging; confirm before removing.
+ $imap->Debug( 1 ) ;
+
+ # Keep a copy of the uid list: the splice loop below consumes
+ # @messages entirely, and the expunge at the end still needs it.
+ # (Previously the emptied @messages was passed, so the expunge
+ # silently never happened.)
+ my @messages_to_expunge = @messages ;
+
+ while ( my @messages_part = splice @messages, 0, $split )
+ {
+ foreach my $message ( @messages_part )
+ {
+ myprint( "$hostX_folder/$message marking deleted $expunge_message $dry_message\n" ) ;
+ }
+ if ( ! $mysync->{dry} && @messages_part )
+ {
+ my $nb_deleted = $imap->delete_message( $imap->Range( @messages_part ) ) ;
+ if ( defined $nb_deleted )
+ {
+ # $nb_deleted is not accurate
+ $mysync->{ h1_nb_msg_deleted } += scalar @messages_part ;
+ }
+ else
+ {
+ my $error_imap = $imap->LastError || q{} ;
+ my $error = join( q{}, "$hostX_folder folder, could not delete ",
+ scalar @messages_part, ' messages: ', $error_imap, "\n" ) ;
+ errors_incr( $mysync, $error ) ;
+ }
+ }
+ }
+
+ # Expunge only outside dry mode, like the deletions above.
+ if ( $expunge && ! $mysync->{dry} ) {
+ uidexpunge_or_expunge( $mysync, $imap, @messages_to_expunge ) ;
+ }
+
+ $imap->Debug( 0 ) ;
+
+ return ;
+}
+
+
+# Unit tests for uidexpunge_or_expunge(), using Test::MockObject to
+# simulate the Mail-IMAPClient uidexpunge/expunge calls.
+sub tests_uidexpunge_or_expunge
+{
+ note( 'Entering tests_uidexpunge_or_expunge()' ) ;
+
+
+ is( undef, uidexpunge_or_expunge( ), 'uidexpunge_or_expunge: no args => undef' ) ;
+ my $mysync ;
+ is( undef, uidexpunge_or_expunge( $mysync ), 'uidexpunge_or_expunge: undef args => undef' ) ;
+ $mysync = {} ;
+ is( undef, uidexpunge_or_expunge( $mysync ), 'uidexpunge_or_expunge: arg empty => undef' ) ;
+ my $imap ;
+ is( undef, uidexpunge_or_expunge( $mysync, $imap ), 'uidexpunge_or_expunge: undef Mail-IMAPClient instance => undef' ) ;
+
+ require_ok( "Test::MockObject" ) ;
+ $imap = Test::MockObject->new( ) ;
+ is( undef, uidexpunge_or_expunge( $mysync, $imap ), 'uidexpunge_or_expunge: no message (1) to uidexpunge => undef' ) ;
+
+ my @messages = ( ) ;
+ is( undef, uidexpunge_or_expunge( $mysync, $imap, @messages ), 'uidexpunge_or_expunge: no message (2) to uidexpunge => undef' ) ;
+
+ # Both calls fail => overall failure.
+ @messages = ( '2', '1' ) ;
+ $imap->mock( 'uidexpunge', sub { return ; } ) ;
+ $imap->mock( 'expunge', sub { return ; } ) ;
+ is( undef, uidexpunge_or_expunge( $mysync, $imap, @messages ), 'uidexpunge_or_expunge: uidexpunge failure => expunge failure => undef' ) ;
+
+ # uidexpunge fails but the expunge fallback succeeds.
+ $imap->mock( 'expunge', sub { return 1 ; } ) ;
+ is( 1, uidexpunge_or_expunge( $mysync, $imap, @messages ), 'uidexpunge_or_expunge: uidexpunge failure => expunge ok => 1' ) ;
+
+ # uidexpunge succeeds on the first try.
+ $imap->mock( 'uidexpunge', sub { return 1 ; } ) ;
+ is( 1, uidexpunge_or_expunge( $mysync, $imap, @messages ), 'uidexpunge_or_expunge: messages to uidexpunge ok => 1' ) ;
+
+ note( 'Leaving tests_uidexpunge_or_expunge()' ) ;
+ return ;
+}
+
+# uidexpunge_or_expunge
+# Tries a UIDEXPUNGE of the given messages first; when that fails,
+# falls back to a plain EXPUNGE of the folder. Returns 1 on success,
+# undef on failure or when there is nothing to do.
+sub uidexpunge_or_expunge
+{
+ my( $mysync, $imap, @messages ) = @_ ;
+
+ # Nothing to do without an IMAP connection or without messages.
+ return if ( ! $imap ) ;
+ return if ( ! @messages ) ;
+
+ # First attempt: UIDEXPUNGE of just those uids (list context, as
+ # Mail-IMAPClient returns the expunged uids).
+ my @uidexpunge_ok = $imap->uidexpunge( @messages ) ;
+ return 1 if ( @uidexpunge_ok ) ;
+
+ # Fallback: a folder-wide EXPUNGE.
+ my $expunge_ok = $imap->expunge( ) ;
+ return 1 if ( $expunge_ok ) ;
+
+ # bad trip
+ return ;
+}
+
+# eta_print
+# Prints the estimated-time-of-arrival line when eta() produces one;
+# stays silent otherwise.
+sub eta_print
+{
+ my $mysync = shift ;
+ my $eta_line = eta( $mysync ) ;
+ if ( $eta_line )
+ {
+ myprint( "$eta_line\n" ) ;
+ }
+ return ;
+}
+
+# Unit tests for eta().
+sub tests_eta
+{
+ note( 'Entering tests_eta()' ) ;
+
+ is( q{}, eta( ), 'eta: no args => ""' ) ;
+ is( q{}, eta( undef ), 'eta: undef => ""' ) ;
+ my $mysync = {} ;
+ # No foldersizes
+ is( q{}, eta( $mysync ), 'eta: No foldersizes => ""' ) ;
+
+ $mysync->{ foldersizes } = 1 ;
+
+ $mysync->{ begin_transfer_time } = time ; # Now
+ $mysync->{ h1_nb_msg_processed } = 0 ;
+
+ # With zero elapsed time the ETA is "now".
+ is( "ETA: " . localtime( time ) . " 0 s 0/0 msgs left",
+ eta( $mysync ),
+ 'eta: no args => ETA: "Now" 0 s 0/0 msgs left' ) ;
+
+ $mysync->{ h1_nb_msg_processed } = 1 ;
+ $mysync->{ h1_nb_msg_start } = 2 ;
+ is( "ETA: " . localtime( time ) . " 0 s 1/2 msgs left",
+ eta( $mysync ),
+ 'eta: 1, 1, 2 => ETA: "Now" 0 s 1/2 msgs left' ) ;
+
+ note( 'Leaving tests_eta()' ) ;
+ return ;
+}
+
+
+# eta
+# Builds the "ETA: <date> <seconds> s <left>/<total> msgs left" line.
+# Returns q{} without a sync hash, or when foldersizes is off (no
+# totals to extrapolate from). In dry mode the processed count stands
+# in for the transferred count.
+sub eta
+{
+ my( $mysync ) = shift ;
+
+ if ( ! $mysync )
+ {
+ return q{} ;
+ }
+
+ return( q{} ) if not $mysync->{ foldersizes } ;
+
+ my $h1_nb_msg_start = $mysync->{ h1_nb_msg_start } ;
+ my $h1_nb_processed = $mysync->{ h1_nb_msg_processed } ;
+ # Dry runs transfer nothing, so extrapolate from the processed count.
+ my $nb_msg_transferred = ( $mysync->{dry} ) ? $mysync->{ h1_nb_msg_processed } : $mysync->{ nb_msg_transferred } ;
+ my $time_spent = timesince( $mysync->{ begin_transfer_time } ) ;
+ # Default undefined counters so the arithmetic below is safe.
+ $h1_nb_processed ||= 0 ;
+ $h1_nb_msg_start ||= 0 ;
+ $time_spent ||= 0 ;
+
+ my $time_remaining = time_remaining( $time_spent, $h1_nb_processed, $h1_nb_msg_start, $nb_msg_transferred ) ;
+ $mysync->{ debug } and myprint( "time_spent: $time_spent time_remaining: $time_remaining\n" ) ;
+ my $nb_msg_remaining = $h1_nb_msg_start - $h1_nb_processed ;
+ # The ETA date is "now" plus the extrapolated remaining time.
+ my $eta_date = localtime( time + $time_remaining ) ;
+ return( mysprintf( 'ETA: %s %1.0f s %s/%s msgs left',
+ $eta_date, $time_remaining, $nb_msg_remaining, $h1_nb_msg_start ) ) ;
+}
+
+
+
+
+# time_remaining
+# Estimates the seconds left by extrapolating the average time per
+# transferred message over the messages still to process.
+sub time_remaining
+{
+ my( $elapsed, $nb_processed, $nb_total, $nb_transferred ) = @_ ;
+
+ # Guard against undef values and division by zero.
+ $nb_transferred = $nb_transferred || 1 ; # At least one is done (no division by zero)
+ $nb_processed = $nb_processed || 0 ;
+ $nb_total = $nb_total || $nb_processed ;
+ $elapsed = $elapsed || 0 ;
+
+ # average seconds per transferred message * messages left
+ my $seconds_per_message = $elapsed / $nb_transferred ;
+ my $nb_left = $nb_total - $nb_processed ;
+ return( $seconds_per_message * $nb_left ) ;
+}
+
+
+# Unit tests for time_remaining().
+sub tests_time_remaining
+{
+ note( 'Entering tests_time_remaining()' ) ;
+
+ # time_spent, nb_processed, nb_to_do_total, nb_transferred
+ is( 0, time_remaining( ), 'time_remaining: no args -> 0' ) ;
+ is( 0, time_remaining( 0, 0, 0, 0 ), 'time_remaining: 0, 0, 0, 0 -> 0' ) ;
+ is( 1, time_remaining( 1, 1, 2, 1 ), 'time_remaining: 1, 1, 2, 1 -> 1' ) ;
+ is( 1, time_remaining( 9, 9, 10, 9 ), 'time_remaining: 9, 9, 10, 9 -> 1' ) ;
+ is( 9, time_remaining( 1, 1, 10, 1 ), 'time_remaining: 1, 1, 10, 1 -> 9' ) ;
+ is( 5, time_remaining( 5, 5, 10, 5 ), 'time_remaining: 5, 5, 10, 5 -> 5' ) ;
+ # nb_transferred of 0 is coerced to 1 to avoid a division by zero.
+ is( 25, time_remaining( 5, 5, 10, 0 ), 'time_remaining: 5, 5, 10, 0 -> ( 5 / 1 ) * ( 10 - 5) = 25' ) ;
+ is( 25, time_remaining( 5, 5, 10, 1 ), 'time_remaining: 5, 5, 10, 1 -> ( 5 / 1 ) * ( 10 - 5) = 25' ) ;
+
+ note( 'Leaving tests_time_remaining()' ) ;
+ return ;
+}
+
+
+# cache_map
+# Builds the uid1 -> uid2 mapping from cache file names of the form
+# "<uid1>_<uid2>", keeping only pairs whose uids still exist on both
+# hosts. When several cache files share a uid1 (or a uid2), only the
+# pair with the greatest counterpart is kept. Returns two hash
+# references: ( \%map1_2, \%map2_1 ).
+sub cache_map
+{
+ my ( $cache_files_ref, $h1_msgs_ref, $h2_msgs_ref ) = @_;
+ my ( %map1_2, %map2_1, %done2 ) ;
+
+ # Existence hashes: "is this uid still present on host1/host2?".
+ my $h1_msgs_hash_ref = { } ;
+ my $h2_msgs_hash_ref = { } ;
+
+ @{ $h1_msgs_hash_ref }{ @{ $h1_msgs_ref } } = ( ) ;
+ @{ $h2_msgs_hash_ref }{ @{ $h2_msgs_ref } } = ( ) ;
+
+ foreach my $file ( sort @{ $cache_files_ref } ) {
+ $debugcache and myprint( "C12: $file\n" ) ;
+ # Lexical scope: previously assigned to undeclared package
+ # globals $uid1/$uid2, leaking state between subs.
+ my ( $uid1, $uid2 ) = match_a_cache_file( $file ) ;
+
+ if ( exists( $h1_msgs_hash_ref->{ defined $uid1 ? $uid1 : q{} } )
+ and exists( $h2_msgs_hash_ref->{ defined $uid2 ? $uid2 : q{} } ) ) {
+ # keep only the greatest uid2
+ # 130_2301 and
+ # 130_231 => keep only 130 -> 2301
+
+ # keep only the greatest uid1
+ # 1601_260 and
+ # 161_260 => keep only 1601 -> 260
+ my $max_uid2 = max( $uid2, $map1_2{ $uid1 } || $MINUS_ONE ) ;
+ if ( exists $done2{ $max_uid2 } ) {
+ # uid2 already claimed: the greater uid1 wins and the
+ # loser's mapping is dropped.
+ if ( $done2{ $max_uid2 } < $uid1 ) {
+ $map1_2{ $uid1 } = $max_uid2 ;
+ delete $map1_2{ $done2{ $max_uid2 } } ;
+ $done2{ $max_uid2 } = $uid1 ;
+ }
+ }else{
+ $map1_2{ $uid1 } = $max_uid2 ;
+ $done2{ $max_uid2 } = $uid1 ;
+ }
+ };
+
+ }
+ # The reverse mapping is valid because uid2 values are unique by now.
+ %map2_1 = reverse %map1_2 ;
+ return( \%map1_2, \%map2_1) ;
+}
+
+# Unit tests for cache_map(), covering duplicate-uid arbitration.
+sub tests_cache_map
+{
+ note( 'Entering tests_cache_map()' ) ;
+
+ #$debugcache = 1 ;
+ my @cache_files = qw (
+ 100_200
+ 101_201
+ 120_220
+ 142_242
+ 143_243
+ 177_277
+ 177_278
+ 177_279
+ 155_255
+ 180_280
+ 181_280
+ 182_280
+ 130_231
+ 130_2301
+ 161_260
+ 1601_260
+ ) ;
+
+ # Messages still alive on each side; pairs outside these lists are dropped.
+ my $msgs_1 = [120, 142, 143, 144, 161, 1601, 177, 182, 130 ];
+ my $msgs_2 = [ 242, 243, 260, 299, 377, 279, 255, 280, 231, 2301 ];
+
+ my( $c12, $c21 ) ;
+ ok( ( $c12, $c21 ) = cache_map( \@cache_files, $msgs_1, $msgs_2 ), 'cache_map: 02' );
+ my $a1 = [ sort { $a <=> $b } keys %{ $c12 } ] ;
+ my $a2 = [ sort { $a <=> $b } keys %{ $c21 } ] ;
+ ok( 0 == compare_lists( [ 130, 142, 143, 177, 182, 1601 ], $a1 ), 'cache_map: 03' );
+ ok( 0 == compare_lists( [ 242, 243, 260, 279, 280, 2301 ], $a2 ), 'cache_map: 04' );
+ # 161_260 loses against 1601_260: the greater uid1 wins a shared uid2.
+ ok( ! $c12->{161}, 'cache_map: ! 161 -> 260' );
+ ok( 260 == $c12->{1601}, 'cache_map: 1601 -> 260' );
+ # 130_231 loses against 130_2301: the greater uid2 wins a shared uid1.
+ ok( 2301 == $c12->{130}, 'cache_map: 130 -> 2301' );
+ #myprint( $c12->{1601}, "\n" ) ;
+
+ note( 'Leaving tests_cache_map()' ) ;
+ return ;
+
+}
+
+# cache_dir_fix
+# Backslash-escapes shell/glob metacharacters in a cache directory
+# path (Unix flavor) and returns the escaped copy.
+sub cache_dir_fix
+{
+ my $cache_dir = shift ;
+ # Prefix each of ; < > * | ` & $ ! # ( ) [ ] { } : ' " \ with a backslash.
+ $cache_dir =~ s{([;<>\*\|`&\$!#\(\)\[\]\{\}:'"\\])}{\\$1}xg ;
+ #myprint( "cache_dir_fix: $cache_dir\n" ) ;
+ return( $cache_dir ) ;
+}
+
+# Unit tests for cache_dir_fix(). Beware of the double escaping in the
+# expected strings: one level for Perl quoting, one for the function.
+sub tests_cache_dir_fix
+{
+ note( 'Entering tests_cache_dir_fix()' ) ;
+
+ ok( 'lalala' eq cache_dir_fix('lalala'), 'cache_dir_fix: lalala -> lalala' );
+ ok( 'ii\\\\ii' eq cache_dir_fix('ii\ii'), 'cache_dir_fix: ii\ii -> ii\\\\ii' );
+ # @ is not a special character for the cache path.
+ ok( 'ii@ii' eq cache_dir_fix('ii@ii'), 'cache_dir_fix: ii@ii -> ii@ii' );
+ ok( 'ii@ii\\:ii' eq cache_dir_fix('ii@ii:ii'), 'cache_dir_fix: ii@ii:ii -> ii@ii\\:ii' );
+ ok( 'i\\\\i\\\\ii' eq cache_dir_fix('i\i\ii'), 'cache_dir_fix: i\i\ii -> i\\\\i\\\\ii' );
+ ok( 'i\\\\ii' eq cache_dir_fix('i\\ii'), 'cache_dir_fix: i\\ii -> i\\\\\\\\ii' );
+ ok( '\\\\ ' eq cache_dir_fix('\\ '), 'cache_dir_fix: \\ -> \\\\\ ' );
+ ok( '\\\\ ' eq cache_dir_fix('\ '), 'cache_dir_fix: \ -> \\\\\ ' );
+ ok( '\[bracket\]' eq cache_dir_fix('[bracket]'), 'cache_dir_fix: [bracket] -> \[bracket\]' );
+
+ note( 'Leaving tests_cache_dir_fix()' ) ;
+ return ;
+}
+
+# cache_dir_fix_win
+# Neutralizes the glob metacharacters [ and ] on Windows by wrapping
+# each in a one-character class: [ -> [[] and ] -> []].
+sub cache_dir_fix_win
+{
+ my $cache_dir = shift ;
+ $cache_dir =~ s{(\[|\])}{[$1]}xg ;
+ #myprint( "cache_dir_fix_win: $cache_dir\n" ) ;
+ return( $cache_dir ) ;
+}
+
+# Unit tests for cache_dir_fix_win().
+sub tests_cache_dir_fix_win
+{
+ note( 'Entering tests_cache_dir_fix_win()' ) ;
+
+ ok( 'lalala' eq cache_dir_fix_win('lalala'), 'cache_dir_fix_win: lalala -> lalala' );
+ ok( '[[]bracket[]]' eq cache_dir_fix_win('[bracket]'), 'cache_dir_fix_win: [bracket] -> [[]bracket[]]' );
+
+ note( 'Leaving tests_cache_dir_fix_win()' ) ;
+ return ;
+}
+
+
+
+
+# get_cache
+# Reads the cache directory of a folder pair: escapes the path for
+# globbing, builds the ( uid1 -> uid2, uid2 -> uid1 ) maps with
+# cache_map(), then prunes stale cache files with clean_cache().
+# Returns undef when the cache directory does not exist, otherwise
+# the pair of hash references.
+sub get_cache
+{
+ my ( $cache_dir, $h1_msgs_ref, $h2_msgs_ref, $h1_msgs_all_hash_ref, $h2_msgs_all_hash_ref ) = @_;
+
+ $debugcache and myprint( "Entering get_cache\n" ) ;
+
+ -d $cache_dir or return( undef ); # exit if cache directory doesn't exist
+ $debugcache and myprint( "cache_dir : $cache_dir\n" ) ;
+
+
+ # Escaping differs per platform: backslash-escaping on Unix,
+ # bracket classes on Windows (see the two fix subs).
+ if ( 'MSWin32' ne $OSNAME ) {
+ $cache_dir = cache_dir_fix( $cache_dir ) ;
+ }else{
+ $cache_dir = cache_dir_fix_win( $cache_dir ) ;
+ }
+
+ $debugcache and myprint( "cache_dir_fix: $cache_dir\n" ) ;
+
+ my @cache_files = bsd_glob( "$cache_dir/*" ) ;
+ #$debugcache and myprint( "cache_files: [@cache_files]\n" ) ;
+
+ $debugcache and myprint( 'cache_files: ', scalar @cache_files , " files found\n" ) ;
+
+ my( $cache_1_2_ref, $cache_2_1_ref )
+ = cache_map( \@cache_files, $h1_msgs_ref, $h2_msgs_ref ) ;
+
+ # Remove cache files that no longer match live messages.
+ clean_cache( \@cache_files, $cache_1_2_ref, $h1_msgs_all_hash_ref, $h2_msgs_all_hash_ref ) ;
+
+ $debugcache and myprint( "Exiting get_cache\n" ) ;
+ return( $cache_1_2_ref, $cache_2_1_ref ) ;
+}
+
+
+# Integration tests for get_cache(): builds a throw-away cache tree
+# under W/tmp/cache, then checks the returned maps and which cache
+# files survive the embedded clean_cache() pass.
+sub tests_get_cache
+{
+ note( 'Entering tests_get_cache()' ) ;
+
+ ok( not( get_cache('/cache_no_exist') ), 'get_cache: /cache_no_exist' );
+ ok( ( not -d 'W/tmp/cache/F1/F2' or rmtree( 'W/tmp/cache/F1/F2' ) ), 'get_cache: rmtree W/tmp/cache/F1/F2' ) ;
+ ok( mkpath( 'W/tmp/cache/F1/F2' ), 'get_cache: mkpath W/tmp/cache/F1/F2' ) ;
+
+ my @test_files_cache = ( qw(
+ W/tmp/cache/F1/F2/100_200
+ W/tmp/cache/F1/F2/101_201
+ W/tmp/cache/F1/F2/120_220
+ W/tmp/cache/F1/F2/142_242
+ W/tmp/cache/F1/F2/143_243
+ W/tmp/cache/F1/F2/177_277
+ W/tmp/cache/F1/F2/177_377
+ W/tmp/cache/F1/F2/177_777
+ W/tmp/cache/F1/F2/155_255
+ ) ) ;
+ ok( touch( @test_files_cache ), 'get_cache: touch W/tmp/cache/F1/F2/...' ) ;
+
+
+ # on cache: 100_200 101_201 142_242 143_243 177_277 177_377 177_777 155_255
+ # on live:
+ my $msgs_1 = [120, 142, 143, 144, 177 ];
+ my $msgs_2 = [ 242, 243, 299, 377, 777, 255 ];
+
+ my $msgs_all_1 = { 120 => 0, 142 => 0, 143 => 0, 144 => 0, 177 => 0 } ;
+ my $msgs_all_2 = { 242 => 0, 243 => 0, 299 => 0, 377 => 0, 777 => 0, 255 => 0 } ;
+
+ my( $c12, $c21 ) ;
+ ok( ( $c12, $c21 ) = get_cache( 'W/tmp/cache/F1/F2', $msgs_1, $msgs_2, $msgs_all_1, $msgs_all_2 ), 'get_cache: 02' );
+ my $a1 = [ sort { $a <=> $b } keys %{ $c12 } ] ;
+ my $a2 = [ sort { $a <=> $b } keys %{ $c21 } ] ;
+ ok( 0 == compare_lists( [ 142, 143, 177 ], $a1 ), 'get_cache: 03' );
+ ok( 0 == compare_lists( [ 242, 243, 777 ], $a2 ), 'get_cache: 04' );
+ ok( -f 'W/tmp/cache/F1/F2/142_242', 'get_cache: file kept 142_242');
+ ok( -f 'W/tmp/cache/F1/F2/142_242', 'get_cache: file kept 143_243');
+ ok( ! -f 'W/tmp/cache/F1/F2/100_200', 'get_cache: file removed 100_200');
+ ok( ! -f 'W/tmp/cache/F1/F2/101_201', 'get_cache: file removed 101_201');
+
+ # test clean_cache executed
+ $maxage = 2 ;
+ ok( touch(@test_files_cache), 'get_cache: touch W/tmp/cache/F1/F2/...' ) ;
+ ok( ( $c12, $c21 ) = get_cache('W/tmp/cache/F1/F2', $msgs_1, $msgs_2, $msgs_all_1, $msgs_all_2 ), 'get_cache: 02' );
+ ok( -f 'W/tmp/cache/F1/F2/142_242', 'get_cache: file kept 142_242');
+ ok( -f 'W/tmp/cache/F1/F2/142_242', 'get_cache: file kept 143_243');
+ ok( ! -f 'W/tmp/cache/F1/F2/100_200', 'get_cache: file NOT removed 100_200');
+ ok( ! -f 'W/tmp/cache/F1/F2/101_201', 'get_cache: file NOT removed 101_201');
+
+
+ # strange files
+ # Same scenario with shell metacharacters in the cache path.
+ #$debugcache = 1 ;
+ $maxage = undef ;
+ ok( ( not -d 'W/tmp/cache/rr\uee' or rmtree( 'W/tmp/cache/rr\uee' )), 'get_cache: rmtree W/tmp/cache/rr\uee' ) ;
+ ok( mkpath( 'W/tmp/cache/rr\uee' ), 'get_cache: mkpath W/tmp/cache/rr\uee' ) ;
+
+ @test_files_cache = ( qw(
+ W/tmp/cache/rr\uee/100_200
+ W/tmp/cache/rr\uee/101_201
+ W/tmp/cache/rr\uee/120_220
+ W/tmp/cache/rr\uee/142_242
+ W/tmp/cache/rr\uee/143_243
+ W/tmp/cache/rr\uee/177_277
+ W/tmp/cache/rr\uee/177_377
+ W/tmp/cache/rr\uee/177_777
+ W/tmp/cache/rr\uee/155_255
+ ) ) ;
+ ok( touch(@test_files_cache), 'get_cache: touch strange W/tmp/cache/...' ) ;
+
+ # on cache: 100_200 101_201 142_242 143_243 177_277 177_377 177_777 155_255
+ # on live:
+ $msgs_1 = [120, 142, 143, 144, 177 ] ;
+ $msgs_2 = [ 242, 243, 299, 377, 777, 255 ] ;
+
+ $msgs_all_1 = { 120 => q{}, 142 => q{}, 143 => q{}, 144 => q{}, 177 => q{} } ;
+ $msgs_all_2 = { 242 => q{}, 243 => q{}, 299 => q{}, 377 => q{}, 777 => q{}, 255 => q{} } ;
+
+ ok( ( $c12, $c21 ) = get_cache('W/tmp/cache/rr\uee', $msgs_1, $msgs_2, $msgs_all_1, $msgs_all_2), 'get_cache: strange path 02' );
+ $a1 = [ sort { $a <=> $b } keys %{ $c12 } ] ;
+ $a2 = [ sort { $a <=> $b } keys %{ $c21 } ] ;
+ ok( 0 == compare_lists( [ 142, 143, 177 ], $a1 ), 'get_cache: strange path 03' );
+ ok( 0 == compare_lists( [ 242, 243, 777 ], $a2 ), 'get_cache: strange path 04' );
+ ok( -f 'W/tmp/cache/rr\uee/142_242', 'get_cache: strange path file kept 142_242');
+ ok( -f 'W/tmp/cache/rr\uee/142_242', 'get_cache: strange path file kept 143_243');
+ ok( ! -f 'W/tmp/cache/rr\uee/100_200', 'get_cache: strange path file removed 100_200');
+ ok( ! -f 'W/tmp/cache/rr\uee/101_201', 'get_cache: strange path file removed 101_201');
+
+ note( 'Leaving tests_get_cache()' ) ;
+ return ;
+}
+
+# match_a_cache_file
+# Extracts ( uid1, uid2 ) from a cache file name whose basename is
+# exactly "<digits>_<digits>", optionally preceded by a directory
+# path. Returns ( undef, undef ) when the name does not match.
+sub match_a_cache_file
+{
+ my $file = shift ;
+
+ # Empty or undefined name: nothing to match.
+ if ( ! $file ) {
+ return( ( undef, undef ) ) ;
+ }
+ my ( $cache_uid1, $cache_uid2 ) ;
+ if ( $file =~ m{(?:^|/)(\d+)_(\d+)$}x ) {
+ ( $cache_uid1, $cache_uid2 ) = ( $1, $2 ) ;
+ }
+ return( $cache_uid1, $cache_uid2 ) ;
+}
+
+# Unit tests for match_a_cache_file().
+sub tests_match_a_cache_file
+{
+ note( 'Entering tests_match_a_cache_file()' ) ;
+
+ my ( $tuid1, $tuid2 ) ;
+ ok( ( $tuid1, $tuid2 ) = match_a_cache_file( ), 'match_a_cache_file: no arg' ) ;
+ ok( ! defined $tuid1 , 'match_a_cache_file: no arg 1' ) ;
+ ok( ! defined $tuid2 , 'match_a_cache_file: no arg 2' ) ;
+
+ ok( ( $tuid1, $tuid2 ) = match_a_cache_file( q{} ), 'match_a_cache_file: empty arg' ) ;
+ ok( ! defined $tuid1 , 'match_a_cache_file: empty arg 1' ) ;
+ ok( ! defined $tuid2 , 'match_a_cache_file: empty arg 2' ) ;
+
+ # Leading zeros are preserved: uids come back as strings.
+ ok( ( $tuid1, $tuid2 ) = match_a_cache_file( '000_000' ), 'match_a_cache_file: 000_000' ) ;
+ ok( '000' eq $tuid1, 'match_a_cache_file: 000_000 1' ) ;
+ ok( '000' eq $tuid2, 'match_a_cache_file: 000_000 2' ) ;
+
+ ok( ( $tuid1, $tuid2 ) = match_a_cache_file( '123_456' ), 'match_a_cache_file: 123_456' ) ;
+ ok( '123' eq $tuid1, 'match_a_cache_file: 123_456 1' ) ;
+ ok( '456' eq $tuid2, 'match_a_cache_file: 123_456 2' ) ;
+
+ # A directory prefix is allowed before the basename.
+ ok( ( $tuid1, $tuid2 ) = match_a_cache_file( '/tmp/truc/123_456' ), 'match_a_cache_file: /tmp/truc/123_456' ) ;
+ ok( '123' eq $tuid1, 'match_a_cache_file: /tmp/truc/123_456 1' ) ;
+ ok( '456' eq $tuid2, 'match_a_cache_file: /tmp/truc/123_456 2' ) ;
+
+ # A basename that is not exactly digits_digits must not match.
+ ok( ( $tuid1, $tuid2 ) = match_a_cache_file( '/lala123_456' ), 'match_a_cache_file: NO /lala123_456' ) ;
+ ok( ! $tuid1, 'match_a_cache_file: /lala123_456 1' ) ;
+ ok( ! $tuid2, 'match_a_cache_file: /lala123_456 2' ) ;
+
+ ok( ( $tuid1, $tuid2 ) = match_a_cache_file( 'la123_456' ), 'match_a_cache_file: NO la123_456' ) ;
+ ok( ! $tuid1, 'match_a_cache_file: la123_456 1' ) ;
+ ok( ! $tuid2, 'match_a_cache_file: la123_456 2' ) ;
+
+ note( 'Leaving tests_match_a_cache_file()' ) ;
+ return ;
+}
+
+# clean_cache
+# Removes cache files whose name does not parse as "<uid1>_<uid2>" or
+# whose uid1/uid2 is no longer present on host1/host2 (according to
+# the two "all messages" hashes). Always returns 1.
+sub clean_cache
+{
+ my ( $cache_files_ref, $cache_1_2_ref, $h1_msgs_all_hash_ref, $h2_msgs_all_hash_ref ) = @_ ;
+
+ $debugcache and myprint( "Entering clean_cache\n" ) ;
+
+ $debugcache and myprint( map { "$_ -> " . $cache_1_2_ref->{ $_ } . "\n" } keys %{ $cache_1_2_ref } ) ;
+ foreach my $file ( @{ $cache_files_ref } ) {
+ $debugcache and myprint( "$file\n" ) ;
+ my ( $cache_uid1, $cache_uid2 ) = match_a_cache_file( $file ) ;
+ $debugcache and myprint( "u1: $cache_uid1 u2: $cache_uid2 c12: ", $cache_1_2_ref->{ $cache_uid1 } || q{}, "\n") ;
+# or ( ! exists( $cache_1_2_ref->{ $cache_uid1 } ) )
+# or ( ! ( $cache_uid2 == $cache_1_2_ref->{ $cache_uid1 } ) )
+ # Delete when the name is unparsable or either uid is gone.
+ # (The two commented conditions above would also require the pair
+ # to be the one retained in the 1->2 map; deliberately not active.)
+ if ( ( not defined $cache_uid1 )
+ or ( not defined $cache_uid2 )
+ or ( not exists $h1_msgs_all_hash_ref->{ $cache_uid1 } )
+ or ( not exists $h2_msgs_all_hash_ref->{ $cache_uid2 } )
+ ) {
+ $debugcache and myprint( "remove $file\n" ) ;
+ # Removal failures are only reported, not fatal.
+ unlink $file or myprint( "$OS_ERROR" ) ;
+ }
+ }
+
+ $debugcache and myprint( "Exiting clean_cache\n" ) ;
+ return( 1 ) ;
+}
+
+# Unit tests for clean_cache(): files whose uids are missing from the
+# "all messages" hashes must be deleted, the rest kept.
+sub tests_clean_cache
+{
+ note( 'Entering tests_clean_cache()' ) ;
+
+ ok( ( not -d 'W/tmp/cache/G1/G2' or rmtree( 'W/tmp/cache/G1/G2' )), 'clean_cache: rmtree W/tmp/cache/G1/G2' ) ;
+ ok( mkpath( 'W/tmp/cache/G1/G2' ), 'clean_cache: mkpath W/tmp/cache/G1/G2' ) ;
+
+ my @test_files_cache = ( qw(
+ W/tmp/cache/G1/G2/100_200
+ W/tmp/cache/G1/G2/101_201
+ W/tmp/cache/G1/G2/120_220
+ W/tmp/cache/G1/G2/142_242
+ W/tmp/cache/G1/G2/143_243
+ W/tmp/cache/G1/G2/177_277
+ W/tmp/cache/G1/G2/177_377
+ W/tmp/cache/G1/G2/177_777
+ W/tmp/cache/G1/G2/155_255
+ ) ) ;
+ ok( touch(@test_files_cache), 'clean_cache: touch W/tmp/cache/G1/G2/...' ) ;
+
+ ok( -f 'W/tmp/cache/G1/G2/100_200', 'clean_cache: 100_200 before' );
+ ok( -f 'W/tmp/cache/G1/G2/142_242', 'clean_cache: 142_242 before' );
+ ok( -f 'W/tmp/cache/G1/G2/177_277', 'clean_cache: 177_277 before' );
+ ok( -f 'W/tmp/cache/G1/G2/177_377', 'clean_cache: 177_377 before' );
+ ok( -f 'W/tmp/cache/G1/G2/177_777', 'clean_cache: 177_777 before' );
+ ok( -f 'W/tmp/cache/G1/G2/155_255', 'clean_cache: 155_255 before' );
+
+ my $cache = {
+ 142 => 242,
+ 177 => 777,
+ } ;
+
+ # uids still alive on host1 / host2; anything else is purged.
+ my $all_1 = {
+ 142 => q{},
+ 177 => q{},
+ } ;
+
+ my $all_2 = {
+ 200 => q{},
+ 242 => q{},
+ 777 => q{},
+ } ;
+ ok( clean_cache( \@test_files_cache, $cache, $all_1, $all_2 ), 'clean_cache: ' ) ;
+
+ # Only pairs with both uids alive (142_242, 177_777) survive.
+ ok( ! -f 'W/tmp/cache/G1/G2/100_200', 'clean_cache: 100_200 after' );
+ ok( -f 'W/tmp/cache/G1/G2/142_242', 'clean_cache: 142_242 after' );
+ ok( ! -f 'W/tmp/cache/G1/G2/177_277', 'clean_cache: 177_277 after' );
+ ok( ! -f 'W/tmp/cache/G1/G2/177_377', 'clean_cache: 177_377 after' );
+ ok( -f 'W/tmp/cache/G1/G2/177_777', 'clean_cache: 177_777 after' );
+ ok( ! -f 'W/tmp/cache/G1/G2/155_255', 'clean_cache: 155_255 after' );
+
+ note( 'Leaving tests_clean_cache()' ) ;
+ return ;
+}
+
+# Second round of clean_cache() tests: like tests_clean_cache() but
+# with uid 100 alive on host1, so 100_200 must now be kept.
+sub tests_clean_cache_2
+{
+ note( 'Entering tests_clean_cache_2()' ) ;
+
+ ok( ( not -d 'W/tmp/cache/G1/G2' or rmtree( 'W/tmp/cache/G1/G2' )), 'clean_cache_2: rmtree W/tmp/cache/G1/G2' ) ;
+ ok( mkpath( 'W/tmp/cache/G1/G2' ), 'clean_cache_2: mkpath W/tmp/cache/G1/G2' ) ;
+
+ my @test_files_cache = ( qw(
+ W/tmp/cache/G1/G2/100_200
+ W/tmp/cache/G1/G2/101_201
+ W/tmp/cache/G1/G2/120_220
+ W/tmp/cache/G1/G2/142_242
+ W/tmp/cache/G1/G2/143_243
+ W/tmp/cache/G1/G2/177_277
+ W/tmp/cache/G1/G2/177_377
+ W/tmp/cache/G1/G2/177_777
+ W/tmp/cache/G1/G2/155_255
+ ) ) ;
+ ok( touch(@test_files_cache), 'clean_cache_2: touch W/tmp/cache/G1/G2/...' ) ;
+
+ ok( -f 'W/tmp/cache/G1/G2/100_200', 'clean_cache_2: 100_200 before' );
+ ok( -f 'W/tmp/cache/G1/G2/142_242', 'clean_cache_2: 142_242 before' );
+ ok( -f 'W/tmp/cache/G1/G2/177_277', 'clean_cache_2: 177_277 before' );
+ ok( -f 'W/tmp/cache/G1/G2/177_377', 'clean_cache_2: 177_377 before' );
+ ok( -f 'W/tmp/cache/G1/G2/177_777', 'clean_cache_2: 177_777 before' );
+ ok( -f 'W/tmp/cache/G1/G2/155_255', 'clean_cache_2: 155_255 before' );
+
+ my $cache = {
+ 142 => 242,
+ 177 => 777,
+ } ;
+
+ # $NUMBER_100 keeps uid 100 alive, unlike in tests_clean_cache().
+ my $all_1 = {
+ $NUMBER_100 => q{},
+ 142 => q{},
+ 177 => q{},
+ } ;
+
+ my $all_2 = {
+ 200 => q{},
+ 242 => q{},
+ 777 => q{},
+ } ;
+
+
+
+ ok( clean_cache( \@test_files_cache, $cache, $all_1, $all_2 ), 'clean_cache_2: ' ) ;
+
+ ok( -f 'W/tmp/cache/G1/G2/100_200', 'clean_cache_2: 100_200 after' );
+ ok( -f 'W/tmp/cache/G1/G2/142_242', 'clean_cache_2: 142_242 after' );
+ ok( ! -f 'W/tmp/cache/G1/G2/177_277', 'clean_cache_2: 177_277 after' );
+ ok( ! -f 'W/tmp/cache/G1/G2/177_377', 'clean_cache_2: 177_377 after' );
+ ok( -f 'W/tmp/cache/G1/G2/177_777', 'clean_cache_2: 177_777 after' );
+ ok( ! -f 'W/tmp/cache/G1/G2/155_255', 'clean_cache_2: 155_255 after' );
+
+ note( 'Leaving tests_clean_cache_2()' ) ;
+ return ;
+}
+
+
+
+# Platform tests for mkpath()/rmtree(): long paths and trailing dots,
+# with separate Unix and MSWin32 SKIP blocks.
+sub tests_mkpath
+{
+ note( 'Entering tests_mkpath()' ) ;
+
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' )), 'mkpath: mkpath W/tmp/tests/' ) ;
+
+ SKIP: {
+ skip( 'Tests only for Unix', 10 ) if ( 'MSWin32' eq $OSNAME ) ;
+ my $long_path_unix = '123456789/' x 30 ;
+ ok( ( -d "W/tmp/tests/long/$long_path_unix" or mkpath( "W/tmp/tests/long/$long_path_unix" ) ), 'mkpath: mkpath 300 char' ) ;
+ ok( -d "W/tmp/tests/long/$long_path_unix", 'mkpath: mkpath > 300 char verified' ) ;
+ ok( ( -d "W/tmp/tests/long/$long_path_unix" and rmtree( 'W/tmp/tests/long/' ) ), 'mkpath: rmtree 300 char' ) ;
+ ok( ! -d "W/tmp/tests/long/$long_path_unix", 'mkpath: rmtree 300 char verified' ) ;
+
+ ok( ( -d 'W/tmp/tests/trailing_dots...' or mkpath( 'W/tmp/tests/trailing_dots...' ) ), 'mkpath: mkpath trailing_dots...' ) ;
+ ok( -d 'W/tmp/tests/trailing_dots...', 'mkpath: mkpath trailing_dots... verified' ) ;
+ ok( ( -d 'W/tmp/tests/trailing_dots...' and rmtree( 'W/tmp/tests/trailing_dots...' ) ), 'mkpath: rmtree trailing_dots...' ) ;
+ ok( ! -d 'W/tmp/tests/trailing_dots...', 'mkpath: rmtree trailing_dots... verified' ) ;
+
+ # Checks the eval-or-ok survival idiom also used below on Windows.
+ eval { ok( 1 / 0, 'mkpath: divide by 0' ) ; } or ok( 1, 'mkpath: can not divide by 0' ) ;
+ ok( 1, 'mkpath: still alive' ) ;
+ } ;
+
+ SKIP: {
+ skip( 'Tests only for MSWin32', 13 ) if ( 'MSWin32' ne $OSNAME ) ;
+ # NOTE(review): the || right-hand side is dead code -- the first
+ # operand is a constant true string, so the UNC form is never used.
+ my $long_path_2_prefix = ".\\imapsync_tests" || '\\\?\\E:\\TEMP\\imapsync_tests' ;
+ myprint( "long_path_2_prefix: $long_path_2_prefix\n" ) ;
+
+ my $long_path_100 = $long_path_2_prefix . '\\' . '123456789\\' x 10 . 'END' ;
+ my $long_path_300 = $long_path_2_prefix . '\\' . '123456789\\' x 30 . 'END' ;
+
+ #myprint( "$long_path_100\n" ) ;
+
+ ok( ( -d $long_path_2_prefix or mkpath( $long_path_2_prefix ) ), 'mkpath: -d mkpath small path' ) ;
+ ok( ( -d $long_path_2_prefix ), 'mkpath: -d mkpath small path done' ) ;
+ ok( ( -d $long_path_100 or mkpath( $long_path_100 ) ), 'mkpath: mkpath > 100 char' ) ;
+ ok( ( -d $long_path_100 ), 'mkpath: -d mkpath > 200 char done' ) ;
+ ok( ( -d $long_path_2_prefix and rmtree( $long_path_2_prefix ) ), 'mkpath: rmtree > 100 char' ) ;
+ ok( (! -d $long_path_2_prefix ), 'mkpath: ! -d rmtree done' ) ;
+
+ # Without the eval the following mkpath 300 just kill the whole process without a whisper
+ #myprint( "$long_path_300\n" ) ;
+ eval { ok( ( -d $long_path_300 or mkpath( $long_path_300 ) ), 'mkpath: create a path with 300 characters' ) ; }
+ or ok( 1, 'mkpath: can not create a path with 300 characters' ) ;
+ ok( ( ( ! -d $long_path_300 ) or -d $long_path_300 and rmtree( $long_path_300 ) ), 'mkpath: rmtree the 300 character path' ) ;
+ ok( 1, 'mkpath: still alive' ) ;
+
+ ok( ( -d 'W/tmp/tests/trailing_dots...' or mkpath( 'W/tmp/tests/trailing_dots...' ) ), 'mkpath: mkpath trailing_dots...' ) ;
+ ok( -d 'W/tmp/tests/trailing_dots...', 'mkpath: mkpath trailing_dots... verified' ) ;
+ ok( ( -d 'W/tmp/tests/trailing_dots...' and rmtree( 'W/tmp/tests/trailing_dots...' ) ), 'mkpath: rmtree trailing_dots...' ) ;
+ ok( ! -d 'W/tmp/tests/trailing_dots...', 'mkpath: rmtree trailing_dots... verified' ) ;
+
+
+ } ;
+
+ note( 'Leaving tests_mkpath()' ) ;
+ # Keep this because of the eval used by the caller (failed badly?)
+ return 1 ;
+}
+
+# Unit tests for touch(): returns 1 when all files are created,
+# 0 (false) as soon as one of them cannot be.
+sub tests_touch
+{
+ note( 'Entering tests_touch()' ) ;
+
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' )), 'touch: mkpath W/tmp/tests/' ) ;
+ ok( 1 == touch( 'W/tmp/tests/lala'), 'touch: W/tmp/tests/lala') ;
+ ok( 1 == touch( 'W/tmp/tests/\y'), 'touch: W/tmp/tests/\y') ;
+ ok( 0 == touch( '/no/no/no/aaa'), 'touch: not /aaa') ;
+ ok( 1 == touch( 'W/tmp/tests/lili', 'W/tmp/tests/lolo'), 'touch: 2 files') ;
+ ok( 0 == touch( 'W/tmp/tests/\y', '/no/no/aaa'), 'touch: 2 files, 1 fails' ) ;
+
+ note( 'Leaving tests_touch()' ) ;
+ return ;
+}
+
+
+# touch
+# Creates (or updates) each given file by opening it in append mode.
+# Returns true only when every file could be opened, false otherwise.
+sub touch
+{
+ my @files = @_ ;
+ my $failures = 0 ;
+
+ foreach my $file ( @files ) {
+ my $fh = IO::File->new ;
+ if ( ! $fh->open( ">> $file" ) ) {
+ # Report the failure but keep going with the remaining files.
+ myprint( "Could not open file $file in write/append mode\n" ) ;
+ $failures++ ;
+ next ;
+ }
+ $fh->close ;
+ }
+ return( ! $failures );
+}
+
+
+# Unit tests for tmpdir_has_colon_bug().
+sub tests_tmpdir_has_colon_bug
+{
+ note( 'Entering tests_tmpdir_has_colon_bug()' ) ;
+
+ ok( 0 == tmpdir_has_colon_bug( q{} ), 'tmpdir_has_colon_bug: ' ) ;
+ ok( 0 == tmpdir_has_colon_bug( '/tmp' ), 'tmpdir_has_colon_bug: /tmp' ) ;
+ # Windows drive colons are forbidden characters, hence the bug.
+ ok( 1 == tmpdir_has_colon_bug( 'C:' ), 'tmpdir_has_colon_bug: C:' ) ;
+ ok( 1 == tmpdir_has_colon_bug( 'C:\temp' ), 'tmpdir_has_colon_bug: C:\temp' ) ;
+
+ note( 'Leaving tests_tmpdir_has_colon_bug()' ) ;
+ return ;
+}
+
+# tmpdir_has_colon_bug
+# Returns 1 when filtering forbidden characters would alter the path
+# (e.g. the drive colon in "C:\temp"), 0 otherwise. Also warns when a
+# directory already exists under the mangled name, i.e. a previous
+# run already hit the bug.
+sub tmpdir_has_colon_bug
+{
+ my $path = shift ;
+
+ my $path_filtered = filter_forbidden_characters( $path ) ;
+ if ( $path_filtered ne $path ) {
+ ( -d $path_filtered ) and myprint( "Path $path was previously mistakely changed to $path_filtered\n" ) ;
+ return( 1 ) ;
+ }
+ return( 0 ) ;
+}
+
+# tmpdir_fix_colon_bug
+# Repairs caches created by the colon-mangling bug: when the cache
+# path under tmpdir would be altered by filter_forbidden_characters(),
+# moves the old (mangled) cache directory to the correct location.
+# Returns true on success or when there was nothing to do, false on
+# error.
+sub tmpdir_fix_colon_bug
+{
+ my $mysync = shift ;
+ my $err = 0 ;
+ if ( not (-d $mysync->{ tmpdir } and -r _ and -w _) ) {
+ myprint( "tmpdir $mysync->{ tmpdir } is not valid\n" ) ;
+ return( 0 ) ;
+ }
+ my $cachedir_new = "$mysync->{ tmpdir }/imapsync_cache" ;
+
+ # No colon bug for this path: nothing to fix.
+ if ( not tmpdir_has_colon_bug( $cachedir_new ) ) { return( 0 ) } ;
+
+ # check if old cache directory already exists
+ my $cachedir_old = filter_forbidden_characters( $cachedir_new ) ;
+ if ( not ( -d $cachedir_old ) ) {
+ # Fixed message: used to print the new path instead of the old one.
+ myprint( "Old cache directory $cachedir_old does not exist, nothing to do\n" ) ;
+ return( 1 ) ;
+ }
+ # check if new cache directory already exists
+ if ( -d $cachedir_new ) {
+ myprint( "New fixed cache directory $cachedir_new already exists, not moving the old one $cachedir_old. Fix this manually.\n" ) ;
+ return( 0 ) ;
+ }else{
+ # move the old one to the new place
+ myprint( "Moving $cachedir_old to $cachedir_new Do not interrupt this task.\n" ) ;
+ File::Copy::Recursive::rmove( $cachedir_old, $cachedir_new )
+ or do {
+ myprint( "Could not move $cachedir_old to $cachedir_new\n" ) ;
+ $err++ ;
+ } ;
+ # check it succeeded
+ if ( -d $cachedir_new and -r _ and -w _ ) {
+ myprint( "New fixed cache directory $cachedir_new ok\n" ) ;
+ }else{
+ myprint( "New fixed cache directory $cachedir_new does not exist\n" ) ;
+ $err++ ;
+ }
+ if ( -d $cachedir_old ) {
+ myprint( "Old cache directory $cachedir_old still exists\n" ) ;
+ $err++ ;
+ }else{
+ myprint( "Old cache directory $cachedir_old successfully moved\n" ) ;
+ }
+ }
+ return( not $err ) ;
+}
+
+
+# Unit tests for cache_folder(): verifies base-prefix handling (empty vs
+# 'D:'), replacement of forbidden characters with '_' in dir and folder
+# parts, and the degenerate all-empty / all-forbidden inputs.
+sub tests_cache_folder
+{
+	note( 'Entering tests_cache_folder()' ) ;
+
+	ok( '/path/fold1/fold2' eq cache_folder( q{}, '/path', 'fold1', 'fold2'), 'cache_folder: /path, fold1, fold2 -> /path/fold1/fold2' ) ;
+	ok( '/pa_th/fold1/fold2' eq cache_folder( q{}, '/pa*th', 'fold1', 'fold2'), 'cache_folder: /pa*th, fold1, fold2 -> /path/fold1/fold2' ) ;
+	ok( '/_p_a__th/fol_d1/fold2' eq cache_folder( q{}, '/>p<a|*th', 'fol*d1', 'fold2'), 'cache_folder: />p<a|*th, fol*d1, fold2 -> /path/fol_d1/fold2' ) ;
+
+	ok( 'D:/path/fold1/fold2' eq cache_folder( 'D:', '/path', 'fold1', 'fold2'), 'cache_folder: /path, fold1, fold2 -> /path/fold1/fold2' ) ;
+	ok( 'D:/pa_th/fold1/fold2' eq cache_folder( 'D:', '/pa*th', 'fold1', 'fold2'), 'cache_folder: /pa*th, fold1, fold2 -> /path/fold1/fold2' ) ;
+	ok( 'D:/_p_a__th/fol_d1/fold2' eq cache_folder( 'D:', '/>p<a|*th', 'fol*d1', 'fold2'), 'cache_folder: />p<a|*th, fol*d1, fold2 -> /path/fol_d1/fold2' ) ;
+	ok( '//' eq cache_folder( q{}, q{}, q{}, q{}), 'cache_folder:  -> //' ) ;
+	ok( '//_______' eq cache_folder( q{}, q{}, q{}, '*|?:"<>'), 'cache_folder: *|?:"<> -> //_______' ) ;
+
+	note( 'Leaving tests_cache_folder()' ) ;
+	return ;
+}
+
+# cache_folder( $cache_base, $cache_dir, $h1_fold, $h2_fold )
+# Builds the on-disk cache path for a (host1 folder, host2 folder) pair:
+# each folder name has its IMAP hierarchy separator converted to '/', then
+# "$cache_dir/$h1_fold/$h2_fold" is passed through
+# filter_forbidden_characters() and prefixed with $cache_base verbatim
+# (the base, e.g. a 'D:' drive prefix, is deliberately NOT filtered).
+# Reads the global $sync for the h1/h2 separators, defaulting to '/'.
+sub cache_folder
+{
+	my( $cache_base, $cache_dir, $h1_fold, $h2_fold ) = @_ ;
+
+	my $sep_1 = $sync->{ h1_sep } || '/';
+	my $sep_2 = $sync->{ h2_sep } || '/';
+
+	#myprint( "$cache_dir h1_fold $h1_fold sep1 $sep_1 h2_fold $h2_fold sep2 $sep_2\n" ) ;
+	$h1_fold = convert_sep_to_slash( $h1_fold, $sep_1 ) ;
+	$h2_fold = convert_sep_to_slash( $h2_fold, $sep_2 ) ;
+
+	my $cache_folder = "$cache_base" . filter_forbidden_characters( "$cache_dir/$h1_fold/$h2_fold" ) ;
+	#myprint( "cache_folder [$cache_folder]\n" ) ;
+	return( $cache_folder ) ;
+}
+
+# Unit tests for filter_forbidden_characters(): forbidden punctuation,
+# tab/CR/LF/backslash replacement, and the MSWin32-only trailing-space
+# handling (guarded by SKIP blocks keyed on $OSNAME).
+sub tests_filter_forbidden_characters
+{
+	note( 'Entering tests_filter_forbidden_characters()' ) ;
+
+	ok( 'a_b' eq filter_forbidden_characters( 'a_b' ), 'filter_forbidden_characters: a_b -> a_b' ) ;
+	ok( 'a_b' eq filter_forbidden_characters( 'a*b' ), 'filter_forbidden_characters: a*b -> a_b' ) ;
+	ok( 'a_b' eq filter_forbidden_characters( 'a|b' ), 'filter_forbidden_characters: a|b -> a_b' ) ;
+	ok( 'a_b' eq filter_forbidden_characters( 'a?b' ), 'filter_forbidden_characters: a?b -> a_b' ) ;
+	ok( 'a_______b' eq filter_forbidden_characters( 'a*|?:"<>b' ), 'filter_forbidden_characters: a*|?:"<>b -> a_______b' ) ;
+
+        SKIP: {
+                skip( 'Not on MSWin32', 1 ) if ( 'MSWin32' eq $OSNAME ) ;
+                ok( ( 'a b ' eq filter_forbidden_characters( 'a b ' ) ), 'filter_forbidden_characters: "a b " -> "a b "' ) ;
+        } ;
+
+        SKIP: {
+                skip( 'Only on MSWin32', 2 ) if ( 'MSWin32' ne $OSNAME ) ;
+                ok( ( ' a b_' eq filter_forbidden_characters( ' a b ' ) ), 'filter_forbidden_characters: "a b " -> "a b_"' ) ;
+                ok( ( ' a b_/ c d_' eq filter_forbidden_characters( ' a b / c d ' ) ), 'filter_forbidden_characters: " a b / c d " -> "a b_/ c d_"' ) ;
+        } ;
+
+	ok( 'a_b' eq filter_forbidden_characters( "a\tb" ), 'filter_forbidden_characters: a\tb -> a_b' ) ;
+	ok( "a_b" eq filter_forbidden_characters( "a\rb" ), 'filter_forbidden_characters: a\rb -> a_b' ) ;
+	ok( "a_b" eq filter_forbidden_characters( "a\nb" ), 'filter_forbidden_characters: a\nb -> a_b' ) ;
+	ok( "a_b" eq filter_forbidden_characters( "a\\b" ), 'filter_forbidden_characters: a\b -> a_b' ) ;
+
+	note( 'Leaving tests_filter_forbidden_characters()' ) ;
+	return ;
+}
+
+# filter_forbidden_characters( $string )
+# Replaces characters that are unsafe in cache file/dir names
+# ( * | ? : " < > plus tab, CR, LF and backslash ) with '_'.
+# On MSWin32 a space just before a '/' or at end of string is also
+# turned into '_' first (Windows forbids trailing spaces in names).
+# Returns the filtered string; returns undef (empty return) for undef input.
+sub filter_forbidden_characters
+{
+	my $string = shift ;
+
+	if ( ! defined $string ) { return ; }
+
+	if ( 'MSWin32' eq $OSNAME ) {
+		# Move trailing whitespace to _   " a b /c d " -> " a b_/c d_"
+		$string =~ s{\ (/|$)}{_$1}xg ;
+	}
+	$string =~ s{[\Q*|?:"<>\E\t\r\n\\]}{_}xg ;
+	#myprint( "[$string]\n" ) ;
+	return( $string ) ;
+}
+
+# Unit tests for convert_sep_to_slash(): '/', '_' and '.' separators,
+# empty string, and multi-level folder names.
+sub tests_convert_sep_to_slash
+{
+	note( 'Entering tests_convert_sep_to_slash()' ) ;
+
+
+	ok(q{} eq convert_sep_to_slash(q{}, '/'), 'convert_sep_to_slash: no folder');
+	ok('INBOX' eq convert_sep_to_slash('INBOX', '/'), 'convert_sep_to_slash: INBOX');
+	ok('INBOX/foo' eq convert_sep_to_slash('INBOX/foo', '/'), 'convert_sep_to_slash: INBOX/foo');
+	ok('INBOX/foo' eq convert_sep_to_slash('INBOX_foo', '_'), 'convert_sep_to_slash: INBOX_foo');
+	ok('INBOX/foo/zob' eq convert_sep_to_slash('INBOX_foo_zob', '_'), 'convert_sep_to_slash: INBOX_foo_zob');
+	ok('INBOX/foo' eq convert_sep_to_slash('INBOX.foo', '.'), 'convert_sep_to_slash: INBOX.foo');
+	ok('INBOX/foo/hi' eq convert_sep_to_slash('INBOX.foo.hi', '.'), 'convert_sep_to_slash: INBOX.foo.hi');
+
+	note( 'Leaving tests_convert_sep_to_slash()' ) ;
+	return ;
+}
+
+# convert_sep_to_slash( $folder, $sep )
+# Replaces every occurrence of the IMAP hierarchy separator $sep
+# (taken literally via \Q...\E) with '/' and returns the result.
+sub convert_sep_to_slash
+{
+	my ( $folder, $sep ) = @_ ;
+
+	$folder =~ s{\Q$sep\E}{/}xg ;
+	return( $folder ) ;
+}
+
+
+# Unit tests for regexmess(): drives the global @regexmess list through
+# progressively harder cases — bad regexes (undef result), simple
+# substitutions, mbox "From " line add/remove, deletion and X- renaming of
+# the Disposition-Notification-To: header in many positions (header block,
+# body, repeated), whole-message truncation on large inputs, and multi-line
+# X-Ham-Report header removal. Heredoc fixtures below are test data and
+# must stay byte-identical.
+sub tests_regexmess
+{
+	note( 'Entering tests_regexmess()' ) ;
+
+	ok( 'blabla' eq regexmess( 'blabla' ), 'regexmess, no regexmess, nothing to do' ) ;
+
+	@regexmess = ( 'lalala' ) ;
+	ok( not( defined regexmess( 'popopo' ) ), 'regexmess, bad regex lalala' ) ;
+
+	@regexmess = ( 's/p/Z/g' ) ;
+	ok( 'ZoZoZo' eq regexmess( 'popopo' ), 'regexmess, s/p/Z/g' ) ;
+
+	@regexmess = ( 's{c}{C}gxms' ) ;
+	ok("H1: abC\nH2: Cde\n\nBody abC"
+	eq regexmess( "H1: abc\nH2: cde\n\nBody abc"),
+	'regexmess, c->C');
+
+	@regexmess = ( 's{\AFrom\ }{From:}gxms' ) ;
+	ok( q{}
+	    eq regexmess(q{}),
+	    'From mbox 1 add colon blank');
+
+	ok( 'From:<tartanpion@machin.truc>'
+	    eq regexmess('From <tartanpion@machin.truc>'),
+	    'From mbox 2 add colo');
+
+	ok( "\n" . 'From <tartanpion@machin.truc>'
+	    eq regexmess("\n" . 'From <tartanpion@machin.truc>'),
+	    'From mbox 3 add colo') ;
+
+	ok( "From: zzz\n" . 'From <tartanpion@machin.truc>'
+	    eq regexmess("From zzz\n" . 'From <tartanpion@machin.truc>'),
+	    'From mbox 4 add colo') ;
+
+	@regexmess = ( 's{\AFrom\ [^\n]*(\n)?}{}gxms' ) ;
+	ok( q{}
+	    eq regexmess(q{}),
+	    'From mbox 1 remove, blank');
+
+	ok( q{}
+	    eq regexmess('From <tartanpion@machin.truc>'),
+	    'From mbox 2 remove');
+
+	ok( "\n" . 'From <tartanpion@machin.truc>'
+	    eq regexmess("\n" . 'From <tartanpion@machin.truc>'),
+	    'From mbox 3 remove');
+
+	#myprint( "[", regexmess("From zzz\n" . 'From <tartanpion@machin.truc>'), "]" ) ;
+	ok( q{} . 'From <tartanpion@machin.truc>'
+	    eq regexmess("From zzz\n" . 'From <tartanpion@machin.truc>'),
+	    'From mbox 4 remove');
+
+
+	ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+From zzz
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+), 'From mbox 5 remove');
+
+
+# The following regex deletes a Disposition-Notification-To: header line
+# wherever it appears inside the header block, but leaves body text alone.
+@regexmess = ( 's{\A((?:[^\n]+\n)+|)^Disposition-Notification-To:[^\n]*\n(\r?\n|.*\n\r?\n)}{$1$2}xms' ) ; # SUPER SUPER BEST!
+	ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+	),
+	'regexmess: 1 Delete header Disposition-Notification-To:');
+
+	ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Hello,
+Bye.
+EOM
+),
+	'regexmess: 2 Delete header Disposition-Notification-To:');
+
+	ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+),
+	'regexmess: 3 Delete header Disposition-Notification-To:');
+
+	ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Bye.
+EOM
+),
+	'regexmess: 4 Delete header Disposition-Notification-To:');
+
+
+	ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Bye.
+EOM
+),
+	'regexmess: 5 Delete header Disposition-Notification-To:');
+
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Bye.
+EOM
+),
+	'regexmess: 6 Delete header Disposition-Notification-To:');
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+),
+	'regexmess: 7 Delete header Disposition-Notification-To:');
+
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Bye.
+EOM
+),
+	'regexmess: 8 Delete header Disposition-Notification-To:');
+
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Bye.
+EOM
+),
+	'regexmess: 9 Delete header Disposition-Notification-To:');
+
+
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+
+Bye.
+EOM
+),
+	'regexmess: 10 Delete header Disposition-Notification-To:');
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+),
+	'regexmess: 11 Delete header Disposition-Notification-To:');
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+),
+	'regexmess: 12 Delete header Disposition-Notification-To:');
+
+
+# Switch regex: the first (rejected) variant is kept for reference; the
+# second renames a header-block Disposition-Notification-To: to
+# X-Disposition-Notification-To: instead of deleting it.
+@regexmess = ( 's{\A(.*?(?! ^$))^Disposition-Notification-To:(.*?)$}{$1X-Disposition-Notification-To:$2}igxms' ) ; # BAD!
+@regexmess = ( 's{\A((?:[^\n]+\n)+|)(^Disposition-Notification-To:[^\n]*\n)(\r?\n|.*\n\r?\n)}{$1X-$2$3}ims' ) ;
+
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+),
+	'regexmess: 13 Delete header Disposition-Notification-To:');
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+X-Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Bye.
+EOM
+),
+	'regexmess: 14 Delete header Disposition-Notification-To:');
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+X-Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Bye.
+EOM
+),
+	'regexmess: 15 Delete header Disposition-Notification-To:');
+
+
+ok(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+X-Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Hello,
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+
+Hello,
+
+Bye.
+EOM
+),
+	'regexmess: 16 Delete header Disposition-Notification-To:');
+
+ok(
+<<'EOM'
+X-Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Bye.
+EOM
+	eq regexmess(
+<<'EOM'
+Disposition-Notification-To: Gilles LAMIRAL <gilles@lamiral.info>
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Bye.
+EOM
+),
+	'regexmess: 17 Delete header Disposition-Notification-To:');
+
+	# Truncation regexes, exercised against ~1 MB inputs for performance.
+	@regexmess = ( 's/.{11}\K.*//gs' ) ;
+	is( "0123456789\n", regexmess( "0123456789\n" x 100 ), 'regexmess, truncate whole message after 11 characters' ) ;
+	is( "0123456789\n", regexmess( "0123456789\n" x 100_000 ), 'regexmess, truncate whole message after 11 characters ~ 1MB' ) ;
+
+	@regexmess = ( 's/.{10000}\K.*//gs' ) ;
+	is( "123456789\n" x 1000, regexmess( "123456789\n" x 100_000 ), 'regexmess, truncate whole message after 10000 characters ~ 1MB' ) ;
+
+@regexmess = ( 's/^(X-Ham-Report.*?\n)^X-/X-/sm' ) ;
+
+is(
+<<'EOM'
+X-Spam-Score: -1
+X-Spam-Bar: /
+X-Spam-Flag: NO
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Bye.
+EOM
+,
+regexmess(
+<<'EOM'
+X-Spam-Score: -1
+X-Spam-Bar: /
+X-Ham-Report: =?utf-8?Q?Spam_detection_software=2C_running?=
+ =?utf-8?Q?_on_the_system_=22ohp-ag006.int200?=
+_has_NOT_identified_thi?=
+ =?utf-8?Q?s_incoming_email_as_spam.__The_o?=
+_message_has_been_attac?=
+ =?utf-8?Q?hed_to_this_so_you_can_view_it_o?=
+___________________________?=
+ =?utf-8?Q?__author's_domain
+X-Spam-Flag: NO
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello,
+
+Bye.
+EOM
+),
+	'regexmess: 1 Delete header X-Ham-Report:');
+
+
+# regex to play with Date: from the FAQ
+#@regexmess = 's{\A(.*?(?! ^$))^Date:(.*?)$}{$1Date:$2\nX-Date:$2}gxms'
+
+
+
+
+
+	note( 'Leaving tests_regexmess()' ) ;
+	return ;
+
+}
+
+# regexmess( $string )
+# Applies, in order, every user-supplied substitution expression from the
+# global @regexmess to $string via string eval ("\$string =~ $regexmess").
+# Returns the transformed string, or undef when an expression fails to
+# eval or does not return true (bad regex). Debug tracing is controlled
+# by the global $sync->{ debug }.
+# NOTE(review): expressions are eval'ed as code — they are trusted,
+# user-provided command-line options by design.
+sub regexmess
+{
+	my ( $string ) = @_ ;
+	foreach my $regexmess ( @regexmess ) {
+		$sync->{ debug } and myprint( "eval \$string =~ $regexmess\n" ) ;
+		my $ret = eval "\$string =~ $regexmess ; 1" ;
+		#myprint( "eval [$ret]\n" ) ;
+		if ( ( not $ret ) or $EVAL_ERROR ) {
+			myprint( "Error: eval regexmess '$regexmess': $EVAL_ERROR" ) ;
+			return( undef ) ;
+		}
+	}
+	$sync->{ debug } and myprint( "$string\n" ) ;
+	return( $string ) ;
+}
+
+
+# Unit tests for skipmess(): bad regexes (undef), simple match/no-match,
+# OR-semantics across multiple @skipmess entries, and the
+# "Content-Type: Message/Partial" header-matching regex against messages
+# of various shapes, including multi-megabyte bodies (performance and the
+# documented recursion-limit edge). Heredoc fixtures are test data and
+# must stay byte-identical.
+sub tests_skipmess
+{
+	note( 'Entering tests_skipmess()' ) ;
+
+	ok( not( defined skipmess( 'blabla' ) ), 'skipmess, no skipmess, no skip' ) ;
+
+	@skipmess = ('[') ;
+	ok( not( defined skipmess( 'popopo' ) ), 'skipmess, bad regex [' ) ;
+
+	@skipmess = ('lalala') ;
+	ok( not( defined skipmess( 'popopo' ) ), 'skipmess, bad regex lalala' ) ;
+
+	@skipmess = ('/popopo/') ;
+	ok( 1 == skipmess( 'popopo' ), 'skipmess, popopo match regex /popopo/' ) ;
+
+	@skipmess = ('/popopo/') ;
+	ok( 0 == skipmess( 'rrrrrr' ), 'skipmess, rrrrrr does not match regex /popopo/' ) ;
+
+	@skipmess = ('m{^$}') ;
+	ok( 1 == skipmess( q{} ), 'skipmess: empty string yes' ) ;
+	ok( 0 == skipmess( 'Hi!' ), 'skipmess: empty string no' ) ;
+
+	@skipmess = ('m{i}') ;
+	ok( 1 == skipmess( 'Hi!' ), 'skipmess: i string yes' ) ;
+	ok( 0 == skipmess( 'Bye!' ), 'skipmess: i string no' ) ;
+
+	@skipmess = ('m{[\x80-\xff]}') ;
+	ok( 0 == skipmess( 'Hi!' ), 'skipmess: i 8bit no' ) ;
+	ok( 1 == skipmess( "\xff" ), 'skipmess: \xff 8bit yes' ) ;
+
+	# Several regexes act as an OR: the first match wins.
+	@skipmess = ('m{A}', 'm{B}') ;
+	ok( 0 == skipmess( 'Hi!' ), 'skipmess: A or B no' ) ;
+	ok( 0 == skipmess( 'lala' ), 'skipmess: A or B no' ) ;
+	ok( 0 == skipmess( "\xff" ), 'skipmess: A or B no' ) ;
+	ok( 1 == skipmess( 'AB' ), 'skipmess: A or B yes' ) ;
+	ok( 1 == skipmess( 'BA' ), 'skipmess: A or B yes' ) ;
+	ok( 1 == skipmess( 'AA' ), 'skipmess: A or B yes' ) ;
+	ok( 1 == skipmess( 'Ok Bye' ), 'skipmess: A or B yes' ) ;
+
+
+	@skipmess = ( 'm#\A((?:[^\n]+\n)+|)^Content-Type: Message/Partial;[^\n]*\n(?:\n|.*\n\n)#ism' ) ; # SUPER BEST!
+
+
+
+	ok( 1 == skipmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+Content-Type: Message/Partial; blabla
+From:<tartanpion@machin.truc>
+
+Hello!
+Bye.
+EOM
+),
+	'skipmess: 1 match Content-Type: Message/Partial' ) ;
+
+	ok( 0 == skipmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello!
+Bye.
+EOM
+),
+	'skipmess: 2 not match Content-Type: Message/Partial' ) ;
+
+
+	ok( 1 == skipmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+Content-Type: Message/Partial; blabla
+
+Hello!
+Bye.
+EOM
+),
+	'skipmess: 3 match Content-Type: Message/Partial' ) ;
+
+	ok( 0 == skipmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello!
+Content-Type: Message/Partial; blabla
+Bye.
+EOM
+),
+	'skipmess: 4 not match Content-Type: Message/Partial' ) ;
+
+
+	ok( 0 == skipmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+From:<tartanpion@machin.truc>
+
+Hello!
+Content-Type: Message/Partial; blabla
+
+Bye.
+EOM
+),
+	'skipmess: 5 not match Content-Type: Message/Partial' ) ;
+
+
+	ok( 1 == skipmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+Content-Type: Message/Partial; blabla
+From:<tartanpion@machin.truc>
+
+Hello!
+
+Content-Type: Message/Partial; blabla
+
+Bye.
+EOM
+),
+	'skipmess: 6 match Content-Type: Message/Partial' ) ;
+
+	ok( 1 == skipmess(
+<<'EOM'
+Date: Sat, 10 Jul 2010 05:34:45 -0700
+Content-Type: Message/Partial;
+From:<tartanpion@machin.truc>
+
+Hello!
+Bye.
+EOM
+),
+	'skipmess: 7 match Content-Type: Message/Partial' ) ;
+
+	ok( 1 == skipmess(
+<<'EOM'
+Date: Wed, 2 Jul 2014 02:26:40 +0000
+MIME-Version: 1.0
+Content-Type: message/partial;
+	id="TAN_U_P<1404267997.00007489ed17>";
+	number=3;
+	total=3
+
+6HQ6Hh3CdXj77qEGixerQ6zHx0OnQ/Cf5On4W0Y6vtU2crABZQtD46Hx1EOh8dDz4+OnTr1G
+
+
+Hello!
+Bye.
+EOM
+),
+	'skipmess: 8 match Content-Type: Message/Partial' ) ;
+
+
+ok( 1 == skipmess(
+<<'EOM'
+Return-Path: <gilles@lamiral.info>
+Received: by lamiral.info (Postfix, from userid 1000)
+	id 21EB12443BF; Mon,  2 Mar 2015 15:38:35 +0100 (CET)
+Subject: test: aethaecohngiexao
+To: <tata@petite.lamiral.info>
+X-Mailer: mail (GNU Mailutils 2.2)
+Message-Id: <20150302143835.21EB12443BF@lamiral.info>
+Content-Type: message/partial;
+	id="TAN_U_P<1404267997.00007489ed17>";
+	number=3;
+	total=3
+Date: Mon,  2 Mar 2015 15:38:34 +0100 (CET)
+From: gilles@lamiral.info (Gilles LAMIRAL)
+
+test: aethaecohngiexao
+EOM
+),
+	'skipmess: 9 match Content-Type: Message/Partial' ) ;
+
+ok( 1 == skipmess(
+<<'EOM'
+Date: Mon,  2 Mar 2015 15:38:34 +0100 (CET)
+From: gilles@lamiral.info (Gilles LAMIRAL)
+Content-Type: message/partial;
+	id="TAN_U_P<1404267997.00007489ed17>";
+	number=3;
+	total=3
+
+test: aethaecohngiexao
+EOM
+. "lalala\n" x 3_000_000
+),
+	'skipmess: 10 match Content-Type: Message/Partial' ) ;
+
+ok( 0 == skipmess(
+<<'EOM'
+Date: Mon,  2 Mar 2015 15:38:34 +0100 (CET)
+From: gilles@lamiral.info (Gilles LAMIRAL)
+
+test: aethaecohngiexao
+EOM
+. "lalala\n" x 3_000_000
+),
+	'skipmess: 11 match Content-Type: Message/Partial' ) ;
+
+
+ok( 0 == skipmess(
+<<"EOM"
+From: fff\r
+To: fff\r
+Subject: Testing imapsync --skipmess\r
+Date: Mon, 22 Aug 2011 08:40:20 +0800\r
+Mime-Version: 1.0\r
+Content-Type: text/plain; charset=iso-8859-1\r
+Content-Transfer-Encoding: 7bit\r
+\r
+EOM
+. qq{!#"d%&'()*+,-./0123456789:;<=>?\@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefg\r\n } x 32_730
+),
+	'skipmess: 12 not match Content-Type: Message/Partial' ) ;
+	# Complex regular subexpression recursion limit (32766) exceeded with more lines
+	# exit;
+
+	note( 'Leaving tests_skipmess()' ) ;
+	return ;
+}
+
+# skipmess( $string )
+# Tests $string against each user-supplied match expression in the global
+# @skipmess (string-eval'ed as "\$match = \$string =~ $skipmess").
+# Returns a true value as soon as one expression matches (the message is
+# to be skipped), the last false match value when none match, or undef
+# when an expression fails to eval (bad regex). Debug tracing via
+# $sync->{ debug }. Expressions are trusted command-line options.
+sub skipmess
+{
+	my ( $string ) = @_ ;
+	my $match ;
+	#myprint( "$string\n" ) ;
+	foreach my $skipmess ( @skipmess ) {
+		$sync->{ debug } and myprint( "eval \$match = \$string =~ $skipmess\n" ) ;
+		my $ret = eval "\$match = \$string =~ $skipmess ; 1" ;
+		#myprint( "eval [$ret]\n" ) ;
+		$sync->{ debug } and myprint( "match [$match]\n" ) ;
+		if ( ( not $ret ) or $EVAL_ERROR ) {
+			myprint( "Error: eval skipmess '$skipmess': $EVAL_ERROR" ) ;
+			return( undef ) ;
+		}
+		return( $match ) if ( $match ) ;
+	}
+	return( $match ) ;
+}
+
+
+
+
+# Unit tests for bytes_display_string(): NA on missing/undef/non-numeric
+# input, then unit boundaries from KiB up to PiB (values beyond PiB stay
+# expressed in PiB by design).
+sub tests_bytes_display_string
+{
+	note( 'Entering tests_bytes_display_string()' ) ;
+
+
+	is( 'NA', bytes_display_string(  ), 'bytes_display_string: no args => NA' ) ;
+	is( 'NA', bytes_display_string( undef ), 'bytes_display_string: undef => NA' ) ;
+	is( 'NA', bytes_display_string( 'blabla' ), 'bytes_display_string: blabla => NA' ) ;
+
+	ok( '0.000 KiB' eq bytes_display_string( 0 ), 'bytes_display_string: 0' ) ;
+	ok( '0.001 KiB' eq bytes_display_string( 1 ), 'bytes_display_string: 1' ) ;
+	ok( '0.010 KiB' eq bytes_display_string( 10 ), 'bytes_display_string: 10' ) ;
+	ok( '1.000 MiB' eq bytes_display_string( 1_048_575 ), 'bytes_display_string: 1_048_575' ) ;
+	ok( '1.000 MiB' eq bytes_display_string( 1_048_576 ), 'bytes_display_string: 1_048_576' ) ;
+
+	ok( '1.000 GiB' eq bytes_display_string( 1_073_741_823 ), 'bytes_display_string: 1_073_741_823 ' ) ;
+	ok( '1.000 GiB' eq bytes_display_string( 1_073_741_824 ), 'bytes_display_string: 1_073_741_824 ' ) ;
+
+	ok( '1.000 TiB' eq bytes_display_string( 1_099_511_627_775 ), 'bytes_display_string: 1_099_511_627_775' ) ;
+	ok( '1.000 TiB' eq bytes_display_string( 1_099_511_627_776 ), 'bytes_display_string: 1_099_511_627_776' ) ;
+
+	ok( '1.000 PiB' eq bytes_display_string( 1_125_899_906_842_623 ), 'bytes_display_string: 1_125_899_906_842_623' ) ;
+	ok( '1.000 PiB' eq bytes_display_string( 1_125_899_906_842_624 ), 'bytes_display_string: 1_125_899_906_842_624' ) ;
+
+	ok( '1024.000 PiB' eq bytes_display_string( 1_152_921_504_606_846_975 ), 'bytes_display_string: 1_152_921_504_606_846_975' ) ;
+	ok( '1024.000 PiB' eq bytes_display_string( 1_152_921_504_606_846_976 ), 'bytes_display_string: 1_152_921_504_606_846_976' ) ;
+
+	ok( '1048576.000 PiB' eq bytes_display_string( 1_180_591_620_717_411_303_424 ), 'bytes_display_string: 1_180_591_620_717_411_303_424' ) ;
+
+	#myprint( bytes_display_string( 1_180_591_620_717_411_303_424 ), "\n" ) ;
+	note( 'Leaving tests_bytes_display_string()' ) ;
+
+	return ;
+}
+
+# bytes_display_string( $bytes )
+# Formats a byte count as a human-readable string with three decimals and
+# a binary unit suffix (KiB/MiB/GiB/TiB/PiB). The unit switches when the
+# absolute value reaches 1000x the previous unit (so e.g. 1_048_575 still
+# prints as MiB); anything past the TiB threshold is expressed in PiB.
+# Returns 'NA' for undef or non-numeric input (per match_number()).
+# Uses the global $KIBI (1024) and mysprintf() for formatting.
+sub bytes_display_string
+{
+	my ( $bytes ) = @_ ;
+
+	my $readable_value = q{} ;
+
+	if ( ! defined( $bytes ) ) {
+		return( 'NA' ) ;
+	}
+
+	if ( not match_number( $bytes ) ) {
+		return( 'NA' ) ;
+	}
+
+
+
+	SWITCH: {
+		if ( abs( $bytes ) < ( 1000 * $KIBI ) ) {
+			$readable_value = mysprintf( '%.3f KiB', $bytes / $KIBI) ;
+			last SWITCH ;
+		}
+		if ( abs( $bytes ) < ( 1000 * $KIBI * $KIBI ) ) {
+			$readable_value = mysprintf( '%.3f MiB', $bytes / ($KIBI * $KIBI) ) ;
+			last SWITCH ;
+		}
+		if ( abs( $bytes ) < ( 1000 * $KIBI * $KIBI * $KIBI) ) {
+			$readable_value = mysprintf( '%.3f GiB', $bytes / ($KIBI * $KIBI * $KIBI) ) ;
+			last SWITCH ;
+		}
+		if ( abs( $bytes ) < ( 1000 * $KIBI * $KIBI * $KIBI * $KIBI) ) {
+			$readable_value = mysprintf( '%.3f TiB', $bytes / ($KIBI * $KIBI * $KIBI * $KIBI) ) ;
+			last SWITCH ;
+		} else {
+			$readable_value = mysprintf( '%.3f PiB', $bytes / ($KIBI * $KIBI * $KIBI * $KIBI * $KIBI) ) ;
+		}
+		# if you have exabytes (EiB) of email to transfer, you have too much email!
+	}
+	#myprint( "$bytes = $readable_value\n" ) ;
+	return( $readable_value ) ;
+}
+
+
+# Unit tests for useheader_suggestion(): undef when no count is set,
+# empty string for a zero count, and the --addheader suggestion otherwise.
+sub tests_useheader_suggestion
+{
+	note( 'Entering tests_useheader_suggestion()' ) ;
+
+	is( undef, useheader_suggestion(  ), 'useheader_suggestion: no args => undef' ) ;
+	my $mysync = {} ;
+
+	$mysync->{ h1_nb_msg_noheader } = 0 ;
+	is( q{}, useheader_suggestion( $mysync ), 'useheader_suggestion: h1_nb_msg_noheader count null => no suggestion' ) ;
+	$mysync->{ h1_nb_msg_noheader } = 2 ;
+	is( q{in order to sync those 2 unidentified messages, add option --addheader}, useheader_suggestion( $mysync ),
+	    'useheader_suggestion: h1_nb_msg_noheader count 2 => suggestion of --addheader' ) ;
+
+	note( 'Leaving tests_useheader_suggestion()' ) ;
+	return ;
+}
+
+# useheader_suggestion( $mysync )
+# Returns a hint string suggesting the --addheader option when
+# $mysync->{ h1_nb_msg_noheader } counts one or more messages without an
+# identifying header; returns an empty string for a zero count and
+# undef when the counter is not set (or no $mysync given).
+sub useheader_suggestion
+{
+	my $mysync = shift ;
+	if ( ! defined  $mysync->{ h1_nb_msg_noheader } )
+	{
+		return ;
+	}
+	elsif ( 1 <= $mysync->{ h1_nb_msg_noheader } )
+	{
+		return qq{in order to sync those $mysync->{ h1_nb_msg_noheader } unidentified messages, add option --addheader} ;
+	}
+	else
+	{
+		return q{} ;
+	}
+	# Unreachable: all branches above return.
+	return ;
+}
+
+# stats( $mysync )
+# Prints the end-of-run statistics report (timings, message/byte counters,
+# duplicate counts, reconnections, memory, load, folder size diffs) and the
+# final error count and homepage banner. Does nothing unless
+# $mysync->{stats} is true.
+# NOTE(review): besides $mysync, this reads many package globals set during
+# the run ($timestart_str, $h1_folders_wanted_ct/_nb, $h1/h2_nb_msg_*,
+# $max_msg_size_in_bytes, $warn_release, ...) — presumably populated before
+# this is called; confirm against the caller.
+sub stats
+{
+	my $mysync = shift ;
+
+	if ( ! $mysync->{stats} ) {
+		return ;
+	}
+
+	my $timeend = time ;
+	my $timediff = $timeend - $mysync->{timestart} ;
+
+	my $timeend_str = localtime $timeend ;
+
+	my $memory_consumption_at_end = memory_consumption(  ) || 0 ;
+	my $memory_consumption_at_start = $mysync->{ memory_consumption_at_start } || 0 ;
+	# Ratio of end memory to the biggest message seen; 'NA' when no message.
+	my $memory_ratio = ($max_msg_size_in_bytes) ?
+		mysprintf('%.1f', $memory_consumption_at_end / $max_msg_size_in_bytes) : 'NA' ;
+
+	# my $useheader_suggestion = useheader_suggestion( $mysync ) ;
+	myprint( "++++ Statistics\n" ) ;
+	myprint( "Transfer started on                     : $timestart_str\n" ) ;
+	myprint( "Transfer ended on                       : $timeend_str\n" ) ;
+	myprintf( "Transfer time                           : %.1f sec\n", $timediff ) ;
+	myprint( "Folders synced                          : $h1_folders_wanted_ct/$h1_folders_wanted_nb synced\n" ) ;
+	myprint( "Messages transferred                    : $mysync->{ nb_msg_transferred } " ) ;
+	myprint( "(could be $nb_msg_skipped_dry_mode without dry mode)" ) if ( $mysync->{dry} ) ;
+	myprint( "\n" ) ;
+	myprint( "Messages skipped                        : $mysync->{ nb_msg_skipped }\n" ) ;
+	myprint( "Messages found duplicate on host1       : $h1_nb_msg_duplicate\n" ) ;
+	myprint( "Messages found duplicate on host2       : $h2_nb_msg_duplicate\n" ) ;
+	myprint( "Messages found crossduplicate on host2  : $mysync->{ h2_nb_msg_crossdup }\n" ) ;
+	myprint( "Messages void (noheader) on host1       : $mysync->{ h1_nb_msg_noheader } ", useheader_suggestion( $mysync ), "\n" ) ;
+	myprint( "Messages void (noheader) on host2       : $h2_nb_msg_noheader\n" ) ;
+	nb_messages_in_1_not_in_2( $mysync ) ;
+	nb_messages_in_2_not_in_1( $mysync ) ;
+	myprintf( "Messages found in host1 not in host2    : %s messages\n", $mysync->{ nb_messages_in_1_not_in_2 } ) ;
+	myprintf( "Messages found in host2 not in host1    : %s messages\n", $mysync->{ nb_messages_in_2_not_in_1 } ) ;
+	myprint( "Messages deleted on host1               : $mysync->{ h1_nb_msg_deleted }\n" ) ;
+	myprint( "Messages deleted on host2               : $h2_nb_msg_deleted\n" ) ;
+	myprintf( "Total bytes transferred                 : %s (%s)\n",
+		$mysync->{total_bytes_transferred},
+		bytes_display_string( $mysync->{total_bytes_transferred} ) ) ;
+	myprintf( "Total bytes skipped                     : %s (%s)\n",
+		$mysync->{ total_bytes_skipped },
+		bytes_display_string( $mysync->{ total_bytes_skipped } ) ) ;
+	$timediff ||= 1 ; # No division per 0
+	myprintf("Message rate                            : %.1f messages/s\n", $mysync->{nb_msg_transferred} / $timediff ) ;
+	myprintf("Average bandwidth rate                  : %.1f KiB/s\n", $mysync->{total_bytes_transferred} / $KIBI / $timediff ) ;
+	myprint( "Reconnections to host1                  : $mysync->{imap1}->{IMAPSYNC_RECONNECT_COUNT}\n" ) ;
+	myprint( "Reconnections to host2                  : $mysync->{imap2}->{IMAPSYNC_RECONNECT_COUNT}\n" ) ;
+	myprintf("Memory consumption at the end           : %.1f MiB (started with %.1f MiB)\n",
+		$memory_consumption_at_end / $KIBI / $KIBI,
+		$memory_consumption_at_start / $KIBI / $KIBI ) ;
+	myprint( "Load end is                             : " . ( join( q{ }, loadavg(  ) ) || 'unknown' ), " on $mysync->{cpu_number} cores\n" ) ;
+
+	myprintf("Biggest message                         : %s bytes (%s)\n",
+		 $max_msg_size_in_bytes,
+		 bytes_display_string( $max_msg_size_in_bytes) ) ;
+	myprint( "Memory/biggest message ratio            : $memory_ratio\n" ) ;
+	# Folder-size diffs are only meaningful when sizes were measured
+	# both at start and at end of the run.
+	if ( $mysync->{ foldersizesatend } and $mysync->{ foldersizes } ) {
+
+
+		my $nb_msg_start_diff = diff_or_NA( $mysync->{ h2_nb_msg_start }, $mysync->{ h1_nb_msg_start } ) ;
+		my $bytes_start_diff  = diff_or_NA( $mysync->{ h2_bytes_start }, $mysync->{ h1_bytes_start } ) ;
+
+		myprintf("Start difference host2 - host1          : %s messages, %s bytes (%s)\n", $nb_msg_start_diff,
+			$bytes_start_diff,
+			bytes_display_string( $bytes_start_diff ) ) ;
+
+		my $nb_msg_end_diff = diff_or_NA( $h2_nb_msg_end, $h1_nb_msg_end ) ;
+		my $bytes_end_diff  = diff_or_NA( $h2_bytes_end,  $h1_bytes_end ) ;
+
+		myprintf("Final difference host2 - host1          : %s messages, %s bytes (%s)\n", $nb_msg_end_diff,
+			$bytes_end_diff,
+			bytes_display_string( $bytes_end_diff ) ) ;
+	}
+
+	comment_on_final_diff_in_1_not_in_2( $mysync ) ;
+	comment_on_final_diff_in_2_not_in_1( $mysync ) ;
+	myprint( "Detected $mysync->{nb_errors} errors\n\n" ) ;
+
+	myprint( $warn_release, "\n" ) ;
+	myprint( homepage(  ), "\n" ) ;
+	return ;
+}
+
+# diff_or_NA( $n1, $n2 )
+# Returns $n1 - $n2 when both arguments are defined and look numeric
+# (per match_number()), otherwise the string 'NA'. Used by stats() so a
+# missing counter prints as NA instead of warning or dying.
+sub diff_or_NA
+{
+	my( $n1, $n2 ) = @ARG ;
+
+	if ( not defined  $n1  or not defined  $n2  ) {
+		return 'NA' ;
+	}
+
+	if ( not match_number( $n1 )
+	  or not match_number( $n2 ) ) {
+		return 'NA' ;
+	}
+
+	return( $n1 - $n2 ) ;
+}
+
+# match_number( $n )
+# Returns 1 when $n contains a run of digits optionally followed by a dot
+# and one digit, 0 for undef or anything with no digits.
+# NOTE(review): the pattern is unanchored, so mixed strings like "12abc"
+# also count as numbers — looks intentional as a lenient check for
+# counters; confirm before tightening.
+sub match_number
+{
+	my $n = shift @ARG ;
+
+	if ( not defined $n ) {
+		return 0 ;
+	}
+	if ( $n =~ /[0-9]+\.?[0-9]?/x ) {
+		return 1 ;
+	}
+	else {
+		return 0 ;
+	}
+}
+
+
+# Unit tests for match_number(): no args, undef and non-numeric give 0;
+# integers and simple decimals give 1.
+sub tests_match_number
+{
+	note( 'Entering tests_match_number()' ) ;
+
+
+	is( 0, match_number(  ), 'match_number: no parameters => 0' ) ;
+	is( 0, match_number( undef ), 'match_number: undef => 0' ) ;
+	is( 0, match_number( 'blabla' ), 'match_number: blabla => 0' ) ;
+	is( 1, match_number( 0 ), 'match_number: 0 => 1' ) ;
+	is( 1, match_number( 1 ), 'match_number: 1 => 1' ) ;
+	is( 1, match_number( 1.0 ), 'match_number: 1.0 => 1' ) ;
+	is( 1, match_number( 0.0 ), 'match_number: 0.0 => 1' ) ;
+
+	note( 'Leaving tests_match_number()' ) ;
+	return ;
+}
+
+
+
+# Unit tests for diff_or_NA(): 'NA' for any missing/undef/non-numeric
+# operand, and plain subtraction for numeric pairs (ints and floats).
+sub tests_diff_or_NA
+{
+	note( 'Entering tests_diff_or_NA()' ) ;
+
+
+	is( 'NA', diff_or_NA(  ), 'diff_or_NA: no parameters => NA' ) ;
+	is( 'NA', diff_or_NA( undef ), 'diff_or_NA: undef => NA' ) ;
+	is( 'NA', diff_or_NA( undef, undef ), 'diff_or_NA: undef undef => NA' ) ;
+	is( 'NA', diff_or_NA( undef, 1 ), 'diff_or_NA: undef 1 => NA' ) ;
+	is( 'NA', diff_or_NA( 1, undef ), 'diff_or_NA: 1 undef => NA' ) ;
+	is( 'NA', diff_or_NA( 'blabla', 1 ), 'diff_or_NA: blabla 1 => NA' ) ;
+	is( 'NA', diff_or_NA( 1, 'blabla' ), 'diff_or_NA: 1 blabla => NA' ) ;
+	is( 0, diff_or_NA( 1, 1 ), 'diff_or_NA: 1 1 => 0' ) ;
+	is( 1, diff_or_NA( 1, 0 ), 'diff_or_NA: 1 0 => 1' ) ;
+	is( -1, diff_or_NA( 0, 1 ), 'diff_or_NA: 0 1 => -1' ) ;
+	is( 0, diff_or_NA( 1.0, 1 ), 'diff_or_NA: 1.0 1 => 0' ) ;
+	is( 1, diff_or_NA( 1.0, 0 ), 'diff_or_NA: 1.0 0 => 1' ) ;
+	is( -1, diff_or_NA( 0, 1.0 ), 'diff_or_NA: 0 1.0 => -1' ) ;
+
+	note( 'Leaving tests_diff_or_NA()' ) ;
+	return ;
+}
+
+sub homepage
+{
+ return( 'Homepage: https://imapsync.lamiral.info/' ) ;
+}
+
+
+sub load_modules
+{
+ if ( $sync->{ssl1}
+ or $sync->{ssl2}
+ or $sync->{tls1}
+ or $sync->{tls2}) {
+ if ( $sync->{inet4} ) {
+ IO::Socket::SSL->import( 'inet4' ) ;
+ }
+ if ( $sync->{inet6} ) {
+ IO::Socket::SSL->import( 'inet6' ) ;
+ }
+ }
+ return ;
+}
+
+
+
+sub parse_header_msg
+{
+ my ( $mysync, $imap, $m_uid, $s_heads, $s_fir, $side, $s_hash ) = @_ ;
+
+ my $head = $s_heads->{$m_uid} ;
+ my $headnum = scalar keys %{ $head } ;
+ $mysync->{ debug } and myprint( "$side: uid $m_uid number of headers, pass one: ", $headnum, "\n" ) ;
+
+ if ( ( ! $headnum ) and ( $wholeheaderifneeded ) ){
+ $mysync->{ debug } and myprint( "$side: uid $m_uid no header by parse_headers so taking whole header with BODY.PEEK[HEADER]\n" ) ;
+ $imap->fetch($m_uid, 'BODY.PEEK[HEADER]' ) ;
+ my $whole_header = $imap->_transaction_literals ;
+
+ #myprint( $whole_header ) ;
+ $head = decompose_header( $whole_header ) ;
+
+ $headnum = scalar keys %{ $head } ;
+ $mysync->{ debug } and myprint( "$side: uid $m_uid number of headers, pass two: ", $headnum, "\n" ) ;
+ }
+
+ #myprint( Data::Dumper->Dump( [ $head, \%useheader ] ) ) ;
+
+ my $headstr ;
+
+ $headstr = header_construct( $head, $side, $m_uid ) ;
+
+ if ( ( ! $headstr ) and ( $mysync->{addheader} ) and ( $side eq 'Host1' ) ) {
+ my $header = add_header( $m_uid ) ;
+ $mysync->{ debug } and myprint( "$side: uid $m_uid no header found so adding our own [$header]\n" ) ;
+ $headstr .= uc $header ;
+ $s_fir->{$m_uid}->{NO_HEADER} = 1;
+ }
+
+ return if ( ! $headstr ) ;
+
+ my $size = $s_fir->{$m_uid}->{'RFC822.SIZE'} ;
+ my $flags = $s_fir->{$m_uid}->{'FLAGS'} ;
+ my $idate = $s_fir->{$m_uid}->{'INTERNALDATE'} ;
+ $size = length $headstr unless ( $size ) ;
+ my $m_md5 = md5_base64( $headstr ) ;
+ $mysync->{ debug } and myprint( "$side: uid $m_uid sig $m_md5 size $size idate $idate\n" ) ;
+ my $key ;
+ if ($skipsize) {
+ $key = "$m_md5";
+ }
+ else {
+ $key = "$m_md5:$size";
+ }
+ # 0 return code is used to identify duplicate message hash
+ return 0 if exists $s_hash->{"$key"};
+ $s_hash->{"$key"}{'5'} = $m_md5;
+ $s_hash->{"$key"}{'s'} = $size;
+ $s_hash->{"$key"}{'D'} = $idate;
+ $s_hash->{"$key"}{'F'} = $flags;
+ $s_hash->{"$key"}{'m'} = $m_uid;
+
+ return( 1 ) ;
+}
+
+sub header_construct
+{
+
+ my( $head, $side, $m_uid ) = @_ ;
+
+ my $headstr ;
+ foreach my $h ( sort keys %{ $head } ) {
+ next if ( not ( exists $useheader{ uc $h } )
+ and ( not exists $useheader{ 'ALL' } )
+ ) ;
+ foreach my $val ( sort @{$head->{$h}} ) {
+
+ my $H = header_line_normalize( $h, $val ) ;
+
+ # show stuff in debug mode
+ $sync->{ debug } and myprint( "$side uid $m_uid header [$H]", "\n" ) ;
+
+ if ($skipheader and $H =~ m/$skipheader/xi) {
+ $sync->{ debug } and myprint( "$side uid $m_uid skipping header [$H]\n" ) ;
+ next ;
+ }
+ $headstr .= "$H" ;
+ }
+ }
+ return( $headstr ) ;
+}
+
+
+sub header_line_normalize
+{
+ my( $header_key, $header_val ) = @_ ;
+
+ # no 8-bit data in headers !
+ $header_val =~ s/[\x80-\xff]/X/xog;
+
+ # change tabulations to space (Gmail bug on with "Received:" on multilines)
+ $header_val =~ s/\t/\ /xgo ;
+
+ # remove the first blanks ( dbmail bug? )
+ $header_val =~ s/^\s*//xo;
+
+ # remove the last blanks ( Gmail bug )
+ $header_val =~ s/\s*$//xo;
+
+ # remove successive blanks ( Mailenable does it )
+ $header_val =~ s/\s+/ /xgo;
+
+ # remove Message-Id value domain part ( Mailenable changes it )
+ if ( ( $messageidnodomain ) and ( 'MESSAGE-ID' eq uc $header_key ) ) { $header_val =~ s/^([^@]+).*$/$1/xo ; }
+
+ # and uppercase header line
+ # (dbmail and dovecot)
+
+ my $header_line = uc "$header_key: $header_val" ;
+
+ return( $header_line ) ;
+}
+
+sub tests_header_line_normalize
+{
+ note( 'Entering tests_header_line_normalize()' ) ;
+
+
+ ok( ': ' eq header_line_normalize( q{}, q{} ), 'header_line_normalize: empty args' ) ;
+ ok( 'HHH: VVV' eq header_line_normalize( 'hhh', 'vvv' ), 'header_line_normalize: hhh vvv ' ) ;
+ ok( 'HHH: VVV' eq header_line_normalize( 'hhh', ' vvv' ), 'header_line_normalize: remove first blancs' ) ;
+ ok( 'HHH: AA BB CCC D' eq header_line_normalize( 'hhh', 'aa bb ccc d' ), 'header_line_normalize: remove succesive blanks' ) ;
+ ok( 'HHH: AA BB CCC' eq header_line_normalize( 'hhh', 'aa bb ccc ' ), 'header_line_normalize: remove last blanks' ) ;
+ ok( 'HHH: VVV XX YY' eq header_line_normalize( 'hhh', "vvv\t\txx\tyy" ), 'header_line_normalize: tabs' ) ;
+ ok( 'HHH: XABX' eq header_line_normalize( 'hhh', "\x80AB\xff" ), 'header_line_normalize: 8bit' ) ;
+
+ note( 'Leaving tests_header_line_normalize()' ) ;
+ return ;
+}
+
+
+sub tests_firstline
+{
+ note( 'Entering tests_firstline()' ) ;
+
+ is( q{}, firstline( 'W/tmp/tests/noexist.txt' ), 'firstline: getting empty string from inexisting W/tmp/tests/noexist.txt' ) ;
+
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'firstline: mkpath W/tmp/tests/' ) ;
+
+ is( "blabla\n" , string_to_file( "blabla\n", 'W/tmp/tests/firstline.txt' ), 'firstline: put blabla in W/tmp/tests/firstline.txt' ) ;
+ is( 'blabla' , firstline( 'W/tmp/tests/firstline.txt' ), 'firstline: get blabla from W/tmp/tests/firstline.txt' ) ;
+
+ is( q{} , string_to_file( q{}, 'W/tmp/tests/firstline2.txt' ), 'firstline: put empty string in W/tmp/tests/firstline2.txt' ) ;
+ is( q{} , firstline( 'W/tmp/tests/firstline2.txt' ), 'firstline: get empty string from W/tmp/tests/firstline2.txt' ) ;
+
+ is( "\n" , string_to_file( "\n", 'W/tmp/tests/firstline3.txt' ), 'firstline: put CR in W/tmp/tests/firstline3.txt' ) ;
+ is( q{} , firstline( 'W/tmp/tests/firstline3.txt' ), 'firstline: get empty string from W/tmp/tests/firstline3.txt' ) ;
+
+ is( "blabla\nTiti\n" , string_to_file( "blabla\nTiti\n", 'W/tmp/tests/firstline4.txt' ), 'firstline: put blabla\nTiti\n in W/tmp/tests/firstline4.txt' ) ;
+ is( 'blabla' , firstline( 'W/tmp/tests/firstline4.txt' ), 'firstline: get blabla from W/tmp/tests/firstline4.txt' ) ;
+
+ note( 'Leaving tests_firstline()' ) ;
+ return ;
+}
+
+sub firstline
+{
+ # extract the first line of a file (without \n)
+ # return empty string if error or empty string
+
+ my $file = shift ;
+ my $line ;
+
+ $line = nthline( $file, 1 ) ;
+ return $line ;
+}
+
+
+
+sub tests_secondline
+{
+ note( 'Entering tests_secondline()' ) ;
+
+ is( q{}, secondline( 'W/tmp/tests/noexist.txt' ), 'secondline: getting empty string from inexisting W/tmp/tests/noexist.txt' ) ;
+ is( q{}, secondline( 'W/tmp/tests/noexist.txt', 2 ), 'secondline: 2nd getting empty string from inexisting W/tmp/tests/noexist.txt' ) ;
+
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'secondline: mkpath W/tmp/tests/' ) ;
+
+ is( "L1\nL2\nL3\nL4\n" , string_to_file( "L1\nL2\nL3\nL4\n", 'W/tmp/tests/secondline.txt' ), 'secondline: put L1\nL2\nL3\nL4\n in W/tmp/tests/secondline.txt' ) ;
+ is( 'L2' , secondline( 'W/tmp/tests/secondline.txt' ), 'secondline: get L2 from W/tmp/tests/secondline.txt' ) ;
+
+
+ note( 'Leaving tests_secondline()' ) ;
+ return ;
+}
+
+
+sub secondline
+{
+ # extract the second line of a file (without \n)
+ # return empty string if error or empty string
+
+ my $file = shift ;
+ my $line ;
+
+ $line = nthline( $file, 2 ) ;
+ return $line ;
+}
+
+
+
+
+sub tests_nthline
+{
+ note( 'Entering tests_nthline()' ) ;
+
+ is( q{}, nthline( 'W/tmp/tests/noexist.txt' ), 'nthline: getting empty string from inexisting W/tmp/tests/noexist.txt' ) ;
+ is( q{}, nthline( 'W/tmp/tests/noexist.txt', 2 ), 'nthline: 2nd getting empty string from inexisting W/tmp/tests/noexist.txt' ) ;
+
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'nthline: mkpath W/tmp/tests/' ) ;
+
+ is( "L1\nL2\nL3\nL4\n" , string_to_file( "L1\nL2\nL3\nL4\n", 'W/tmp/tests/nthline.txt' ), 'nthline: put L1\nL2\nL3\nL4\n in W/tmp/tests/nthline.txt' ) ;
+ is( 'L3' , nthline( 'W/tmp/tests/nthline.txt', 3 ), 'nthline: get L3 from W/tmp/tests/nthline.txt' ) ;
+
+
+ note( 'Leaving tests_nthline()' ) ;
+ return ;
+}
+
+
+sub nthline
+{
+ # extract the nth line of a file (without \n)
+ # return empty string if error or empty string
+
+ my $file = shift ;
+ my $num = shift ;
+
+ if ( ! all_defined( $file, $num ) ) { return q{} ; }
+
+ my $line ;
+
+ $line = ( file_to_array( $file ) )[$num - 1] ;
+ if ( ! defined $line )
+ {
+ return q{} ;
+ }
+ else
+ {
+ chomp $line ;
+ return $line ;
+ }
+}
+
+
+# Should be unit tested and then be used by file_to_string, refactoring file_to_string
+sub file_to_array
+{
+
+ my( $file ) = shift ;
+ my @string ;
+
+ open my $FILE, '<', $file or do {
+ myprint( "Error reading file $file : $OS_ERROR\n" ) ;
+ return ;
+ } ;
+ @string = <$FILE> ;
+ close $FILE ;
+ return( @string ) ;
+}
+
+
+sub tests_file_to_string
+{
+ note( 'Entering tests_file_to_string()' ) ;
+
+ is( undef, file_to_string( ), 'file_to_string: no args => undef' ) ;
+ is( undef, file_to_string( '/noexist' ), 'file_to_string: /noexist => undef' ) ;
+ is( undef, file_to_string( '/' ), 'file_to_string: reading a directory => undef' ) ;
+ ok( file_to_string( $PROGRAM_NAME ), 'file_to_string: reading myself' ) ;
+
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'file_to_string: mkpath W/tmp/tests/' ) ;
+
+ is( 'lilili', string_to_file( 'lilili', 'W/tmp/tests/canbewritten' ), 'file_to_string: string_to_file filling W/tmp/tests/canbewritten with lilili' ) ;
+ is( 'lilili', file_to_string( 'W/tmp/tests/canbewritten' ), 'file_to_string: reading W/tmp/tests/canbewritten is lilili' ) ;
+
+ is( q{}, string_to_file( q{}, 'W/tmp/tests/empty' ), 'file_to_string: string_to_file filling W/tmp/tests/empty with empty string' ) ;
+ is( q{}, file_to_string( 'W/tmp/tests/empty' ), 'file_to_string: reading W/tmp/tests/empty is empty' ) ;
+
+ note( 'Leaving tests_file_to_string()' ) ;
+ return ;
+}
+
+sub file_to_string
+{
+ my $file = shift ;
+ if ( ! $file ) { return ; }
+ if ( ! -e $file ) { return ; }
+ if ( ! -f $file ) { return ; }
+ if ( ! -r $file ) { return ; }
+ my @string ;
+ if ( open my $FILE, '<', $file ) {
+ @string = <$FILE> ;
+ close $FILE ;
+ return( join q{}, @string ) ;
+ }else{
+ myprint( "Error reading file $file : $OS_ERROR\n" ) ;
+ return ;
+ }
+}
+
+
+sub tests_string_to_file
+{
+ note( 'Entering tests_string_to_file()' ) ;
+
+ is( undef, string_to_file( ), 'string_to_file: no args => undef' ) ;
+ is( undef, string_to_file( 'lalala' ), 'string_to_file: one arg => undef' ) ;
+ is( undef, string_to_file( 'lalala', '.' ), 'string_to_file: writing a directory => undef' ) ;
+ ok( (-d 'W/tmp/tests/' or mkpath( 'W/tmp/tests/' ) ), 'string_to_file: mkpath W/tmp/tests/' ) ;
+ is( 'lalala', string_to_file( 'lalala', 'W/tmp/tests/canbewritten' ), 'string_to_file: W/tmp/tests/canbewritten with lalala' ) ;
+ is( q{}, string_to_file( q{}, 'W/tmp/tests/empty' ), 'string_to_file: W/tmp/tests/empty with empty string' ) ;
+
+ SKIP: {
+ Readonly my $NB_UNX_tests_string_to_file => 1 ;
+ skip( 'Not on Unix non-root', $NB_UNX_tests_string_to_file ) if ('MSWin32' eq $OSNAME or '0' eq $EFFECTIVE_USER_ID ) ;
+ is( undef, string_to_file( 'lalala', '/cantouch' ), 'string_to_file: /cantouch denied => undef' ) ;
+ }
+
+ note( 'Leaving tests_string_to_file()' ) ;
+ return ;
+}
+
+sub string_to_file
+{
+ my( $string, $file ) = @_ ;
+ if( ! defined $string ) { return ; }
+ if( ! defined $file ) { return ; }
+
+ if ( ! -e $file && ! -w dirname( $file ) ) {
+ myprint( "string_to_file: directory of $file is not writable\n" ) ;
+ return ;
+ }
+
+ if ( ! sysopen( FILE, $file, O_WRONLY|O_TRUNC|O_CREAT, 0600) ) {
+ myprint( "string_to_file: failure writing to $file with error: $OS_ERROR\n" ) ;
+ return ;
+ }
+ print FILE $string ;
+ close FILE ;
+ return $string ;
+}
+
+0 and <<'MULTILINE_COMMENT' ;
+This is a multiline comment.
+Based on David Carter discussion, to do:
+* Call parameters stay the same.
+* Now always "return( $string, $error )". Descriptions below.
+OK * Still capture STDOUT via "1> $output_tmpfile" to finish in $string and "return( $string, $error )"
+OK * Now also capture STDERR via "2> $error_tmpfile" to finish in $error and "return( $string, $error )"
+OK * in case of CHILD_ERROR, return( undef, $error )
+ and print $error, with folder/UID/maybeSubject context,
+ on console and at the end with the final error listing. Count this as a sync error.
+* in case of good command, take final $string as is, unless void. In case $error with value then print it.
+* in case of good command and final $string empty, consider it like CHILD_ERROR =>
+ return( undef, $error ) and print $error, with folder/UID/maybeSubject context,
+ on console and at the end with the final error listing. Count this as a sync error.
+MULTILINE_COMMENT
+# End of multiline comment.
+
+sub pipemess
+{
+ my ( $string, @commands ) = @_ ;
+ my $error = q{} ;
+ foreach my $command ( @commands ) {
+ my $input_tmpfile = "$sync->{ tmpdir }/imapsync_tmp_file.$PROCESS_ID.inp.txt" ;
+ my $output_tmpfile = "$sync->{ tmpdir }/imapsync_tmp_file.$PROCESS_ID.out.txt" ;
+ my $error_tmpfile = "$sync->{ tmpdir }/imapsync_tmp_file.$PROCESS_ID.err.txt" ;
+ string_to_file( $string, $input_tmpfile ) ;
+ ` $command < $input_tmpfile 1> $output_tmpfile 2> $error_tmpfile ` ;
+ my $is_command_ko = $CHILD_ERROR ;
+ my $error_cmd = file_to_string( $error_tmpfile ) ;
+ chomp( $error_cmd ) ;
+ $string = file_to_string( $output_tmpfile ) ;
+ my $string_len = length( $string ) ;
+ unlink $input_tmpfile, $output_tmpfile, $error_tmpfile ;
+
+ if ( $is_command_ko or ( ! $string_len ) ) {
+ my $cmd_exit_value = $CHILD_ERROR >> 8 ;
+ my $cmd_end_signal = $CHILD_ERROR & 127 ;
+ my $signal_log = ( $cmd_end_signal ) ? " signal $cmd_end_signal and" : q{} ;
+ my $error_log = qq{Failure: --pipemess command "$command" ended with$signal_log "$string_len" characters exit value "$cmd_exit_value" and STDERR "$error_cmd"\n} ;
+ myprint( $error_log ) ;
+ if ( wantarray ) {
+ return @{ [ undef, $error_log ] }
+ }else{
+ return ;
+ }
+ }
+ if ( $error_cmd ) {
+ $error .= qq{STDERR of --pipemess "$command": $error_cmd\n} ;
+ myprint( qq{STDERR of --pipemess "$command": $error_cmd\n} ) ;
+ }
+ }
+ #myprint( "[$string]\n" ) ;
+ if ( wantarray ) {
+ return ( $string, $error ) ;
+ }else{
+ return $string ;
+ }
+}
+
+
+
+sub tests_pipemess
+{
+ note( 'Entering tests_pipemess()' ) ;
+
+
+ SKIP: {
+ Readonly my $NB_WIN_tests_pipemess => 3 ;
+ skip( 'Not on MSWin32', $NB_WIN_tests_pipemess ) if ('MSWin32' ne $OSNAME) ;
+ # Windows
+ # "type" command does not accept redirection of STDIN with <
+ # "sort" does
+ ok( "nochange\n" eq pipemess( 'nochange', 'sort' ), 'pipemess: nearly no change by sort' ) ;
+ ok( "nochange2\n" eq pipemess( 'nochange2', qw( sort sort ) ), 'pipemess: nearly no change by sort,sort' ) ;
+ # command not found
+ #diag( 'Warning and failure about cacaprout are on purpose' ) ;
+ ok( ! defined( pipemess( q{}, 'cacaprout' ) ), 'pipemess: command not found' ) ;
+
+ } ;
+
+ my ( $stringT, $errorT ) ;
+
+ SKIP: {
+ Readonly my $NB_UNX_tests_pipemess => 25 ;
+ skip( 'Not on Unix', $NB_UNX_tests_pipemess ) if ('MSWin32' eq $OSNAME) ;
+ # Unix
+ ok( 'nochange' eq pipemess( 'nochange', 'cat' ), 'pipemess: no change by cat' ) ;
+
+ ok( 'nochange2' eq pipemess( 'nochange2', 'cat', 'cat' ), 'pipemess: no change by cat,cat' ) ;
+
+ ok( " 1\tnumberize\n" eq pipemess( "numberize\n", 'cat -n' ), 'pipemess: numberize by cat -n' ) ;
+ ok( " 1\tnumberize\n 2\tnumberize\n" eq pipemess( "numberize\nnumberize\n", 'cat -n' ), 'pipemess: numberize by cat -n' ) ;
+
+ ok( "A\nB\nC\n" eq pipemess( "A\nC\nB\n", 'sort' ), 'pipemess: sort' ) ;
+
+ # command not found
+ #diag( 'Warning and failure about cacaprout are on purpose' ) ;
+ is( undef, pipemess( q{}, 'cacaprout' ), 'pipemess: command not found' ) ;
+
+ # success with true but no output at all
+ is( undef, pipemess( q{blabla}, 'true' ), 'pipemess: true but no output' ) ;
+
+ # failure with false and no output at all
+ is( undef, pipemess( q{blabla}, 'false' ), 'pipemess: false and no output' ) ;
+
+ # Failure since pipemess is not a real pipe, so first cat wait for standard input
+ is( q{blabla}, pipemess( q{blabla}, '( cat|cat ) ' ), 'pipemess: ok by ( cat|cat )' ) ;
+
+
+ ( $stringT, $errorT ) = pipemess( 'nochange', 'cat' ) ;
+ is( $stringT, 'nochange', 'pipemess: list context, no change by cat, string' ) ;
+ is( $errorT, q{}, 'pipemess: list context, no change by cat, no error' ) ;
+
+ ( $stringT, $errorT ) = pipemess( 'dontcare', 'true' ) ;
+ is( $stringT, undef, 'pipemess: list context, true but no output, string' ) ;
+ like( $errorT, qr{\QFailure: --pipemess command "true" ended with "0" characters exit value "0" and STDERR ""\E}xm, 'pipemess: list context, true but no output, error' ) ;
+
+ ( $stringT, $errorT ) = pipemess( 'dontcare', 'false' ) ;
+ is( $stringT, undef, 'pipemess: list context, false and no output, string' ) ;
+ like( $errorT, qr{\QFailure: --pipemess command "false" ended with "0" characters exit value "1" and STDERR ""\E}xm,
+ 'pipemess: list context, false and no output, error' ) ;
+
+ ( $stringT, $errorT ) = pipemess( 'dontcare', '/bin/echo -n blablabla' ) ;
+ is( $stringT, q{blablabla}, 'pipemess: list context, "echo -n blablabla", string' ) ;
+ is( $errorT, q{}, 'pipemess: list context, "echo blablabla", error' ) ;
+
+
+ ( $stringT, $errorT ) = pipemess( 'dontcare', '( echo -n blablabla 3>&1 1>&2 2>&3 )' ) ;
+ is( $stringT, undef, 'pipemess: list context, "no output STDERR blablabla", string' ) ;
+ like( $errorT, qr{blablabla"}xm, 'pipemess: list context, "no output STDERR blablabla", error' ) ;
+
+
+ ( $stringT, $errorT ) = pipemess( 'dontcare', '( echo -n blablabla 3>&1 1>&2 2>&3 )', 'false' ) ;
+ is( $stringT, undef, 'pipemess: list context, "no output STDERR blablabla then false", string' ) ;
+ like( $errorT, qr{blablabla"}xm, 'pipemess: list context, "no output STDERR blablabla then false", error' ) ;
+
+ ( $stringT, $errorT ) = pipemess( 'dontcare', 'false', '( echo -n blablabla 3>&1 1>&2 2>&3 )' ) ;
+ is( $stringT, undef, 'pipemess: list context, "false then STDERR blablabla", string' ) ;
+ like( $errorT, qr{\QFailure: --pipemess command "false" ended with "0" characters exit value "1" and STDERR ""\E}xm,
+ 'pipemess: list context, "false then STDERR blablabla", error' ) ;
+
+ ( $stringT, $errorT ) = pipemess( 'dontcare', '( echo rrrrr ; echo -n error_blablabla 3>&1 1>&2 2>&3 )' ) ;
+ like( $stringT, qr{rrrrr}xm, 'pipemess: list context, "STDOUT rrrrr STDERR error_blablabla", string' ) ;
+ like( $errorT, qr{STDERR.*error_blablabla}xm, 'pipemess: list context, "STDOUT rrrrr STDERR error_blablabla", error' ) ;
+
+ }
+
+ ( $stringT, $errorT ) = pipemess( 'dontcare', 'cacaprout' ) ;
+ is( $stringT, undef, 'pipemess: list context, cacaprout not found, string' ) ;
+ like( $errorT, qr{\QFailure: --pipemess command "cacaprout" ended with "0" characters exit value\E}xm,
+ 'pipemess: list context, cacaprout not found, error' ) ;
+
+ note( 'Leaving tests_pipemess()' ) ;
+ return ;
+}
+
+
+
+sub tests_is_a_release_number
+{
+ note( 'Entering tests_is_a_release_number()' ) ;
+
+ is( undef, is_a_release_number( ), 'is_a_release_number: no args => undef' ) ;
+ ok( is_a_release_number( $RELEASE_NUMBER_EXAMPLE_1 ), 'is_a_release_number 1.351' ) ;
+ ok( is_a_release_number( $RELEASE_NUMBER_EXAMPLE_2 ), 'is_a_release_number 42.4242' ) ;
+ ok( is_a_release_number( imapsync_version( $sync ) ), 'is_a_release_number imapsync_version( )' ) ;
+ ok( ! is_a_release_number( 'blabla' ), '! is_a_release_number blabla' ) ;
+
+ note( 'Leaving tests_is_a_release_number()' ) ;
+ return ;
+}
+
+sub is_a_release_number
+{
+ my $number = shift ;
+ if ( ! defined $number ) { return ; }
+ return( $number =~ m{^\d+\.\d+$}xo ) ;
+}
+
+
+
+sub imapsync_version_public
+{
+
+ my $local_version = imapsync_version( $sync ) ;
+ my $imapsync_basename = imapsync_basename( ) ;
+ my $context = imapsync_context( ) ;
+ my $agent_info = "$OSNAME system, perl "
+ . mysprintf( '%vd', $PERL_VERSION)
+ . ", Mail::IMAPClient $Mail::IMAPClient::VERSION"
+ . " $imapsync_basename"
+ . " $context" ;
+ my $sock = IO::Socket::INET->new(
+ PeerAddr => 'imapsync.lamiral.info',
+ PeerPort => 80,
+ Proto => 'tcp',
+ ) ;
+ return( 'unknown' ) if not $sock ;
+ print $sock
+ "GET /prj/imapsync/VERSION HTTP/1.0\r\n",
+ "User-Agent: imapsync/$local_version ($agent_info)\r\n",
+ "Host: ks.lamiral.info\r\n\r\n" ;
+ my @line = <$sock> ;
+ close $sock ;
+ my $last_release = $line[$LAST] ;
+ chomp $last_release ;
+ return( $last_release ) ;
+}
+
+sub not_long_imapsync_version_public
+{
+ #myprint( "Entering not_long_imapsync_version_public\n" ) ;
+
+ my $fake = shift ;
+ if ( $fake ) { return $fake }
+
+ my $val ;
+
+ # Doesn't work with gethostbyname (see perlipc)
+ #local $SIG{ALRM} = sub { die "alarm\n" } ;
+
+ if ('MSWin32' eq $OSNAME) {
+ local $SIG{ALRM} = sub { die "alarm\n" } ;
+ }else{
+
+ POSIX::sigaction(SIGALRM,
+ POSIX::SigAction->new(sub { croak 'alarm' } ) )
+ or myprint( "Error setting SIGALRM handler: $OS_ERROR\n" ) ;
+ }
+
+ my $ret = eval {
+ alarm 3 ;
+ {
+ $val = imapsync_version_public( ) ;
+ #sleep 4 ;
+ #myprint( "End of imapsync_version_public\n" ) ;
+ }
+ alarm 0 ;
+ 1 ;
+ } ;
+ #myprint( "eval [$ret]\n" ) ;
+ if ( ( not $ret ) or $EVAL_ERROR ) {
+ #myprint( "$EVAL_ERROR" ) ;
+ if ($EVAL_ERROR =~ /alarm/) {
+ # timed out
+ return('timeout') ;
+ }else{
+ alarm 0 ;
+ return( 'unknown' ) ; # propagate unexpected errors
+ }
+ }else {
+ # Good!
+ return( $val ) ;
+ }
+}
+
+sub tests_not_long_imapsync_version_public
+{
+ note( 'Entering tests_not_long_imapsync_version_public()' ) ;
+
+
+ is( 1, is_a_release_number( not_long_imapsync_version_public( ) ),
+ 'not_long_imapsync_version_public: public release is a number' ) ;
+
+ note( 'Leaving tests_not_long_imapsync_version_public()' ) ;
+ return ;
+}
+
+sub check_last_release
+{
+ my $fake = shift ;
+ my $public_release = not_long_imapsync_version_public( $fake ) ;
+ $sync->{ debug } and myprint( "check_last_release: [$public_release]\n" ) ;
+ my $inline_help_when_on = '( Use --noreleasecheck to avoid this release check. )' ;
+
+ if ( $public_release eq 'unknown' ) {
+ return( 'Imapsync public release is unknown.' . $inline_help_when_on ) ;
+ }
+
+ if ( $public_release eq 'timeout' ) {
+ return( 'Imapsync public release is unknown (timeout).' . $inline_help_when_on ) ;
+ }
+
+ if ( ! is_a_release_number( $public_release ) ) {
+ return( "Imapsync public release is unknown ($public_release)." . $inline_help_when_on ) ;
+ }
+
+ my $imapsync_here = imapsync_version( $sync ) ;
+
+ if ( $public_release > $imapsync_here ) {
+ return( 'This imapsync is not up to date. ' . "( local $imapsync_here < official $public_release )" . $inline_help_when_on ) ;
+ }else{
+ return( 'This imapsync is up to date. ' . "( local $imapsync_here >= official $public_release )" . $inline_help_when_on ) ;
+ }
+
+ return( 'really unknown' ) ; # Should never arrive here
+}
+
+sub tests_check_last_release
+{
+ note( 'Entering tests_check_last_release()' ) ;
+
+ diag( check_last_release( 1.1 ) ) ;
+ # \Q \E here to avoid putting \ before each space
+ like( check_last_release( 1.1 ), qr/\Qis up to date\E/mxs, 'check_last_release: up to date' ) ;
+ like( check_last_release( 1.1 ), qr/1\.1/mxs, 'check_last_release: up to date, include number' ) ;
+ diag( check_last_release( 999.999 ) ) ;
+ like( check_last_release( 999.999 ), qr/\Qnot up to date\E/mxs, 'check_last_release: not up to date' ) ;
+ like( check_last_release( 999.999 ), qr/999\.999/mxs, 'check_last_release: not up to date, include number' ) ;
+ like( check_last_release( 'unknown' ), qr/\QImapsync public release is unknown\E/mxs, 'check_last_release: unknown' ) ;
+ like( check_last_release( 'timeout' ), qr/\QImapsync public release is unknown (timeout)\E/mxs, 'check_last_release: timeout' ) ;
+ like( check_last_release( 'lalala' ), qr/\QImapsync public release is unknown (lalala)\E/mxs, 'check_last_release: lalala' ) ;
+ diag( check_last_release( ) ) ;
+
+ note( 'Leaving tests_check_last_release()' ) ;
+ return ;
+}
+
+sub tests_imapsync_context
+{
+ note( 'Entering tests_imapsync_context()' ) ;
+
+ like( imapsync_context( ), qr/^CGI|^Docker|^DockerCGI|^Standard/, 'imapsync_context: CGI or Docker or DockerCGI or Standard' ) ;
+ note( 'Leaving tests_imapsync_context()' ) ;
+ return ;
+}
+
+sub imapsync_context
+{
+ my $mysync = shift ;
+
+ my $context = q{} ;
+
+ if ( under_docker_context( $mysync ) && under_cgi_context( $mysync ) )
+ {
+ $context = 'DockerCGI' ;
+ }
+ elsif ( under_docker_context( $mysync ) )
+ {
+ $context = 'Docker' ;
+ }
+ elsif ( under_cgi_context( $mysync ) )
+ {
+ $context = 'CGI' ;
+ }
+ else
+ {
+ $context = 'Standard' ;
+ }
+
+ return $context ;
+
+}
+
+sub imapsync_version
+{
+ my $mysync = shift ;
+ my $rcs = $mysync->{rcs} ;
+ my $version ;
+
+ $version = version_from_rcs( $rcs ) ;
+ return( $version ) ;
+}
+
+
+sub tests_version_from_rcs
+{
+ note( 'Entering tests_version_from_rcs()' ) ;
+
+ is( undef, version_from_rcs( ), 'version_from_rcs: no args => UNKNOWN' ) ;
+ is( 1.831, version_from_rcs( q{imapsync,v 1.831 2017/08/27} ), 'version_from_rcs: imapsync,v 1.831 2017/08/27 => 1.831' ) ;
+ is( 'UNKNOWN', version_from_rcs( 1.831 ), 'version_from_rcs: 1.831 => UNKNOWN' ) ;
+
+ note( 'Leaving tests_version_from_rcs()' ) ;
+ return ;
+}
+
+
+sub version_from_rcs
+{
+
+ my $rcs = shift ;
+ if ( ! $rcs ) { return ; }
+
+ my $version = 'UNKNOWN' ;
+
+ if ( $rcs =~ m{,v\s+(\d+\.\d+)}mxso ) {
+ $version = $1
+ }
+
+ return( $version ) ;
+}
+
+
+sub tests_imapsync_basename
+{
+ note( 'Entering tests_imapsync_basename()' ) ;
+
+ ok( imapsync_basename() =~ m/imapsync/, 'imapsync_basename: match imapsync');
+ ok( 'blabla' ne imapsync_basename(), 'imapsync_basename: do not equal blabla');
+
+ note( 'Leaving tests_imapsync_basename()' ) ;
+ return ;
+}
+
+sub imapsync_basename
+{
+
+ return basename( $PROGRAM_NAME ) ;
+
+}
+
+
+sub localhost_info
+{
+ my $mysync = shift ;
+ my( $infos ) = join( q{},
+ "Here is imapsync ", imapsync_version( $mysync ),
+ " on host " . hostname(),
+ ", a $OSNAME system with ",
+ ram_memory_info( ),
+ "\n",
+ 'with Perl ',
+ mysprintf( '%vd ', $PERL_VERSION),
+ "and Mail::IMAPClient $Mail::IMAPClient::VERSION",
+ ) ;
+ return( $infos ) ;
+}
+
+sub tests_cpu_number
+{
+ note( 'Entering tests_cpu_number()' ) ;
+
+ is( 1, is_an_integer( cpu_number( ) ), "cpu_number: is_an_integer" ) ;
+ ok( 1 <= cpu_number( ), "cpu_number: 1 or more" ) ;
+ is( 1, cpu_number( 1 ), "cpu_number: 1 => 1" ) ;
+ is( 1, cpu_number( $MINUS_ONE ), "cpu_number: -1 => 1" ) ;
+ is( 1, cpu_number( 'lalala' ), "cpu_number: lalala => 1" ) ;
+ is( $NUMBER_42, cpu_number( $NUMBER_42 ), "cpu_number: $NUMBER_42 => $NUMBER_42" ) ;
+ note( 'Leaving tests_cpu_number()' ) ;
+ return ;
+}
+
+sub cpu_number
+{
+
+ my $cpu_number_forced = shift ;
+ # Well, here 1 is better than 0 or undef
+ my $cpu_number = 1 ; # Default value, erased if better found
+
+ my @cpuinfo ;
+ if ( $ENV{"NUMBER_OF_PROCESSORS"} ) {
+ # might be under a Windows system
+ $cpu_number = $ENV{"NUMBER_OF_PROCESSORS"} ;
+ $sync->{ debug } and myprint( "Number of processors found by env var NUMBER_OF_PROCESSORS: $cpu_number\n" ) ;
+ }elsif ( 'darwin' eq $OSNAME or 'freebsd' eq $OSNAME ) {
+ $cpu_number = backtick( "sysctl -n hw.ncpu" ) ;
+ chomp( $cpu_number ) ;
+ $sync->{ debug } and myprint( "Number of processors found by cmd 'sysctl -n hw.ncpu': $cpu_number\n" ) ;
+ }elsif ( ! -e '/proc/cpuinfo' ) {
+ $sync->{ debug } and myprint( "Number of processors not found so I might assume there is only 1\n" ) ;
+ $cpu_number = 1 ;
+ }elsif( @cpuinfo = file_to_array( '/proc/cpuinfo' ) ) {
+ $cpu_number = grep { /^processor/mxs } @cpuinfo ;
+ $sync->{ debug } and myprint( "Number of processors found via /proc/cpuinfo: $cpu_number\n" ) ;
+ }
+
+ if ( defined $cpu_number_forced ) {
+ $cpu_number = $cpu_number_forced ;
+ }
+ return( integer_or_1( $cpu_number ) ) ;
+}
+
+
+sub tests_integer_or_1
+{
+ note( 'Entering tests_integer_or_1()' ) ;
+
+ is( 1, integer_or_1( ), 'integer_or_1: no args => 1' ) ;
+ is( 1, integer_or_1( undef ), 'integer_or_1: undef => 1' ) ;
+ is( $NUMBER_10, integer_or_1( $NUMBER_10 ), 'integer_or_1: 10 => 10' ) ;
+ is( 1, integer_or_1( q{} ), 'integer_or_1: empty string => 1' ) ;
+ is( 1, integer_or_1( 'lalala' ), 'integer_or_1: lalala => 1' ) ;
+
+ note( 'Leaving tests_integer_or_1()' ) ;
+ return ;
+}
+
+sub integer_or_1
+{
+ my $number = shift ;
+ if ( is_an_integer( $number ) ) {
+ return $number ;
+ }
+ # else
+ return 1 ;
+}
+
+sub tests_is_an_integer
+{
+ note( 'Entering tests_is_an_integer()' ) ;
+
+ is( undef, is_an_integer( ), 'is_an_integer: no args => undef ' ) ;
+ ok( is_an_integer( 1 ), 'is_an_integer: 1 => yes ') ;
+ ok( is_an_integer( $NUMBER_42 ), 'is_an_integer: 42 => yes ') ;
+ ok( is_an_integer( "$NUMBER_42" ), 'is_an_integer: "$NUMBER_42" => yes ') ;
+ ok( is_an_integer( '42' ), 'is_an_integer: "42" => yes ') ;
+ ok( is_an_integer( $NUMBER_104_857_600 ), 'is_an_integer: 104_857_600 => yes') ;
+ ok( is_an_integer( "$NUMBER_104_857_600" ), 'is_an_integer: "$NUMBER_104_857_600" => yes') ;
+ ok( is_an_integer( '104857600' ), 'is_an_integer: 104857600 => yes') ;
+ ok( ! is_an_integer( 'blabla' ), 'is_an_integer: blabla => no' ) ;
+ ok( ! is_an_integer( q{} ), 'is_an_integer: empty string => no' ) ;
+
+ note( 'Leaving tests_is_an_integer()' ) ;
+ return ;
+}
+
+sub is_an_integer
+{
+ my $number = shift ;
+ if ( ! defined $number ) { return ; }
+ return( $number =~ m{^\d+$}xo ) ;
+}
+
+
+
+
+sub tests_loadavg
+{
+ note( 'Entering tests_loadavg()' ) ;
+
+
+ SKIP: {
+ skip( 'Tests for darwin', 2 ) if ('darwin' ne $OSNAME) ;
+ is( undef, loadavg( '/noexist' ), 'loadavg: /noexist => undef' ) ;
+ is_deeply( [ '0.11', '0.22', '0.33' ],
+ [ loadavg( 'W/t/loadavg.out' ) ],
+ 'loadavg W/t/loadavg.out => 0.11 0.22 0.33' ) ;
+ } ;
+
+ SKIP: {
+ skip( 'Tests for linux', 3 ) if ('linux' ne $OSNAME) ;
+ is( undef, loadavg( '/noexist' ), 'loadavg: /noexist => undef' ) ;
+ ok( loadavg( ), 'loadavg: no args' ) ;
+
+ is_deeply( [ '0.39', '0.30', '0.37', '1/602' ],
+ [ loadavg( '0.39 0.30 0.37 1/602 6073' ) ],
+ 'loadavg 0.39 0.30 0.37 1/602 6073 => [0.39, 0.30, 0.37, 1/602]' ) ;
+ } ;
+
+ SKIP: {
+ skip( 'Tests for Windows', 1 ) if ('MSWin32' ne $OSNAME) ;
+ is_deeply( [ 0 ],
+ [ loadavg( ) ],
+ 'loadavg on MSWin32 => 0' ) ;
+
+ } ;
+
+ note( 'Leaving tests_loadavg()' ) ;
+ return ;
+}
+
+
+sub loadavg
+{
+ if ( 'linux' eq $OSNAME ) {
+ return ( loadavg_linux( @ARG ) ) ;
+ }
+ if ( 'freebsd' eq $OSNAME ) {
+ return ( loadavg_freebsd( @ARG ) ) ;
+ }
+ if ( 'darwin' eq $OSNAME ) {
+ return ( loadavg_darwin( @ARG ) ) ;
+ }
+ if ( 'MSWin32' eq $OSNAME ) {
+ return ( loadavg_windows( @ARG ) ) ;
+ }
+ return( 'unknown' ) ;
+
+}
+
+sub loadavg_linux
+{
+ my $line = shift ;
+
+ if ( ! $line ) {
+ $line = firstline( '/proc/loadavg' ) or return ;
+ }
+
+ my ( $avg_1_min, $avg_5_min, $avg_15_min, $current_runs ) = split /\s/mxs, $line ;
+ if ( all_defined( $avg_1_min, $avg_5_min, $avg_15_min ) ) {
+ $sync->{ debug } and myprint( "System load: $avg_1_min $avg_5_min $avg_15_min $current_runs\n" ) ;
+ return ( $avg_1_min, $avg_5_min, $avg_15_min, $current_runs ) ;
+ }
+ return ;
+}
+
+sub loadavg_freebsd
+{
+    # Load averages on FreeBSD, read from "sysctl vm.loadavg".
+    # $file: optional file whose first line replaces the sysctl output
+    # (used by the tests).
+    # Returns ( avg_1_min, avg_5_min, avg_15_min ) or an empty list on
+    # failure.
+    my $file = shift ;
+    # Example of output of command "sysctl vm.loadavg":
+    # vm.loadavg: { 0.15 0.08 0.08 }
+    my $loadavg ;
+
+    if ( ! defined $file ) {
+        eval {
+            $loadavg = `/sbin/sysctl vm.loadavg` ;
+        } ;
+        if ( $EVAL_ERROR ) { myprint( "[$EVAL_ERROR]\n" ) ; return ; }
+    }else{
+        $loadavg = firstline( $file ) or return ;
+    }
+
+    my ( $avg_1_min, $avg_5_min, $avg_15_min )
+      = $loadavg =~ /vm\.loadavg\s*[:=]\s*\{?\s*(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+\.?\d*)/mxs ;
+    # Guard against an unparsable line, as loadavg_linux does; without
+    # it the debug myprint and the return would propagate undef values.
+    if ( ! all_defined( $avg_1_min, $avg_5_min, $avg_15_min ) ) { return ; }
+    $sync->{ debug } and myprint( "System load: $avg_1_min $avg_5_min $avg_15_min\n" ) ;
+    return ( $avg_1_min, $avg_5_min, $avg_15_min ) ;
+}
+
+sub loadavg_darwin
+{
+    # Load averages on macOS, read from "sysctl vm.loadavg".
+    # $file: optional file whose first line replaces the sysctl output
+    # (used by the tests).
+    # Returns ( avg_1_min, avg_5_min, avg_15_min ) or an empty list on
+    # failure.
+    my $file = shift ;
+    # Example of output of command "sysctl vm.loadavg":
+    # vm.loadavg: { 0.15 0.08 0.08 }
+    my $loadavg ;
+
+    if ( ! defined $file ) {
+        eval {
+            $loadavg = `/usr/sbin/sysctl vm.loadavg` ;
+        } ;
+        if ( $EVAL_ERROR ) { myprint( "[$EVAL_ERROR]\n" ) ; return ; }
+    }else{
+        $loadavg = firstline( $file ) or return ;
+    }
+
+    my ( $avg_1_min, $avg_5_min, $avg_15_min )
+      = $loadavg =~ /vm\.loadavg\s*[:=]\s*\{?\s*(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+\.?\d*)/mxs ;
+    # Guard against an unparsable line, as loadavg_linux does; without
+    # it the debug myprint and the return would propagate undef values.
+    if ( ! all_defined( $avg_1_min, $avg_5_min, $avg_15_min ) ) { return ; }
+    $sync->{ debug } and myprint( "System load: $avg_1_min $avg_5_min $avg_15_min\n" ) ;
+    return ( $avg_1_min, $avg_5_min, $avg_15_min ) ;
+}
+
+sub loadavg_windows
+{
+    # CPU load on Windows, as a 0..1 fraction of LoadPercentage.
+    # $file: optional file containing "LoadPercentage" output (tests).
+    # Returns ( fraction ) or an empty list when nothing can be parsed.
+    my $file = shift ;
+    # Example of output of command "wmic cpu get loadpercentage":
+    # LoadPercentage
+    # 12
+    my $loadavg ;
+
+    if ( ! defined $file ) {
+        eval {
+            #$loadavg = `CMD wmic cpu get loadpercentage` ;
+            $loadavg = "LoadPercentage\n0\n" ;
+            #myprint( "LOADAVG WIN: $loadavg\n" ) ;
+        } ;
+        if ( $EVAL_ERROR ) { myprint( "[$EVAL_ERROR]\n" ) ; return ; }
+    }else{
+        $loadavg = file_to_string( $file ) or return ;
+        #myprint( "$loadavg" ) ;
+    }
+    # Check that the match succeeded before touching $1: an unmatched
+    # pattern leaves $1 undef (or stale), which the original code then
+    # divided and returned as garbage.
+    if ( $loadavg !~ /LoadPercentage\n(\d+)/xms ) { return ; }
+    my $num = $1 / 100 ;
+
+    $sync->{ debug } and myprint( "System load: $num\n" ) ;
+    return ( $num ) ;
+}
+
+
+
+
+
+
+sub tests_load_and_delay
+{
+    # Unit tests for load_and_delay(): argument validation and the
+    # per-core load thresholds that decide how long to postpone work.
+    note( 'Entering tests_load_and_delay()' ) ;
+
+    is( undef, load_and_delay( ), 'load_and_delay: no args => undef ' ) ;
+    is( undef, load_and_delay( 1 ), 'load_and_delay: not 4 args => undef ' ) ;
+    is( undef, load_and_delay( 0, 1, 1, 1 ), 'load_and_delay: division per 0 => undef ' ) ;
+    is( 0, load_and_delay( 1, 1, 1, 1 ), 'load_and_delay: one core, loads are all 1 => ok ' ) ;
+    is( 0, load_and_delay( 1, 1, 1, 1, 'lalala' ), 'load_and_delay: five arguments is ok' ) ;
+    is( 0, load_and_delay( 2, 2, 2, 2 ), 'load_and_delay: two core, loads are all 2 => ok ' ) ;
+    is( 0, load_and_delay( 2, 2, 4, 5 ), 'load_and_delay: two core, load1m is 2 => ok ' ) ;
+
+# Old behavior, rather strict
+    # is( 0, load_and_delay( 1, 0, 0, 0 ), 'load_and_delay: one core, load1m=0 load5m=0 load15m=0 => 0 ' ) ;
+    # is( 0, load_and_delay( 1, 0, 0, 2 ), 'load_and_delay: one core, load1m=0 load5m=0 load15m=2 => 0 ' ) ;
+    # is( 0, load_and_delay( 1, 0, 2, 0 ), 'load_and_delay: one core, load1m=0 load5m=2 load15m=0 => 0 ' ) ;
+    # is( 0, load_and_delay( 1, 0, 2, 2 ), 'load_and_delay: one core, load1m=0 load5m=2 load15m=2 => 0 ' ) ;
+    # is( 1, load_and_delay( 1, 2, 0, 0 ), 'load_and_delay: one core, load1m=2 load5m=0 load15m=0 => 1 ' ) ;
+    # is( 1, load_and_delay( 1, 2, 0, 2 ), 'load_and_delay: one core, load1m=2 load5m=0 load15m=2 => 1 ' ) ;
+    # is( 5, load_and_delay( 1, 2, 2, 0 ), 'load_and_delay: one core, load1m=2 load5m=2 load15m=0 => 5 ' ) ;
+    # is( 15, load_and_delay( 1, 2, 2, 2 ), 'load_and_delay: one core, load1m=2 load5m=2 load15m=2 => 15 ' ) ;
+
+    # is( 0, load_and_delay( 4, 0, 2, 2 ), 'load_and_delay: four core, load1m=0 load5m=2 load15m=2 => 0 ' ) ;
+    # is( 1, load_and_delay( 4, 8, 0, 0 ), 'load_and_delay: four core, load1m=2 load5m=0 load15m=0 => 1 ' ) ;
+    # is( 1, load_and_delay( 4, 8, 0, 2 ), 'load_and_delay: four core, load1m=2 load5m=0 load15m=2 => 1 ' ) ;
+    # is( 5, load_and_delay( 4, 8, 8, 0 ), 'load_and_delay: four core, load1m=2 load5m=2 load15m=0 => 5 ' ) ;
+    # is( 15, load_and_delay( 4, 8, 8, 8 ), 'load_and_delay: four core, load1m=2 load5m=2 load15m=2 => 15 ' ) ;
+
+# New behavior, tolerate more load
+
+    is( 0, load_and_delay( 1, 0, 0, 0 ), 'load_and_delay: one core, load1m=0 load5m=0 load15m=0 => 0 ' ) ;
+    is( 0, load_and_delay( 1, 0, 0, 2 ), 'load_and_delay: one core, load1m=0 load5m=0 load15m=2 => 0 ' ) ;
+    is( 0, load_and_delay( 1, 0, 2, 0 ), 'load_and_delay: one core, load1m=0 load5m=2 load15m=0 => 0 ' ) ;
+    is( 0, load_and_delay( 1, 0, 2, 2 ), 'load_and_delay: one core, load1m=0 load5m=2 load15m=2 => 0 ' ) ;
+    is( 0, load_and_delay( 1, 2, 0, 0 ), 'load_and_delay: one core, load1m=2 load5m=0 load15m=0 => 1 ' ) ;
+    is( 0, load_and_delay( 1, 2, 0, 2 ), 'load_and_delay: one core, load1m=2 load5m=0 load15m=2 => 1 ' ) ;
+    is( 0, load_and_delay( 1, 2, 2, 0 ), 'load_and_delay: one core, load1m=2 load5m=2 load15m=0 => 5 ' ) ;
+    is( 0, load_and_delay( 1, 2, 2, 2 ), 'load_and_delay: one core, load1m=2 load5m=2 load15m=2 => 15 ' ) ;
+
+    is( 1, load_and_delay( 1, 4, 0, 0 ), 'load_and_delay: one core, load1m=4 load5m=0 load15m=0 => 1 ' ) ;
+    is( 1, load_and_delay( 1, 4, 0, 4 ), 'load_and_delay: one core, load1m=4 load5m=0 load15m=4 => 1 ' ) ;
+    is( 5, load_and_delay( 1, 4, 4, 0 ), 'load_and_delay: one core, load1m=4 load5m=4 load15m=0 => 5 ' ) ;
+    is( 15, load_and_delay( 1, 4, 4, 4 ), 'load_and_delay: one core, load1m=4 load5m=4 load15m=4 => 15 ' ) ;
+
+    is( 0, load_and_delay( 4, 0, 9, 9 ), 'load_and_delay: four core, load1m=0 load5m=9 load15m=9 => 0 ' ) ;
+    is( 1, load_and_delay( 4, 9, 0, 0 ), 'load_and_delay: four core, load1m=9 load5m=0 load15m=0 => 1 ' ) ;
+    is( 1, load_and_delay( 4, 9, 0, 9 ), 'load_and_delay: four core, load1m=9 load5m=0 load15m=9 => 1 ' ) ;
+    is( 5, load_and_delay( 4, 9, 9, 0 ), 'load_and_delay: four core, load1m=9 load5m=9 load15m=0 => 5 ' ) ;
+    is( 15, load_and_delay( 4, 9, 9, 9 ), 'load_and_delay: four core, load1m=9 load5m=9 load15m=9 => 15 ' ) ;
+
+    note( 'Leaving tests_load_and_delay()' ) ;
+    return ;
+}
+
+sub load_and_delay
+{
+    # Decide how many minutes to wait before doing more work, from the
+    # machine load. Arguments: cpu count, then the 1, 5 and 15 minute
+    # load averages. Returns 0 when the load per core is acceptable
+    # (<= 2), otherwise 1, 5 or 15 minutes, and undef on bad arguments.
+
+    # Not enough arguments
+    if ( 4 > scalar @ARG ) { return ; }
+
+    my ( $cpu_num, $avg_1_min, $avg_5_min, $avg_15_min ) = @ARG ;
+
+    # Avoid dividing by zero
+    if ( 0 == $cpu_num ) { return ; }
+
+    # Work with per-core figures
+    my ( $core_1_min, $core_5_min, $core_15_min )
+      = map { $_ / $cpu_num } ( $avg_1_min, $avg_5_min, $avg_15_min ) ;
+
+    # The first acceptable average wins, so it is a OR
+    return 0  if $core_1_min  <= 2 ;
+    return 1  if $core_5_min  <= 2 ;  # Retry in 1 minute
+    return 5  if $core_15_min <= 2 ;  # Retry in 5 minutes
+    return 15 ;                       # Retry in 15 minutes
+}
+
+sub ram_memory_info
+{
+    # Human-readable "free/total GiB of RAM" string.
+    # Sys::MemInfo returns bytes, hence the division by $KIBI ** 3
+    # (1024 * 1024 * 1024).
+    return(
+        sprintf( "%.1f/%.1f free GiB of RAM",
+            Sys::MemInfo::get("freemem") / ( $KIBI ** 3 ),
+            Sys::MemInfo::get("totalmem") / ( $KIBI ** 3 ),
+        )
+    ) ;
+}
+
+
+
+sub tests_memory_stress
+{
+    # Smoke test: memory_stress() prints its reports and returns undef.
+    note( 'Entering tests_memory_stress()' ) ;
+
+    is( undef, memory_stress( ), 'memory_stress: => undef' ) ;
+
+    note( 'Leaving tests_memory_stress()' ) ;
+    return ;
+}
+
+sub memory_stress
+{
+    # Allocate roughly totalram/1.7 MiB of memory, printing the process
+    # memory consumption before and after, to exercise the memory
+    # accounting helpers. Returns nothing.
+    my $total_ram_in_MB = Sys::MemInfo::get("totalmem") / ( $KIBI * $KIBI ) ;
+    my $i = 1 ;
+    # Lexical ballast: the previous code appended to the package global
+    # $a (sort's comparison variable), leaking the whole allocation past
+    # the sub's lifetime.
+    my $ballast = q{} ;
+
+    myprintf("Stress memory consumption before: %.1f MiB\n", memory_consumption( ) / $KIBI / $KIBI ) ;
+    while ( $i < $total_ram_in_MB / 1.7 ) { $ballast .= "A" x 1_000_000 ; $i++ }
+    myprintf("Stress memory consumption after: %.1f MiB\n", memory_consumption( ) / $KIBI / $KIBI ) ;
+    return ;
+}
+
+sub tests_memory_consumption
+{
+    # Smoke tests: the memory helpers return something numeric for the
+    # current process and arbitrary PIDs.
+    note( 'Entering tests_memory_consumption()' ) ;
+
+    like( memory_consumption( ), qr{\d+}xms,'memory_consumption no args') ;
+    like( memory_consumption( 1 ), qr{\d+}xms,'memory_consumption 1') ;
+    like( memory_consumption( $PROCESS_ID ), qr{\d+}xms,"memory_consumption_of_pids $PROCESS_ID") ;
+
+    like( memory_consumption_ratio(), qr{\d+}xms, 'memory_consumption_ratio' ) ;
+    like( memory_consumption_ratio(1), qr{\d+}xms, 'memory_consumption_ratio 1' ) ;
+    like( memory_consumption_ratio(10), qr{\d+}xms, 'memory_consumption_ratio 10' ) ;
+
+    like( memory_consumption(), qr{\d+}xms, "memory_consumption\n" ) ;
+
+    note( 'Leaving tests_memory_consumption()' ) ;
+    return ;
+}
+
+sub memory_consumption
+{
+    # Memory consumed by the current imapsync process, in bytes.
+    my @consumptions = memory_consumption_of_pids( ) ;
+    return( $consumptions[0] ) ;
+}
+
+sub debugmemory
+{
+    # When the --debugmemory option is set on the sync, return a
+    # printable line with the current memory consumption in MiB;
+    # otherwise return an empty string. $precision is appended verbatim
+    # to the message (e.g. " at step X").
+    my $mysync = shift ;
+    if ( ! $mysync->{debugmemory} ) { return q{} ; }
+
+    my $precision = shift ;
+    return( mysprintf( "Memory consumption$precision: %.1f MiB\n", memory_consumption( ) / $KIBI / $KIBI ) ) ;
+}
+
+sub memory_consumption_of_pids
+{
+    # Virtual memory size, in bytes, of each given PID; defaults to the
+    # current process when no PID is given. Uses tasklist on
+    # Windows/Cygwin and "ps -o vsz" elsewhere. Returns one value per
+    # PID, in the order the system tool reports them.
+    my @pid = @_;
+    @pid = ( @pid ) ? @pid : ( $PROCESS_ID ) ;
+
+    $sync->{ debug } and myprint( "memory_consumption_of_pids PIDs: @pid\n" ) ;
+    my @val ;
+    if ( ( 'MSWin32' eq $OSNAME ) or ( 'cygwin' eq $OSNAME ) ) {
+        @val = memory_consumption_of_pids_win32( @pid ) ;
+    }else{
+        # Unix
+        my @ps = qx{ ps -o vsz -p @pid } ;
+        #myprint( "ps: @ps" ) ;
+
+        # Use IPC::Open3 from perlcrit -3
+        # It stalls on Darwin, don't understand why!
+        #my @ps = backtick( "ps -o vsz -p @pid" ) ;
+        #myprint( "ps: @ps" ) ;
+
+        shift @ps; # First line is column name "VSZ"
+        chomp @ps;
+        # convert to octets (ps reports KiB)
+
+        @val = map { $_ * $KIBI } @ps ;
+    }
+    $sync->{ debug } and myprint( "@val\n" ) ;
+    return( @val ) ;
+}
+
+sub memory_consumption_of_pids_win32
+{
+    # Windows
+    # Memory usage, in bytes, of each requested PID, scraped from the
+    # CSV output of "tasklist". Returns one value per matched PID.
+    my @PID = @_;
+    my %PID;
+    # hash of pids as key values
+    map { $PID{$_}++ } @PID;
+
+    # Does not work but should work reading the tasklist documentation
+    #@ps = qx{ tasklist /FI "PID eq @PID" };
+
+    my @ps = qx{ tasklist /NH /FO CSV } ;
+    #my @ps = backtick( 'tasklist /NH /FO CSV' ) ;
+    #myprint( "-" x $STD_CHAR_PER_LINE, "\n", @ps, "-" x $STD_CHAR_PER_LINE, "\n" ) ;
+    my @val;
+    foreach my $line (@ps) {
+        # NOTE(review): a bare comma split misparses image names that
+        # contain quoted commas -- confirm whether that occurs in practice.
+        my($name, $pid, $mem) = (split ',', $line )[0,1,4];
+        next if (! $pid);
+        #myprint( "[$name][$pid][$mem]" ) ;
+        if ($PID{remove_qq($pid)}) {
+            #myprint( "MATCH !\n" ) ;
+            chomp $mem ;
+            # Strip quotes, the "Ko" unit suffix, then anything non-numeric
+            $mem = remove_qq($mem);
+            $mem = remove_Ko($mem);
+            $mem = remove_not_num($mem);
+            #myprint( "[$mem]\n" ) ;
+            push @val, $mem * $KIBI;
+        }
+    }
+    return(@val);
+}
+
+
+sub tests_backtick
+{
+    # Unit tests for backtick(): argument checking, list and scalar
+    # context behavior, on both Windows (CRLF output) and Unix.
+    note( 'Entering tests_backtick()' ) ;
+
+    is( undef, backtick( ), 'backtick: no args' ) ;
+    is( undef, backtick( q{} ), 'backtick: empty command' ) ;
+
+    SKIP: {
+        skip( 'test for MSWin32', 5 ) if ('MSWin32' ne $OSNAME) ;
+        my @output ;
+        @output = backtick( 'echo Hello World!' ) ;
+        # Add \r on Windows.
+        ok( "Hello World!\r\n" eq $output[0], 'backtick: echo Hello World!' ) ;
+        $sync->{ debug } and myprint( "[@output]" ) ;
+        @output = backtick( 'echo Hello & echo World!' ) ;
+        ok( "Hello \r\n" eq $output[0], 'backtick: echo Hello & echo World! line 1' ) ;
+        ok( "World!\r\n" eq $output[1], 'backtick: echo Hello & echo World! line 2' ) ;
+        $sync->{ debug } and myprint( "[@output][$output[0]][$output[1]]" ) ;
+        # Scalar context
+        ok( "Hello World!\r\n" eq backtick( 'echo Hello World!' ),
+            'backtick: echo Hello World! scalar' ) ;
+        ok( "Hello \r\nWorld!\r\n" eq backtick( 'echo Hello & echo World!' ),
+            'backtick: echo Hello & echo World! scalar 2 lines' ) ;
+    } ;
+    SKIP: {
+        skip( 'test for Unix', 7 ) if ('MSWin32' eq $OSNAME) ;
+        is( undef, backtick( 'aaaarrrg' ), 'backtick: aaaarrrg command not found' ) ;
+        # Array context
+        my @output ;
+        @output = backtick( 'echo Hello World!' ) ;
+        ok( "Hello World!\n" eq $output[0], 'backtick: echo Hello World!' ) ;
+        $sync->{ debug } and myprint( "[@output]" ) ;
+        @output = backtick( "echo Hello\necho World!" ) ;
+        ok( "Hello\n" eq $output[0], 'backtick: echo Hello; echo World! line 1' ) ;
+        ok( "World!\n" eq $output[1], 'backtick: echo Hello; echo World! line 2' ) ;
+        $sync->{ debug } and myprint( "[@output]" ) ;
+        # Scalar context
+        ok( "Hello World!\n" eq backtick( 'echo Hello World!' ),
+            'backtick: echo Hello World! scalar' ) ;
+        ok( "Hello\nWorld!\n" eq backtick( "echo Hello\necho World!" ),
+            'backtick: echo Hello; echo World! scalar 2 lines' ) ;
+        # Return error positive value, that's ok
+        is( undef, backtick( 'false' ), 'backtick: false returns no output' ) ;
+        my $mem = backtick( "ps -o vsz -p $PROCESS_ID" ) ;
+        $sync->{ debug } and myprint( "MEM=$mem\n" ) ;
+
+    }
+
+    note( 'Leaving tests_backtick()' ) ;
+    return ;
+}
+
+
+sub backtick
+{
+    # Run an external command via IPC::Open3 and return its standard
+    # output: a list of lines in list context, a single joined string in
+    # scalar context. Returns undef/empty list when the command is
+    # missing, fails to start, or produces no output.
+    my $command = shift ;
+
+    if ( ! $command ) { return ; }
+
+    my ( $writer, $reader, $err ) ;
+    my @output ;
+    my $pid ;
+    my $eval = eval {
+        $pid = IPC::Open3::open3( $writer, $reader, $err, $command ) ;
+    } ;
+    if ( $EVAL_ERROR ) {
+        myprint( $EVAL_ERROR ) ;
+        return ;
+    }
+    if ( ! $eval ) { return ; }
+    if ( ! $pid ) { return ; }
+    # Drain the pipe BEFORE reaping the child: calling waitpid first can
+    # deadlock once the child fills the pipe buffer and blocks writing.
+    @output = <$reader>; # Output here
+    waitpid( $pid, 0 ) ;
+    #
+    #my @errors = <$err>; #Errors here, instead of the console
+    if ( not @output ) { return ; }
+    #myprint( @output ) ;
+
+    # open3 via a shell reports an unrunnable command on stdout
+    if ( $output[0] =~ /\Qopen3: exec of $command failed\E/mxs ) { return ; }
+    if ( wantarray ) {
+        return( @output ) ;
+    } else {
+        return( join( q{}, @output) ) ;
+    }
+}
+
+
+
+sub tests_check_binary_embed_all_dyn_libs
+{
+    # Smoke test: when running as plain Perl the check must succeed.
+    note( 'Entering tests_check_binary_embed_all_dyn_libs()' ) ;
+
+    is( 1, check_binary_embed_all_dyn_libs( ), 'check_binary_embed_all_dyn_libs: no args => 1' ) ;
+
+    note( 'Leaving tests_check_binary_embed_all_dyn_libs()' ) ;
+
+    return ;
+}
+
+
+sub check_binary_embed_all_dyn_libs
+{
+    # Check whether this process loads dynamic libraries from outside
+    # the packaged (PAR) binary. Returns 1 when nothing suspicious is
+    # found, or when not running as a packaged binary; returns 0 when a
+    # packaged binary (Darwin or .exe) uses outside libraries.
+    my @outside_libs = search_dyn_lib_locale( ) ;
+
+    if ( ! @outside_libs )
+    {
+        # Found only embedded dynamic lib
+        myprint( "Found nothing\n" ) ;
+        return 1 ;
+    }
+
+    myprint( "Found myself $PROGRAM_NAME pid $PROCESS_ID using locale dynamic libraries that seems out of myself:\n" ) ;
+    myprint( @outside_libs ) ;
+    # Only the packaged binaries have to be self-contained
+    if ( $PROGRAM_NAME =~ m{imapsync_bin_Darwin}
+      or $PROGRAM_NAME =~ m{imapsync.*\.exe} )
+    {
+        return 0 ;
+    }
+    # is always ok for non binary
+    return 1 ;
+}
+
+sub search_dyn_lib_locale
+{
+    # Look for dynamic libraries loaded from outside the packaged
+    # binary, using the helper for the current OS. Returns the helper's
+    # output lines, or an empty list on unsupported systems.
+    my %searcher_of_os = (
+        'darwin'  => \&search_dyn_lib_locale_darwin,
+        'linux'   => \&search_dyn_lib_locale_linux,
+        'MSWin32' => \&search_dyn_lib_locale_MSWin32,
+    ) ;
+
+    my $searcher = $searcher_of_os{ $OSNAME } ;
+    if ( defined $searcher )
+    {
+        return $searcher->( ) ;
+    }
+    return ;
+}
+
+sub search_dyn_lib_locale_darwin
+{
+    # List the regular-file .dylib entries opened by this process that
+    # do not come from a PAR extraction directory ('/par-').
+    # NOTE(review): the unquoted grep pattern .dylib lets the dot match
+    # any character -- confirm a literal dot was intended.
+    my $command = qq{ lsof -p $PID | grep ' REG ' | grep .dylib | grep -v '/par-' } ;
+    myprint( "Search non embeded dynamic libs with the command: $command\n" ) ;
+    return backtick( $command ) ;
+}
+
+sub search_dyn_lib_locale_linux
+{
+    # List the regular-file shared objects (.so) opened by this process
+    # that do not come from the PAR extraction directory /tmp/par-.
+    # The backslash is doubled so grep receives the pattern \.so with a
+    # literal dot: inside qq{}, a single "\." collapses to ".", which
+    # grep would treat as "any character followed by so".
+    my $command = qq{ lsof -p $PID | grep ' REG ' | grep -v '/tmp/par-' | grep '\\.so' } ;
+    myprint( "Search non embeded dynamic libs with the command: $command\n" ) ;
+    return backtick( $command ) ;
+}
+
+sub search_dyn_lib_locale_MSWin32
+{
+    # List the DLLs this process loads from the Strawberry Perl tree,
+    # i.e. libraries that are not embedded in the packaged binary.
+    # Relies on Sysinternals Listdlls.exe being available in the PATH.
+    my $command = qq{ Listdlls.exe $PID|findstr Strawberry } ;
+    # $command = qq{ Listdlls.exe $PID|findstr Strawberry } ;
+    myprint( "Search non embeded dynamic libs with the command: $command\n" ) ;
+    return qx( $command ) ;
+}
+
+
+
+sub remove_not_num
+{
+    # Keep only the decimal digits of the string, dropping every other
+    # character.
+    my $string = shift ;
+    $string =~ s/[^0-9]//gxms ;
+    return( $string ) ;
+}
+
+sub tests_remove_not_num
+{
+    # Unit tests for remove_not_num(): digits survive, everything else
+    # is stripped.
+    note( 'Entering tests_remove_not_num()' ) ;
+
+    ok( '123' eq remove_not_num( 123 ), 'remove_not_num( 123 )' ) ;
+    ok( '123' eq remove_not_num( '123' ), q{remove_not_num( '123' )} ) ;
+    ok( '123' eq remove_not_num( '12 3' ), q{remove_not_num( '12 3' )} ) ;
+    ok( '123' eq remove_not_num( 'a 12 3 Ko' ), q{remove_not_num( 'a 12 3 Ko' )} ) ;
+
+    note( 'Leaving tests_remove_not_num()' ) ;
+    return ;
+}
+
+sub remove_Ko
+{
+    # Strip a trailing " Ko" (kilobyte) unit suffix when present;
+    # otherwise return the string unchanged.
+    my $string = shift;
+    my ( $bare ) = $string =~ /^(.*)\sKo$/xo ;
+    return( defined $bare ? $bare : $string ) ;
+}
+
+sub remove_qq
+{
+    # Remove one pair of surrounding double quotes when the string is
+    # fully quoted; otherwise return the string unchanged.
+    my $string = shift;
+    my ( $bare ) = $string =~ /^"(.*)"$/xo ;
+    return( defined $bare ? $bare : $string ) ;
+}
+
+sub memory_consumption_ratio
+{
+    # Current memory consumption divided by $base; $base defaults to 1,
+    # so with no argument this is simply the consumption in bytes.
+    my ( $base ) = @_ ;
+    $base = 1 if not $base ;
+    return( memory_consumption( ) / $base ) ;
+}
+
+
+sub date_from_rcs
+{
+    # Convert an RCS Date tag such as "2015/07/10 11:05:59" into the
+    # IMAP internal date format "10-Jul-2015 11:05:59 +0000". A string
+    # without an RCS date is returned unchanged.
+    my $d = shift ;
+
+    my %month_of = (
+        '01' => 'Jan', '02' => 'Feb', '03' => 'Mar', '04' => 'Apr',
+        '05' => 'May', '06' => 'Jun', '07' => 'Jul', '08' => 'Aug',
+        '09' => 'Sep', '10' => 'Oct', '11' => 'Nov', '12' => 'Dec',
+    ) ;
+    if ($d =~ m{(\d{4})/(\d{2})/(\d{2})\s(\d{2}):(\d{2}):(\d{2})}xo ) {
+        # Handles the following format
+        # 2015/07/10 11:05:59 -- Generated by RCS Date tag.
+        my ( $year, $month_num, $day, $hour, $min, $sec ) = ( $1, $2, $3, $4, $5, $6 ) ;
+        my $month = $month_of{ $month_num } ;
+        $d = "$day-$month-$year $hour:$min:$sec +0000" ;
+    }
+    return( $d ) ;
+}
+
+sub tests_date_from_rcs
+{
+    # Unit test for date_from_rcs() on a typical RCS Date tag.
+    note( 'Entering tests_date_from_rcs()' ) ;
+
+    ok('19-Sep-2015 16:11:07 +0000'
+       eq date_from_rcs('Date: 2015/09/19 16:11:07 '), 'date_from_rcs from RCS date' ) ;
+
+    note( 'Leaving tests_date_from_rcs()' ) ;
+    return ;
+}
+
+sub good_date
+{
+    # Normalize a date string from any of several known mail/header
+    # formats into the IMAP internal date format, wrapped in double
+    # quotes: "24-Aug-2010 16:00:00 +0200". The regex branches are tried
+    # in order and the first one that matches wins; an unrecognized
+    # string is returned unchanged (and unquoted).
+
+    # two incoming formats:
+    # header   Tue, 24 Aug 2010 16:00:00 +0200
+    # internal      24-Aug-2010 16:00:00 +0200
+
+    # outgoing format: internal date format
+    #               24-Aug-2010 16:00:00 +0200
+
+    my $d = shift ;
+    return(q{}) if not defined $d;
+
+    SWITCH: {
+        # Already internal format, possibly with a 1-digit day or a
+        # missing zone
+        if ( $d =~ m{(\d?)(\d-...-\d{4})(\s\d{2}:\d{2}:\d{2})(\s(?:\+|-)\d{4})?}xo ) {
+            #myprint( "internal: [$1][$2][$3][$4]\n" ) ;
+            my ($day_1, $date_rest, $hour, $zone) = ($1,$2,$3,$4) ;
+            $day_1 = '0' if ($day_1 eq q{}) ;
+            $zone  = ' +0000' if not defined $zone ;
+            $d = $day_1 . $date_rest . $hour . $zone ;
+            last SWITCH ;
+        }
+
+        if ($d =~ m{(?:\w{3,},\s)?(\d{1,2}),?\s+(\w{3,})\s+(\d{2,4})\s+(\d{1,2})(?::|\.)(\d{1,2})(?:(?::|\.)(\d{1,2}))?\s*((?:\+|-)\d{4})?}xo ) {
+            # Handles any combination of following formats
+            # Tue, 24 Aug 2010 16:00:00 +0200 -- Standard
+            # 24 Aug 2010 16:00:00 +0200      -- Missing Day of Week
+            # Tue, 24 Aug 97 16:00:00 +0200   -- Two digit year
+            # Tue, 24 Aug 1997 16.00.00 +0200 -- Periods instead of colons
+            # Tue, 24 Aug 1997  16:00:00 +0200 -- Extra whitespace between year and hour
+            # Tue, 24 Aug 1997 6:5:2 +0200    -- Single digit hour, min, or second
+            # Tue, 24, Aug 1997 16:00:00 +0200 -- Extra comma
+
+            #myprint( "header: [$1][$2][$3][$4][$5][$6][$7][$8]\n" ) ;
+            my ($day, $month, $year, $hour, $min, $sec, $zone) = ($1,$2,$3,$4,$5,$6,$7,$8);
+            # Two-digit years: 70-99 => 19xx, otherwise 20xx
+            $year = '19' . $year if length($year) == 2 && $year =~ m/^[789]/xo;
+            $year = '20' . $year if length($year) == 2;
+
+            $month = substr $month, 0, 3 if length($month) > 4;
+            $day = mysprintf( '%02d', $day);
+            $hour = mysprintf( '%02d', $hour);
+            $min = mysprintf( '%02d', $min);
+            $sec = '00' if not defined $sec ;
+            $sec = mysprintf( '%02d', $sec ) ;
+            $zone = '+0000' if not defined $zone ;
+            $d = "$day-$month-$year $hour:$min:$sec $zone" ;
+            last SWITCH ;
+        }
+
+        if ($d =~ m{(?:.{3})\s(...)\s+(\d{1,2})\s(\d{1,2}):(\d{1,2}):(\d{1,2})\s(?:\w{3})?\s?(\d{4})}xo ) {
+            # Handles any combination of following formats
+            # Sun Aug 20 11:55:09 2006
+            # Wed Jan 24 11:58:38 MST 2007
+            # Wed Jan  2 08:40:57 2008
+
+            #myprint( "header: [$1][$2][$3][$4][$5][$6]\n" ) ;
+            my ($month, $day, $hour, $min, $sec, $year) = ($1,$2,$3,$4,$5,$6);
+            $day = mysprintf( '%02d', $day ) ;
+            $hour = mysprintf( '%02d', $hour ) ;
+            $min = mysprintf( '%02d', $min ) ;
+            $sec = mysprintf( '%02d', $sec ) ;
+            $d = "$day-$month-$year $hour:$min:$sec +0000" ;
+            last SWITCH ;
+        }
+        my %num2mon = qw( 01 Jan 02 Feb 03 Mar 04 Apr 05 May 06 Jun 07 Jul 08 Aug 09 Sep 10 Oct 11 Nov 12 Dec ) ;
+
+        if ($d =~ m{(\d{4})/(\d{2})/(\d{2})\s(\d{2}):(\d{2}):(\d{2})}xo ) {
+            # Handles the following format
+            # 2015/07/10 11:05:59 -- Generated by RCS Date tag.
+            #myprint( "$d\n" ) ;
+            #myprint( "header: [$1][$2][$3][$4][$5][$6]\n" ) ;
+            my ($year, $month, $day, $hour, $min, $sec) = ($1,$2,$3,$4,$5,$6) ;
+            $month = $num2mon{$month} ;
+            $d = "$day-$month-$year $hour:$min:$sec +0000" ;
+            #myprint( "$d\n" ) ;
+            last SWITCH ;
+        }
+
+        if ($d =~ m{(\d{2})/(\d{2})/(\d{2})\s(\d{2}):(\d{2}):(\d{2})}xo ) {
+            # Handles the following format
+            # 02/06/09 22:18:08 -- Generated by AVTECH TemPageR devices
+
+            #myprint( "header: [$1][$2][$3][$4][$5][$6]\n" ) ;
+            my ($month, $day, $year, $hour, $min, $sec) = ($1,$2,$3,$4,$5,$6);
+            $year = '20' . $year;
+            $month = $num2mon{$month};
+            $d = "$day-$month-$year $hour:$min:$sec +0000";
+            last SWITCH ;
+        }
+
+        if ($d =~ m{\w{6,},\s(\w{3})\w+\s+(\d{1,2}),\s(\d{4})\s(\d{2}):(\d{2})\s(AM|PM)}xo ) {
+            # Handles the following format
+            # Saturday, December 14, 2002 05:00 PM - KBtoys.com order confirmations
+
+            my ($month, $day, $year, $hour, $min, $apm) = ($1,$2,$3,$4,$5,$6);
+
+            # NOTE(review): "12:xx PM" becomes 24:xx here -- confirm
+            # whether noon/midnight inputs can occur.
+            $hour += 12 if $apm eq 'PM' ;
+            $day = mysprintf( '%02d', $day ) ;
+            $d = "$day-$month-$year $hour:$min:00 +0000" ;
+            last SWITCH ;
+        }
+
+        if ($d =~ m{(\w{3})\s(\d{1,2})\s(\d{4})\s(\d{2}):(\d{2}):(\d{2})\s((?:\+|-)\d{4})}xo ) {
+            # Handles the following format
+            # Saturday, December 14, 2002 05:00 PM - jr.com order confirmations
+
+            my ($month, $day, $year, $hour, $min, $sec, $zone) = ($1,$2,$3,$4,$5,$6,$7);
+
+            $day = mysprintf( '%02d', $day ) ;
+            $d = "$day-$month-$year $hour:$min:$sec $zone";
+            last SWITCH ;
+        }
+
+        if ($d =~ m{(\d{1,2})-(\w{3})-(\d{4})}xo ) {
+            # Handles the following format
+            # 21-Jun-2001 - register.com domain transfer email circa 2001
+
+            my ($day, $month, $year) = ($1,$2,$3);
+            $day = mysprintf( '%02d', $day);
+            # Arbitrary fixed time of day, only the date was given
+            $d = "$day-$month-$year 11:11:11 +0000";
+            last SWITCH ;
+        }
+
+        # unknown or unmatch => return same string
+        return($d);
+    }
+
+    # Matched branches fall through to here: quote for IMAP
+    $d = qq("$d") ;
+    return( $d ) ;
+}
+
+
+sub tests_good_date
+{
+    # Unit tests for good_date() covering every supported input format.
+    note( 'Entering tests_good_date()' ) ;
+
+    ok(q{} eq good_date(), 'good_date no arg');
+    ok('"24-Aug-2010 16:00:00 +0200"' eq good_date('24-Aug-2010 16:00:00 +0200'), 'good_date internal 2digit zone');
+    ok('"24-Aug-2010 16:00:00 +0000"' eq good_date('24-Aug-2010 16:00:00'), 'good_date internal 2digit no zone');
+    ok('"01-Sep-2010 16:00:00 +0200"' eq good_date( '1-Sep-2010 16:00:00 +0200'), 'good_date internal SP 1digit');
+    ok('"24-Aug-2010 16:00:00 +0200"' eq good_date('Tue, 24 Aug 2010 16:00:00 +0200'), 'good_date header 2digit zone');
+    ok('"01-Sep-2010 16:00:00 +0000"' eq good_date('Wed, 1 Sep 2010 16:00:00'), 'good_date header SP 1digit zone');
+    ok('"01-Sep-2010 16:00:00 +0200"' eq good_date('Wed, 1 Sep 2010 16:00:00 +0200'), 'good_date header SP 1digit zone');
+    ok('"01-Sep-2010 16:00:00 +0200"' eq good_date('Wed, 1 Sep 2010 16:00:00 +0200 (CEST)'), 'good_date header SP 1digit zone');
+    ok('"06-Feb-2009 22:18:08 +0000"' eq good_date('02/06/09 22:18:08'), 'good_date header TemPageR');
+    ok('"02-Jan-2008 08:40:57 +0000"' eq good_date('Wed Jan  2 08:40:57 2008'), 'good_date header dice.com support 1digit day');
+    ok('"20-Aug-2006 11:55:09 +0000"' eq good_date('Sun Aug 20 11:55:09 2006'), 'good_date header dice.com support 2digit day');
+    ok('"24-Jan-2007 11:58:38 +0000"' eq good_date('Wed Jan 24 11:58:38 MST 2007'), 'good_date header status-now.com');
+    ok('"24-Aug-2010 16:00:00 +0200"' eq good_date('24 Aug 2010 16:00:00 +0200'), 'good_date header missing date of week');
+    ok('"24-Aug-2067 16:00:00 +0200"' eq good_date('Tue, 24 Aug 67 16:00:00 +0200'), 'good_date header 2digit year');
+    ok('"24-Aug-1977 16:00:00 +0200"' eq good_date('Tue, 24 Aug 77 16:00:00 +0200'), 'good_date header 2digit year');
+    ok('"24-Aug-1987 16:00:00 +0200"' eq good_date('Tue, 24 Aug 87 16:00:00 +0200'), 'good_date header 2digit year');
+    ok('"24-Aug-1997 16:00:00 +0200"' eq good_date('Tue, 24 Aug 97 16:00:00 +0200'), 'good_date header 2digit year');
+    ok('"24-Aug-2004 16:00:00 +0200"' eq good_date('Tue, 24 Aug 04 16:00:00 +0200'), 'good_date header 2digit year');
+    ok('"24-Aug-1997 16:00:00 +0200"' eq good_date('Tue, 24 Aug 1997 16.00.00 +0200'), 'good_date header period time sep');
+    ok('"24-Aug-1997 16:00:00 +0200"' eq good_date('Tue, 24 Aug 1997  16:00:00 +0200'), 'good_date header extra white space type1');
+    ok('"24-Aug-1997 05:06:02 +0200"' eq good_date('Tue, 24 Aug 1997 5:6:2 +0200'), 'good_date header 1digit time vals');
+    ok('"24-Aug-1997 05:06:02 +0200"' eq good_date('Tue, 24, Aug 1997 05:06:02 +0200'), 'good_date header extra commas');
+    ok('"01-Oct-2003 12:45:24 +0000"' eq good_date('Wednesday, 01 October 2003 12:45:24 CDT'), 'good_date header no abbrev');
+    ok('"11-Jan-2005 17:58:27 -0500"' eq good_date('Tue, 11 Jan 2005  17:58:27 -0500'), 'good_date extra white space');
+    ok('"18-Dec-2002 15:07:00 +0000"' eq good_date('Wednesday, December 18, 2002 03:07 PM'), 'good_date kbtoys.com orders');
+    ok('"16-Dec-2004 02:01:49 -0500"' eq good_date('Dec 16 2004 02:01:49 -0500'), 'good_date jr.com orders');
+    ok('"21-Jun-2001 11:11:11 +0000"' eq good_date('21-Jun-2001'), 'good_date register.com domain transfer');
+    ok('"18-Nov-2012 18:34:38 +0100"' eq good_date('Sun, 18 Nov 2012 18:34:38 +0100'), 'good_date pop2imap bug (Westeuropäische Normalzeit)');
+    ok('"19-Sep-2015 16:11:07 +0000"' eq good_date('Date: 2015/09/19 16:11:07 '), 'good_date from RCS date' ) ;
+
+    note( 'Leaving tests_good_date()' ) ;
+    return ;
+}
+
+
+sub tests_list_keys_in_2_not_in_1
+{
+    # Unit tests for list_keys_in_2_not_in_1() on small literal hashes.
+    note( 'Entering tests_list_keys_in_2_not_in_1()' ) ;
+
+    # NOTE(review): @list is declared but never used in this sub.
+    my @list;
+    ok( ! list_keys_in_2_not_in_1( {}, {}), 'list_keys_in_2_not_in_1: {} {}');
+    ok( 0 == compare_lists( [], [ list_keys_in_2_not_in_1( {}, {} ) ] ), 'list_keys_in_2_not_in_1: {} {}');
+    ok( 0 == compare_lists( ['a','b'], [ list_keys_in_2_not_in_1( {}, {'a' => 1, 'b' => 1}) ]), 'list_keys_in_2_not_in_1: {} {a, b}');
+    ok( 0 == compare_lists( ['b'], [ list_keys_in_2_not_in_1( {'a' => 1}, {'a' => 1, 'b' => 1}) ]), 'list_keys_in_2_not_in_1: {a} {a, b}');
+    ok( 0 == compare_lists( [], [ list_keys_in_2_not_in_1( {'a' => 1, 'b' => 1}, {'a' => 1, 'b' => 1}) ]), 'list_keys_in_2_not_in_1: {a, b} {a, b}');
+    ok( 0 == compare_lists( [], [ list_keys_in_2_not_in_1( {'a' => 1, 'b' => 1, 'c' => 1}, {'a' => 1, 'b' => 1}) ]), 'list_keys_in_2_not_in_1: {a, b, c} {a, b}');
+    ok( 0 == compare_lists( ['b'], [ list_keys_in_2_not_in_1( {'a' => 1, 'c' => 1}, {'a' => 1, 'b' => 1}) ]), 'list_keys_in_2_not_in_1: {a, b, c} {a, b}');
+
+    note( 'Leaving tests_list_keys_in_2_not_in_1()' ) ;
+    return ;
+}
+
+sub list_keys_in_2_not_in_1
+{
+    # Return, in sorted order, the keys of the second hash that are
+    # absent from the first one.
+    my $hash_1_ref = shift;
+    my $hash_2_ref = shift;
+
+    return( grep { ! exists $hash_1_ref->{ $_ } } sort keys %{ $hash_2_ref } ) ;
+}
+
+
+sub list_folders_in_2_not_in_1
+{
+    # Folders present on host2 but not on host1, excluding the host2
+    # folders that are the translated images of host1 folders.
+    # NOTE(review): relies on the file-level globals %h1_folders_all,
+    # %h2_folders_all and %h2_folders_from_1_all being populated first.
+    my ( @h2_folders_not_in_h1, %h2_folders_not_in_h1 ) ;
+    @h2_folders_not_in_h1 = list_keys_in_2_not_in_1( \%h1_folders_all, \%h2_folders_all ) ;
+    map { $h2_folders_not_in_h1{$_} = 1} @h2_folders_not_in_h1 ;
+    @h2_folders_not_in_h1 = list_keys_in_2_not_in_1( \%h2_folders_from_1_all, \%h2_folders_not_in_h1 ) ;
+    #$sync->{ debug } and print "h2_folders_not_in_h1: @h2_folders_not_in_h1\n" ;
+    return( reverse @h2_folders_not_in_h1 ) ;
+}
+
+sub tests_nb_messages_in_2_not_in_1
+{
+    # Unit tests for nb_messages_in_2_not_in_1() on a hand-built sync
+    # hash mapping message ids to folders.
+    note( 'Entering tests_stats_across_folders()' ) ;
+    is( undef, nb_messages_in_2_not_in_1( ), 'nb_messages_in_2_not_in_1: no args => undef' ) ;
+
+    my $mysync->{ h1_folders_of_md5 }->{ 'some_id_01' }->{ 'some_folder_01' } = 1 ;
+    is( 0, nb_messages_in_2_not_in_1( $mysync ), 'nb_messages_in_2_not_in_1: no messages in 2 => 0' ) ;
+
+    $mysync->{ h1_folders_of_md5 }->{ 'some_id_in_1_and_2' }->{ 'some_folder_01' } = 2 ;
+    $mysync->{ h2_folders_of_md5 }->{ 'some_id_in_1_and_2' }->{ 'some_folder_02' } = 4 ;
+
+    is( 0, nb_messages_in_2_not_in_1( $mysync ), 'nb_messages_in_2_not_in_1: a common message => 0' ) ;
+
+    $mysync->{ h2_folders_of_md5 }->{ 'some_id_in_2_not_in_1' }->{ 'some_folder_02' } = 1 ;
+    is( 1, nb_messages_in_2_not_in_1( $mysync ), 'nb_messages_in_2_not_in_1: one message in_2_not_in_1 => 1' ) ;
+
+    $mysync->{ h2_folders_of_md5 }->{ 'some_other_id_in_2_not_in_1' }->{ 'some_folder_02' } = 3 ;
+    is( 2, nb_messages_in_2_not_in_1( $mysync ), 'nb_messages_in_2_not_in_1: two messages in_2_not_in_1 => 2' ) ;
+
+    note( 'Leaving tests_stats_across_folders()' ) ;
+    return ;
+}
+
+sub nb_messages_in_2_not_in_1
+{
+    # Count the identified messages present on host2 but not on host1,
+    # cache the count in the sync hash and return it. Returns undef
+    # when no sync hash is given.
+    my $mysync = shift ;
+    if ( not defined $mysync ) { return ; }
+
+    my @only_on_h2 = list_keys_in_2_not_in_1(
+        $mysync->{ h1_folders_of_md5 },
+        $mysync->{ h2_folders_of_md5 } ) ;
+    $mysync->{ nb_messages_in_2_not_in_1 } = scalar( @only_on_h2 ) ;
+
+    return $mysync->{ nb_messages_in_2_not_in_1 } ;
+}
+
+
+sub nb_messages_in_1_not_in_2
+{
+    # Count the identified messages present on host1 but missing on
+    # host2, cache the count in the sync hash and return it. Returns
+    # undef when no sync hash is given.
+    my $mysync = shift ;
+    if ( not defined $mysync ) { return ; }
+
+    my @only_on_h1 = list_keys_in_2_not_in_1(
+        $mysync->{ h2_folders_of_md5 },
+        $mysync->{ h1_folders_of_md5 } ) ;
+    $mysync->{ nb_messages_in_1_not_in_2 } = scalar( @only_on_h1 ) ;
+
+    return $mysync->{ nb_messages_in_1_not_in_2 } ;
+}
+
+
+
+sub comment_on_final_diff_in_1_not_in_2
+{
+    # Print a human summary about host1 messages still missing on host2,
+    # plus a hint about unidentified (headerless) messages. Stays silent
+    # for folders-only or UID-driven runs, or when nothing was identified.
+    my $mysync = shift ;
+
+    if ( not defined $mysync
+         or $mysync->{ justfolders }
+         or $mysync->{ useuid }
+       )
+    {
+        return ;
+    }
+
+    my $nb_identified_h1_messages = scalar( keys %{ $mysync->{ h1_folders_of_md5 } } ) ;
+    my $nb_identified_h2_messages = scalar( keys %{ $mysync->{ h2_folders_of_md5 } } ) ;
+    $mysync->{ debug } and myprint( "nb_keys h1_folders_of_md5 $nb_identified_h1_messages\n" ) ;
+    $mysync->{ debug } and myprint( "nb_keys h2_folders_of_md5 $nb_identified_h2_messages\n" ) ;
+
+    if ( 0 == $nb_identified_h1_messages ) { return ; }
+
+    # Calculate if not yet done
+    if ( not defined $mysync->{ nb_messages_in_1_not_in_2 } )
+    {
+        nb_messages_in_1_not_in_2( $mysync ) ;
+    }
+
+
+    if ( 0 == $mysync->{ nb_messages_in_1_not_in_2 } )
+    {
+        myprint( "The sync looks good, all ",
+                 $nb_identified_h1_messages,
+                 " identified messages in host1 are on host2.\n" ) ;
+    }
+    else
+    {
+        myprint( "The sync is not finished, there are ",
+                 $mysync->{ nb_messages_in_1_not_in_2 },
+                 " identified messages in host1 that are not on host2.\n" ) ;
+    }
+
+
+    if ( 1 <= $mysync->{ h1_nb_msg_noheader } )
+    {
+        myprint( "There are ",
+                 $mysync->{ h1_nb_msg_noheader },
+                 " unidentified messages (usually Sent or Draft messages).",
+                 " To sync them add option --addheader\n" ) ;
+    }
+    else
+    {
+        myprint( "There is no unidentified message\n" ) ;
+    }
+
+    return ;
+}
+
+sub comment_on_final_diff_in_2_not_in_1
+{
+    # Print a human summary about host2 messages absent from host1 (the
+    # strictness of the sync), suggesting --delete2 when some exist.
+    # Stays silent for folders-only or UID-driven runs.
+    my $mysync = shift ;
+
+    if ( not defined $mysync
+         or $mysync->{ justfolders }
+         or $mysync->{ useuid }
+       )
+    {
+        return ;
+    }
+
+    my $nb_identified_h2_messages = scalar( keys %{ $mysync->{ h2_folders_of_md5 } } ) ;
+    # Calculate if not done yet
+    if ( not defined $mysync->{ nb_messages_in_2_not_in_1 } )
+    {
+        nb_messages_in_2_not_in_1( $mysync ) ;
+    }
+
+    if ( 0 == $mysync->{ nb_messages_in_2_not_in_1 } )
+    {
+        myprint( "The sync is strict, all ",
+                 $nb_identified_h2_messages,
+                 " identified messages in host2 are on host1.\n" ) ;
+    }
+    else
+    {
+        myprint( "The sync is not strict, there are ",
+                 $mysync->{ nb_messages_in_2_not_in_1 },
+                 " messages in host2 that are not on host1.",
+                 " Use --delete2 to delete them and have a strict sync.",
+                 " ($nb_identified_h2_messages identified messages in host2)\n" ) ;
+    }
+    return ;
+}
+
+
+sub tests_match
+{
+    # Unit tests for match(): undef handling, plain matches, mismatches,
+    # and hostile embedded-code patterns that must be rejected.
+    note( 'Entering tests_match()' ) ;
+
+    # undef serie
+    is( undef, match(  ), 'match: no args => undef' ) ;
+    is( undef, match( 'lalala' ), 'match: one args => undef' ) ;
+
+    # This one gives 0 under a binary made by pp
+    # but 1 under "normal" Perl interpreter. So a PAR bug?
+    #is( 1, match( q{}, q{} ), 'match: q{} =~ q{} => 1' ) ;
+
+    is( 'lalala', match( 'lalala', 'lalala' ), 'match: lalala =~ lalala => lalala' ) ;
+    is( 'lalala', match( 'lalala', '^lalala' ), 'match: lalala =~ ^lalala => lalala' ) ;
+    is( 'lalala', match( 'lalala', 'lalala$' ), 'match: lalala =~ lalala$ => lalala' ) ;
+    is( 'lalala', match( 'lalala', '^lalala$' ), 'match: lalala =~ ^lalala$ => lalala' ) ;
+    is( '_lalala_', match( '_lalala_', 'lalala' ), 'match: _lalala_ =~ lalala => _lalala_' ) ;
+    is( 'lalala', match( 'lalala', '.*' ), 'match: lalala =~ .* => lalala' ) ;
+    is( 'lalala', match( 'lalala', '.' ), 'match: lalala =~ . => lalala' ) ;
+    is( '/lalala/', match( '/lalala/', '/lalala/' ), 'match: /lalala/ =~ /lalala/ => /lalala/' ) ;
+
+    is( 0, match( 'foo', 's/foo/bar/g' ), 'match: foo =~ s/foo/bar/g => 0' ) ;
+    is( 's/foo/bar/g', match( 's/foo/bar/g', 's/foo/bar/g' ), 'match: s/foo/bar/g =~ s/foo/bar/g => s/foo/bar/g' ) ;
+
+
+    is( 0, match( 'lalala', 'ooo' ), 'match: lalala =~ ooo => 0' ) ;
+    is( 0, match( 'lalala', 'lal_ala' ), 'match: lalala =~ lal_ala => 0' ) ;
+    is( 0, match( 'lalala', '\.' ), 'match: lalala =~ \. => 0' ) ;
+    is( 0, match( 'lalalaX', '^lalala$' ), 'match: lalalaX =~ ^lalala$ => 0' ) ;
+    is( 0, match( 'lalala', '/lalala/' ), 'match: lalala =~ /lalala/ => 0' ) ;
+
+    is( 'LALALA', match( 'LALALA', '(?i:lalala)' ), 'match: LALALA =~ (?i:lalala) => 1' ) ;
+
+    # Embedded code in a runtime pattern dies under the default re rules
+    is( undef, match( 'LALALA', '(?{`ls /`})' ), 'match: LALALA =~ (?{`ls /`}) => undef' ) ;
+    is( undef, match( 'LALALA', '(?{print "CACA"})' ), 'match: LALALA =~ (?{print "CACA"}) => undef' ) ;
+    is( undef, match( 'CACA', '(??{print "CACA"})' ), 'match: CACA =~ (??{print "CACA"}) => undef' ) ;
+
+    note( 'Leaving tests_match()' ) ;
+
+    return ;
+}
+
+# match( $var, $regex )
+# Returns $var when $var matches the pattern string $regex,
+# 0 when it does not match, and undef when either argument is undef
+# or when compiling/applying the pattern dies (the eval traps it, e.g.
+# embedded-code patterns like (?{...}) which are fatal without 'use re eval').
+# NOTE(review): a matching value of "0" would be returned as-is and is
+# then indistinguishable from the no-match return value 0.
+sub match
+{
+ my( $var, $regex ) = @ARG ;
+
+ # undef cases
+ if ( ( ! defined $var ) or ( ! defined $regex ) ) { return ; }
+
+ # normal cases
+ if ( eval { $var =~ qr{$regex} } ) {
+ return $var ;
+ }elsif ( $EVAL_ERROR ) {
+ myprint( "Fatal regex $regex\n" ) ;
+ return ;
+ } else {
+ return 0 ;
+ }
+ return ;
+}
+
+
+# Unit tests for notmatch(): undef-argument cases, then the inverse of
+# the tests_match() series (1 == does not match, 0 == matches).
+sub tests_notmatch
+{
+ note( 'Entering tests_notmatch()' ) ;
+
+ # undef serie
+ is( undef, notmatch( ), 'notmatch: no args => undef' ) ;
+ is( undef, notmatch( 'lalala' ), 'notmatch: one args => undef' ) ;
+
+ is( 1, notmatch( 'lalala', '/lalala/' ), 'notmatch: lalala !~ /lalala/ => 1' ) ;
+ is( 0, notmatch( '/lalala/', '/lalala/' ), 'notmatch: /lalala/ !~ /lalala/ => 0' ) ;
+ is( 1, notmatch( 'lalala', '/ooo/' ), 'notmatch: lalala !~ /ooo/ => 1' ) ;
+
+ # This one gives 1 under a binary made by pp
+ # but 0 under "normal" Perl interpreter. So a PAR bug, same in tests_match .
+ #is( 0, notmatch( q{}, q{} ), 'notmatch: q{} !~ q{} => 0' ) ;
+
+ is( 0, notmatch( 'lalala', 'lalala' ), 'notmatch: lalala !~ lalala => 0' ) ;
+ is( 0, notmatch( 'lalala', '^lalala' ), 'notmatch: lalala !~ ^lalala => 0' ) ;
+ is( 0, notmatch( 'lalala', 'lalala$' ), 'notmatch: lalala !~ lalala$ => 0' ) ;
+ is( 0, notmatch( 'lalala', '^lalala$' ), 'notmatch: lalala !~ ^lalala$ => 0' ) ;
+ is( 0, notmatch( '_lalala_', 'lalala' ), 'notmatch: _lalala_ !~ lalala => 0' ) ;
+ is( 0, notmatch( 'lalala', '.*' ), 'notmatch: lalala !~ .* => 0' ) ;
+ is( 0, notmatch( 'lalala', '.' ), 'notmatch: lalala !~ . => 0' ) ;
+
+
+ is( 1, notmatch( 'lalala', 'ooo' ), 'notmatch: does not match regex => 1' ) ;
+ is( 1, notmatch( 'lalala', 'lal_ala' ), 'notmatch: does not match regex => 1' ) ;
+ is( 1, notmatch( 'lalala', '\.' ), 'notmatch: matches regex => 0' ) ;
+ is( 1, notmatch( 'lalalaX', '^lalala$' ), 'notmatch: does not match regex => 1' ) ;
+
+ note( 'Leaving tests_notmatch()' ) ;
+
+ return ;
+}
+
+# notmatch( $var, $regex )
+# Returns 1 when $var does NOT match the pattern string $regex,
+# 0 when it matches, and undef when either argument is undef or the
+# pattern is fatal (trapped by the eval block).
+# NOTE(review): unlike match(), the pattern is used directly
+# ( $var !~ $regex ) without qr{} — confirm the asymmetry is intended.
+sub notmatch
+{
+ my( $var, $regex ) = @ARG ;
+
+ # undef cases
+ if ( ( ! defined $var ) or ( ! defined $regex ) ) { return ; }
+
+ # normal cases
+ if ( eval { $var !~ $regex } ) {
+ return 1 ;
+ }elsif ( $EVAL_ERROR ) {
+ myprint( "Fatal regex $regex\n" ) ;
+ return ;
+ }else{
+ return 0 ;
+ }
+ return ;
+}
+
+
+# Deletes on host2 every folder listed in the global @h2_folders_not_in_1,
+# honoring the --delete2foldersonly / --delete2foldersbutnot filters and
+# the global $sync->{dry} dry-run flag (dry mode reports success without
+# unsubscribing/deleting).
+# NOTE(review): the filters are applied via string eval of a user-supplied
+# pattern ( eval "\$folder !~ $delete2foldersonly" ), which executes
+# arbitrary Perl if the option value is malicious — worth confirming the
+# option is validated upstream.
+sub delete_folders_in_2_not_in_1
+{
+
+ foreach my $folder ( @h2_folders_not_in_1 ) {
+ if ( defined $delete2foldersonly and eval "\$folder !~ $delete2foldersonly" ) {
+ myprint( "Not deleting $folder because of --delete2foldersonly $delete2foldersonly\n" ) ;
+ next ;
+ }
+ if ( defined $delete2foldersbutnot and eval "\$folder =~ $delete2foldersbutnot" ) {
+ myprint( "Not deleting $folder because of --delete2foldersbutnot $delete2foldersbutnot\n" ) ;
+ next ;
+ }
+ my $res = $sync->{dry} ; # always success in dry mode!
+ $sync->{imap2}->unsubscribe( $folder ) if ( ! $sync->{dry} ) ;
+ $res = $sync->{imap2}->delete( $folder ) if ( ! $sync->{dry} ) ;
+ if ( $res ) {
+ myprint( "Deleted $folder", "$sync->{dry_message}", "\n" ) ;
+ }else{
+ myprint( "Deleting $folder failed", "\n" ) ;
+ }
+ }
+ return ;
+}
+
+# delete_folder( $mysync, $imap, $folder, $Side )
+# Unsubscribes then deletes $folder on the given imap connection.
+# $Side is a label for messages (defaults to 'HostX').
+# Returns 1 on success (always "success" in dry mode, without touching
+# the server), undef on failure or missing arguments.
+sub delete_folder
+{
+ my ( $mysync, $imap, $folder, $Side ) = @_ ;
+ if ( ! $mysync ) { return ; }
+ if ( ! $imap ) { return ; }
+ if ( ! $folder ) { return ; }
+ $Side ||= 'HostX' ;
+
+ my $res = $mysync->{dry} ; # always success in dry mode!
+ if ( ! $mysync->{dry} ) {
+ $imap->unsubscribe( $folder ) ;
+ $res = $imap->delete( $folder ) ;
+ }
+ if ( $res ) {
+ myprint( "$Side deleted $folder", $mysync->{dry_message}, "\n" ) ;
+ return 1 ;
+ }else{
+ myprint( "$Side deleting $folder failed", "\n" ) ;
+ return ;
+ }
+}
+
+# delete1emptyfolders( $mysync )
+# Implements --delete1emptyfolders: walks $mysync->{h1_folders_wanted}
+# (in reverse sorted order so children come before parents) and deletes
+# on host1 every folder that (a) has no subfolders, (b) reports zero
+# messages by both EXAMINE and SEARCH, and (c) is not INBOX.
+# Folders kept for any reason are recorded in %folders_kept; deleted ones
+# are afterwards removed from h1_folders_wanted.
+# No-op when the option is off, imap1 is missing, or it is disconnected.
+sub delete1emptyfolders
+{
+ my $mysync = shift ;
+ if ( ! $mysync ) { return ; } # abort if no parameter
+ if ( ! $mysync->{delete1emptyfolders} ) { return ; } # abort if --delete1emptyfolders off
+ my $imap = $mysync->{imap1} ;
+ if ( ! $imap ) { return ; } # abort if no imap
+ if ( $imap->IsUnconnected( ) ) { return ; } # abort if disconnected
+
+ my %folders_kept ;
+ myprint( qq{Host1 deleting empty folders\n} ) ;
+ foreach my $folder ( reverse sort @{ $mysync->{h1_folders_wanted} } ) {
+ my $parenthood = $imap->is_parent( $folder ) ;
+ if ( defined $parenthood and $parenthood ) {
+ myprint( "Host1: folder $folder has subfolders\n" ) ;
+ $folders_kept{ $folder }++ ;
+ next ;
+ }
+ my $nb_messages_select = examine_folder_and_count( $mysync, $imap, $folder, 'Host1' ) ;
+ if ( ! defined $nb_messages_select ) { next ; } # Select failed => Neither continue nor keep this folder }
+ my $nb_messages_search = scalar( @{ $imap->messages( ) } ) ;
+ # Keep the folder when both counting methods agree there are messages.
+ if ( 0 != $nb_messages_select and 0 != $nb_messages_search ) {
+ myprint( "Host1: folder $folder has messages: $nb_messages_search (search) $nb_messages_select (select)\n" ) ;
+ $folders_kept{ $folder }++ ;
+ next ;
+ }
+ # Keep it also when the two methods disagree (one zero, one not).
+ if ( 0 != $nb_messages_select + $nb_messages_search ) {
+ myprint( "Host1: folder $folder odd messages count: $nb_messages_search (search) $nb_messages_select (select)\n" ) ;
+ $folders_kept{ $folder }++ ;
+ next ;
+ }
+ # Here we must have 0 messages by messages() aka "SEARCH ALL" and also "EXAMINE"
+ if ( uc $folder eq 'INBOX' ) {
+ myprint( "Host1: Not deleting $folder\n" ) ;
+ $folders_kept{ $folder }++ ;
+ next ;
+ }
+ myprint( "Host1: deleting empty folder $folder\n" ) ;
+ # can not delete a SELECTed or EXAMINEd folder so closing it
+ # could changed be SELECT INBOX
+ $imap->close( ) ; # close after examine does not expunge; anyway expunging an empty folder...
+ if ( delete_folder( $mysync, $imap, $folder, 'Host1' ) ) {
+ next ; # Deleted, good!
+ }else{
+ $folders_kept{ $folder }++ ;
+ next ; # Not deleted, bad!
+ }
+ }
+ remove_deleted_folders_from_wanted_list( $mysync, %folders_kept ) ;
+ myprint( qq{Host1 ended deleting empty folders\n} ) ;
+ return ;
+}
+
+# remove_deleted_folders_from_wanted_list( $mysync, %folders_kept )
+# Rewrites $mysync->{h1_folders_wanted} in place, keeping only the
+# folders present (with a true count) in %folders_kept — i.e. everything
+# delete1emptyfolders() decided not to delete. The hash is passed
+# flattened as a list, which is fine since it is the last parameter.
+sub remove_deleted_folders_from_wanted_list
+{
+ my ( $mysync, %folders_kept ) = @ARG ;
+
+ my @h1_folders_wanted_init = @{ $mysync->{h1_folders_wanted} } ;
+ my @h1_folders_wanted_last ;
+ foreach my $folder ( @h1_folders_wanted_init ) {
+ if ( $folders_kept{ $folder } ) {
+ push @h1_folders_wanted_last, $folder ;
+ }
+ }
+ @{ $mysync->{h1_folders_wanted} } = @h1_folders_wanted_last ;
+ return ;
+}
+
+
+# examine_folder_and_count( $mysync, $imap, $folder, $Side )
+# EXAMINEs $folder and returns the message count parsed from the
+# "* N EXISTS" line of the imap History; undef when EXAMINE fails.
+sub examine_folder_and_count
+{
+ my ( $mysync, $imap, $folder, $Side ) = @_ ;
+ $Side ||= 'HostX' ;
+
+ if ( ! examine_folder( $mysync, $imap, $folder, $Side ) ) {
+ return ;
+ }
+ my $nb_messages_select = count_from_select( $imap->History ) ;
+ return $nb_messages_select ;
+}
+
+
+# Unit tests for delete1emptyfolders() using a Test::MockObject imap:
+# walks through undef/unconnected guards, then mocks is_parent / examine /
+# History / messages to drive every keep-or-delete branch, checking the
+# resulting h1_folders_wanted list via tests_delete1emptyfolders_unit().
+sub tests_delete1emptyfolders
+{
+ note( 'Entering tests_delete1emptyfolders()' ) ;
+
+
+ is( undef, delete1emptyfolders( ), q{delete1emptyfolders: undef} ) ;
+ my $syncT ;
+ is( undef, delete1emptyfolders( $syncT ), q{delete1emptyfolders: undef 2} ) ;
+ my $imapT ;
+ $syncT->{imap1} = $imapT ;
+ is( undef, delete1emptyfolders( $syncT ), q{delete1emptyfolders: undef imap} ) ;
+
+ require_ok( "Test::MockObject" ) ;
+ $imapT = Test::MockObject->new( ) ;
+ $syncT->{imap1} = $imapT ;
+
+ $imapT->set_true( 'IsUnconnected' ) ;
+ is( undef, delete1emptyfolders( $syncT ), q{delete1emptyfolders: Unconnected imap} ) ;
+
+ # Now connected tests
+ $imapT->set_false( 'IsUnconnected' ) ;
+ $imapT->mock( 'LastError', sub { q{LastError mocked} } ) ;
+
+ $syncT->{delete1emptyfolders} = 0 ;
+ tests_delete1emptyfolders_unit(
+ $syncT,
+ [ qw{ INBOX DELME1 DELME2 } ],
+ [ qw{ INBOX DELME1 DELME2 } ],
+ q{tests_delete1emptyfolders: --delete1emptyfolders OFF}
+ ) ;
+
+ # All are parents => no deletion at all
+ $imapT->set_true( 'is_parent' ) ;
+ $syncT->{delete1emptyfolders} = 1 ;
+ tests_delete1emptyfolders_unit(
+ $syncT,
+ [ qw{ INBOX DELME1 DELME2 } ],
+ [ qw{ INBOX DELME1 DELME2 } ],
+ q{tests_delete1emptyfolders: --delete1emptyfolders ON}
+ ) ;
+
+ # No parents but examine false for all => skip all
+ $imapT->set_false( 'is_parent', 'examine' ) ;
+
+ tests_delete1emptyfolders_unit(
+ $syncT,
+ [ qw{ INBOX DELME1 DELME2 } ],
+ [ ],
+ q{tests_delete1emptyfolders: EXAMINE fails}
+ ) ;
+
+ # examine ok for all but History bad => skip all
+ $imapT->set_true( 'examine' ) ;
+ $imapT->mock( 'History', sub { ( q{History badly mocked} ) } ) ;
+ tests_delete1emptyfolders_unit(
+ $syncT,
+ [ qw{ INBOX DELME1 DELME2 } ],
+ [ ],
+ q{tests_delete1emptyfolders: examine ok but History badly mocked so count messages fails}
+ ) ;
+
+ # History good but some messages EXISTS == messages() => no deletion
+ $imapT->mock( 'History', sub { ( q{* 2 EXISTS} ) } ) ;
+ $imapT->mock( 'messages', sub { [ qw{ UID_1 UID_2 } ] } ) ;
+ tests_delete1emptyfolders_unit(
+ $syncT,
+ [ qw{ INBOX DELME1 DELME2 } ],
+ [ qw{ INBOX DELME1 DELME2 } ],
+ q{tests_delete1emptyfolders: History EXAMINE ok, several messages}
+ ) ;
+
+ # 0 EXISTS but != messages() => no deletion
+ $imapT->mock( 'History', sub { ( q{* 0 EXISTS} ) } ) ;
+ $imapT->mock( 'messages', sub { [ qw{ UID_1 UID_2 } ] } ) ;
+ tests_delete1emptyfolders_unit(
+ $syncT,
+ [ qw{ INBOX DELME1 DELME2 } ],
+ [ qw{ INBOX DELME1 DELME2 } ],
+ q{tests_delete1emptyfolders: 0 EXISTS but 2 by messages()}
+ ) ;
+
+ # 1 EXISTS but != 0 == messages() => no deletion
+ $imapT->mock( 'History', sub { ( q{* 1 EXISTS} ) } ) ;
+ $imapT->mock( 'messages', sub { [ ] } ) ;
+ tests_delete1emptyfolders_unit(
+ $syncT,
+ [ qw{ INBOX DELME1 DELME2 } ],
+ [ qw{ INBOX DELME1 DELME2 } ],
+ q{tests_delete1emptyfolders: 1 EXISTS but 0 by messages()}
+ ) ;
+
+ # 0 EXISTS and 0 == messages() => deletion except INBOX
+ $imapT->mock( 'History', sub { ( q{* 0 EXISTS} ) } ) ;
+ $imapT->mock( 'messages', sub { [ ] } ) ;
+ $imapT->set_true( qw{ delete close unsubscribe } ) ;
+ $syncT->{dry_message} = q{ (not really since in a mocked test)} ;
+ tests_delete1emptyfolders_unit(
+ $syncT,
+ [ qw{ INBOX DELME1 DELME2 } ],
+ [ qw{ INBOX } ],
+ q{tests_delete1emptyfolders: 0 EXISTS 0 by messages() delete folders, keep INBOX}
+ ) ;
+
+ note( 'Leaving tests_delete1emptyfolders()' ) ;
+ return ;
+}
+
+# Helper for tests_delete1emptyfolders(): seeds $syncT->{h1_folders_wanted}
+# with the "init" list, runs delete1emptyfolders(), and checks the list
+# now equals the expected "after" list.
+sub tests_delete1emptyfolders_unit
+{
+ note( 'Entering tests_delete1emptyfolders_unit()' ) ;
+
+ my $syncT = shift ;
+ my $folders1wanted_init_ref = shift ;
+ my $folders1wanted_after_ref = shift ;
+ my $comment = shift || q{delete1emptyfolders:} ;
+
+ my @folders1wanted_init = @{ $folders1wanted_init_ref } ;
+ my @folders1wanted_after = @{ $folders1wanted_after_ref } ;
+
+ @{ $syncT->{h1_folders_wanted} } = @folders1wanted_init ;
+
+ is_deeply( $syncT->{h1_folders_wanted}, \@folders1wanted_init, qq{$comment, init check} ) ;
+ delete1emptyfolders( $syncT ) ;
+ is_deeply( $syncT->{h1_folders_wanted}, \@folders1wanted_after, qq{$comment, after check} ) ;
+
+ note( 'Leaving tests_delete1emptyfolders_unit()' ) ;
+ return ;
+}
+
+# extract_header( $string )
+# Returns the header part of an RFC-2822-style message, i.e. everything
+# before the first blank line (split on "\n\n"); empty string when there
+# is no header.
+# NOTE(review): assumes LF line endings — a CRLF-only message would not
+# split; confirm callers normalize newlines first.
+sub extract_header
+{
+ my $string = shift ;
+
+ my ( $header ) = split /\n\n/x, $string ;
+ if ( ! $header ) { return( q{} ) ; }
+ #myprint( "[$header]\n" ) ;
+ return( $header ) ;
+}
+
+# Unit test for extract_header(): the header of a small message must be
+# everything up to (not including) the first blank line.
+sub tests_extract_header
+{
+ note( 'Entering tests_extract_header()' ) ;
+
+my $h = <<'EOM';
+Message-Id: <20100428101817.A66CB162474E@plume.est.belle>
+Date: Wed, 28 Apr 2010 12:18:17 +0200 (CEST)
+From: gilles@louloutte.dyndns.org (Gilles LAMIRAL)
+EOM
+chomp $h ;
+ok( $h eq extract_header(
+<<'EOM'
+Message-Id: <20100428101817.A66CB162474E@plume.est.belle>
+Date: Wed, 28 Apr 2010 12:18:17 +0200 (CEST)
+From: gilles@louloutte.dyndns.org (Gilles LAMIRAL)
+
+body
+lalala
+EOM
+), 'extract_header: 1') ;
+
+
+
+ note( 'Leaving tests_extract_header()' ) ;
+ return ;
+}
+
+# decompose_header( $string )
+# Parses a raw message header into a hashref mapping each header key to
+# an arrayref of values (multiple occurrences of the same key, e.g.
+# "Received:", are all kept in order). Continuation lines (starting with
+# whitespace) are appended to the most recent value with a single space.
+# Parsing stops at the first empty line (end of header). Lines before
+# any valid "Key: value" line are silently dropped.
+sub decompose_header{
+ my $string = shift ;
+
+ # a hash, for a keyword header KEY value are list of strings [VAL1, VAL1_other, etc]
+ # Think of multiple "Received:" header lines.
+ my $header = { } ;
+
+ my ($key, $val ) ;
+ my @line = split /\n|\r\n/x, $string ;
+ foreach my $line ( @line ) {
+ #myprint( "DDD $line\n" ) ;
+ # End of header
+ last if ( $line =~ m{^$}xo ) ;
+ # Key: value
+ if ( $line =~ m/(^[^:]+):\s(.*)/xo ) {
+ $key = $1 ;
+ $val = $2 ;
+ $debugdev and myprint( "DDD KV [$key] [$val]\n" ) ;
+ push @{ $header->{ $key } }, $val ;
+ # blanc and value => value from previous line continues
+ }elsif( $line =~ m/^(\s+)(.*)/xo ) {
+ $val = $2 ;
+ $debugdev and myprint( "DDD V [$val]\n" ) ;
+ # $LAST (global, -1) indexes the latest value for the current key.
+ @{ $header->{ $key } }[ $LAST ] .= " $val" if $key ;
+ # dirty line?
+ }else{
+ next ;
+ }
+ }
+
+ #myprint( Data::Dumper->Dump( [ $header ] ) ) ;
+
+ return( $header ) ;
+}
+
+
+# Unit tests for decompose_header(): repeated keys, multi-line folded
+# values, keys containing blanks, body lines after the blank separator
+# being ignored, real Received: headers, and a malformed header whose
+# first line starts with whitespace (dropped).
+sub tests_decompose_header{
+ note( 'Entering tests_decompose_header()' ) ;
+
+
+ my $header_dec ;
+
+ $header_dec = decompose_header(
+<<'EOH'
+KEY_1: VAL_1
+KEY_2: VAL_2
+ VAL_2_+
+ VAL_2_++
+KEY_3: VAL_3
+KEY_1: VAL_1_other
+KEY_4: VAL_4
+ VAL_4_+
+KEY_5 BLANC: VAL_5
+
+KEY_6_BAD_BODY: VAL_6
+EOH
+ ) ;
+
+ ok( 'VAL_3'
+ eq $header_dec->{ 'KEY_3' }[0], 'decompose_header: VAL_3' ) ;
+
+ ok( 'VAL_1'
+ eq $header_dec->{ 'KEY_1' }[0], 'decompose_header: VAL_1' ) ;
+
+ ok( 'VAL_1_other'
+ eq $header_dec->{ 'KEY_1' }[1], 'decompose_header: VAL_1_other' ) ;
+
+ ok( 'VAL_2 VAL_2_+ VAL_2_++'
+ eq $header_dec->{ 'KEY_2' }[0], 'decompose_header: VAL_2 VAL_2_+ VAL_2_++' ) ;
+
+ ok( 'VAL_4 VAL_4_+'
+ eq $header_dec->{ 'KEY_4' }[0], 'decompose_header: VAL_4 VAL_4_+' ) ;
+
+ ok( ' VAL_5'
+ eq $header_dec->{ 'KEY_5 BLANC' }[0], 'decompose_header: KEY_5 BLANC' ) ;
+
+ ok( not( defined $header_dec->{ 'KEY_6_BAD_BODY' }[0] ), 'decompose_header: KEY_6_BAD_BODY' ) ;
+
+
+ $header_dec = decompose_header(
+<<'EOH'
+Message-Id: <20100428101817.A66CB162474E@plume.est.belle>
+Date: Wed, 28 Apr 2010 12:18:17 +0200 (CEST)
+From: gilles@louloutte.dyndns.org (Gilles LAMIRAL)
+EOH
+ ) ;
+
+ ok( '<20100428101817.A66CB162474E@plume.est.belle>'
+ eq $header_dec->{ 'Message-Id' }[0], 'decompose_header: 1' ) ;
+
+ $header_dec = decompose_header(
+<<'EOH'
+Return-Path: <gilles@louloutte.dyndns.org>
+Received: by plume.est.belle (Postfix, from userid 1000)
+ id 120A71624742; Wed, 28 Apr 2010 01:46:40 +0200 (CEST)
+Subject: test:eekahceishukohpe
+EOH
+) ;
+ ok(
+'by plume.est.belle (Postfix, from userid 1000) id 120A71624742; Wed, 28 Apr 2010 01:46:40 +0200 (CEST)'
+ eq $header_dec->{ 'Received' }[0], 'decompose_header: 2' ) ;
+
+ $header_dec = decompose_header(
+<<'EOH'
+Received: from plume (localhost [127.0.0.1])
+ by plume.est.belle (Postfix) with ESMTP id C6EB73F6C9
+ for <gilles@localhost>; Mon, 26 Nov 2007 10:39:06 +0100 (CET)
+Received: from plume [192.168.68.7]
+ by plume with POP3 (fetchmail-6.3.6)
+ for <gilles@localhost> (single-drop); Mon, 26 Nov 2007 10:39:06 +0100 (CET)
+EOH
+ ) ;
+ ok(
+ 'from plume (localhost [127.0.0.1]) by plume.est.belle (Postfix) with ESMTP id C6EB73F6C9 for <gilles@localhost>; Mon, 26 Nov 2007 10:39:06 +0100 (CET)'
+ eq $header_dec->{ 'Received' }[0], 'decompose_header: 3' ) ;
+ ok(
+ 'from plume [192.168.68.7] by plume with POP3 (fetchmail-6.3.6) for <gilles@localhost> (single-drop); Mon, 26 Nov 2007 10:39:06 +0100 (CET)'
+ eq $header_dec->{ 'Received' }[1], 'decompose_header: 3' ) ;
+
+# Bad header beginning with a blank character
+ $header_dec = decompose_header(
+<<'EOH'
+ KEY_1: VAL_1
+KEY_2: VAL_2
+ VAL_2_+
+ VAL_2_++
+KEY_3: VAL_3
+KEY_1: VAL_1_other
+EOH
+ ) ;
+
+ ok( 'VAL_3'
+ eq $header_dec->{ 'KEY_3' }[0], 'decompose_header: Bad header VAL_3' ) ;
+
+ ok( 'VAL_1_other'
+ eq $header_dec->{ 'KEY_1' }[0], 'decompose_header: Bad header VAL_1_other' ) ;
+
+ ok( 'VAL_2 VAL_2_+ VAL_2_++'
+ eq $header_dec->{ 'KEY_2' }[0], 'decompose_header: Bad header VAL_2 VAL_2_+ VAL_2_++' ) ;
+
+ note( 'Leaving tests_decompose_header()' ) ;
+ return ;
+}
+
+# Unit tests for epoch(): the same instant expressed in several timezone
+# offsets must map to the same epoch value, plus the "00-Jan-0000" buggy
+# date fixup. NOTE(review): the descriptions of the last two is() calls
+# are copy-pasted from the previous line and do not match their inputs.
+sub tests_epoch
+{
+ note( 'Entering tests_epoch()' ) ;
+
+ ok( '1282658400' eq epoch( '24-Aug-2010 16:00:00 +0200' ), 'epoch 24-Aug-2010 16:00:00 +0200 -> 1282658400' ) ;
+ ok( '1282658400' eq epoch( '24-Aug-2010 14:00:00 +0000' ), 'epoch 24-Aug-2010 14:00:00 +0000 -> 1282658400' ) ;
+ ok( '1282658400' eq epoch( '24-Aug-2010 12:00:00 -0200' ), 'epoch 24-Aug-2010 12:00:00 -0200 -> 1282658400' ) ;
+ ok( '1282658400' eq epoch( '24-Aug-2010 16:01:00 +0201' ), 'epoch 24-Aug-2010 16:01:00 +0201 -> 1282658400' ) ;
+ ok( '1282658400' eq epoch( '24-Aug-2010 14:01:00 +0001' ), 'epoch 24-Aug-2010 14:01:00 +0001 -> 1282658400' ) ;
+
+ ok( '1280671200' eq epoch( '1-Aug-2010 16:00:00 +0200' ), 'epoch 1-Aug-2010 16:00:00 +0200 -> 1280671200' ) ;
+ ok( '1280671200' eq epoch( '1-Aug-2010 14:00:00 +0000' ), 'epoch 1-Aug-2010 14:00:00 +0000 -> 1280671200' ) ;
+ ok( '1280671200' eq epoch( '1-Aug-2010 12:00:00 -0200' ), 'epoch 1-Aug-2010 12:00:00 -0200 -> 1280671200' ) ;
+ ok( '1280671200' eq epoch( '1-Aug-2010 16:01:00 +0201' ), 'epoch 1-Aug-2010 16:01:00 +0201 -> 1280671200' ) ;
+ ok( '1280671200' eq epoch( '1-Aug-2010 14:01:00 +0001' ), 'epoch 1-Aug-2010 14:01:00 +0001 -> 1280671200' ) ;
+
+ is( '1280671200', epoch( '1-Aug-2010 14:01:00 +0001' ), 'epoch 1-Aug-2010 14:01:00 +0001 -> 1280671200' ) ;
+ is( '946684800', epoch( '00-Jan-0000 00:00:00 +0000' ), 'epoch 1-Aug-2010 14:01:00 +0001 -> 1280671200' ) ;
+
+ note( 'Leaving tests_epoch()' ) ;
+ return ;
+}
+
+# epoch( $d )
+# Converts an IMAP INTERNALDATE string like "24-Aug-2010 16:00:00 +0200"
+# into a Unix epoch (UTC), applying the numeric timezone offset.
+# Returns q{} when $d is undef; returns undef when $d does not match the
+# expected format (NOTE(review): two different "no result" values).
+# A buggy day-of-month of 00 is coerced to 01.
+# NOTE(review): a year like "0000" is passed to timegm() as-is and goes
+# through Time::Local's small-year windowing (tests expect 946684800,
+# i.e. year 2000) — verify against the Time::Local docs in use.
+sub epoch
+{
+ # incoming format:
+ # internal date 24-Aug-2010 16:00:00 +0200
+
+ # outgoing format: epoch
+
+
+ my $d = shift ;
+ return(q{}) if not defined $d;
+
+ my ( $mday, $month, $year, $hour, $min, $sec, $sign, $zone_h, $zone_m ) ;
+ my $time ;
+
+ if ( $d =~ m{(\d{1,2})-([A-Z][a-z]{2})-(\d{4})\s(\d{2}):(\d{2}):(\d{2})\s((?:\+|-))(\d{2})(\d{2})}xo ) {
+ #myprint( "internal: [$1][$2][$3][$4][$5][$6][$7][$8][$9]\n" ) ;
+ ( $mday, $month, $year, $hour, $min, $sec, $sign, $zone_h, $zone_m )
+ = ( $1, $2, $3, $4, $5, $6, $7, $8, $9 ) ;
+ #myprint( "( $mday, $month, $year, $hour, $min, $sec, $sign, $zone_h, $zone_m )\n" ) ;
+
+ $sign = +1 if ( '+' eq $sign ) ;
+ $sign = $MINUS_ONE if ( '-' eq $sign ) ;
+
+ if ( 0 == $mday ) {
+ myprint( "buggy day in $d. Fixed to 01\n" ) ;
+ $mday = '01' ;
+ }
+ # timegm gives the epoch as if the wall time were UTC; subtracting the
+ # signed offset converts the local wall time to true UTC.
+ $time = timegm( $sec, $min, $hour, $mday, $month_abrev{$month}, $year )
+ - $sign * ( 3600 * $zone_h + 60 * $zone_m ) ;
+
+ #myprint( "$time ", scalar localtime($time), "\n");
+ }
+ return( $time ) ;
+}
+
+# Unit tests for add_header(): default placeholder uid and an explicit uid.
+sub tests_add_header
+{
+ note( 'Entering tests_add_header()' ) ;
+
+ ok( 'Message-Id: <mistake@imapsync>' eq add_header(), 'add_header no arg' ) ;
+ ok( 'Message-Id: <123456789@imapsync>' eq add_header( '123456789' ), 'add_header 123456789' ) ;
+
+ note( 'Leaving tests_add_header()' ) ;
+ return ;
+}
+
+# add_header( $header_uid )
+# Builds a synthetic "Message-Id: <uid@imapsync>" header line; with no
+# (or false) uid the placeholder 'mistake' is used.
+# NOTE(review): a uid of '0' or q{} is falsy and also becomes 'mistake'.
+sub add_header
+{
+ my $header_uid = shift || 'mistake' ;
+ my $header_Message_Id = 'Message-Id: <' . $header_uid . '@imapsync>' ;
+ return( $header_Message_Id ) ;
+}
+
+
+
+
+# Unit tests for max_line_length(): empty string, newline-only strings,
+# and mixed-length lines; the trailing newline counts in the length.
+sub tests_max_line_length
+{
+ note( 'Entering tests_max_line_length()' ) ;
+
+ ok( 0 == max_line_length( q{} ), 'max_line_length: 0 == null string' ) ;
+ ok( 1 == max_line_length( "\n" ), 'max_line_length: 1 == \n' ) ;
+ ok( 1 == max_line_length( "\n\n" ), 'max_line_length: 1 == \n\n' ) ;
+ ok( 1 == max_line_length( "\n" x 500 ), 'max_line_length: 1 == 500 \n' ) ;
+ ok( 1 == max_line_length( 'a' ), 'max_line_length: 1 == a' ) ;
+ ok( 2 == max_line_length( "a\na" ), 'max_line_length: 2 == a\na' ) ;
+ ok( 2 == max_line_length( "a\na\n" ), 'max_line_length: 2 == a\na\n' ) ;
+ ok( 3 == max_line_length( "a\nab\n" ), 'max_line_length: 3 == a\nab\n' ) ;
+ ok( 3 == max_line_length( "a\nab\n" x 1_000 ), 'max_line_length: 3 == 1_000 a\nab\n' ) ;
+ ok( 3 == max_line_length( "a\nab\nabc" ), 'max_line_length: 3 == a\nab\nabc' ) ;
+
+ ok( 4 == max_line_length( "a\nab\nabc\n" ), 'max_line_length: 4 == a\nab\nabc\n' ) ;
+ ok( 5 == max_line_length( "a\nabcd\nabc\n" ), 'max_line_length: 5 == a\nabcd\nabc\n' ) ;
+ ok( 5 == max_line_length( "a\nabcd\nabc\n\nabcd\nabcd\nabcd\nabcd\nabcd\nabcd\nabcd\nabcd" ), 'max_line_length: 5 == a\nabcd\nabc\n\nabcd\nabcd\nabcd\nabcd\nabcd\nabcd\nabcd\nabcd' ) ;
+
+ note( 'Leaving tests_max_line_length()' ) ;
+ return ;
+}
+
+# max_line_length( $string )
+# Returns the length of the longest line in $string, where a line's
+# length INCLUDES its trailing "\n" when present (so "\n" alone is 1).
+# Returns 0 for the empty string.
+sub max_line_length
+{
+ my $string = shift ;
+ my $max = 0 ;
+
+ # /g walks the string line by line; $1 is each line with optional "\n".
+ while ( $string =~ m/([^\n]*\n?)/msxg ) {
+ $max = max( $max, length $1 ) ;
+ }
+ return( $max ) ;
+}
+
+
+# Unit tests for setlogfile(): explicit logdir/logfile, then (under a
+# forced GMT timezone, skipped on Windows) the generated names built from
+# timestart/user1/user2 with the _remote/_abort suffixes and with
+# filesystem-forbidden characters replaced by underscores.
+sub tests_setlogfile
+{
+ note( 'Entering tests_setlogfile()' ) ;
+
+ my $mysync = {} ;
+ $mysync->{logdir} = 'vallogdir' ;
+ $mysync->{logfile} = 'vallogfile.txt' ;
+ is( 'vallogdir/vallogfile.txt', setlogfile( $mysync ),
+ 'setlogfile: logdir vallogdir, logfile vallogfile.txt, vallogdir/vallogfile.txt' ) ;
+
+ SKIP: {
+ skip( 'Too hard to have a well known timezone on Windows', 9 ) if ( 'MSWin32' eq $OSNAME ) ;
+
+ local $ENV{TZ} = 'GMT' ;
+
+ $mysync = {
+ timestart => 2,
+ } ;
+
+ is( "$DEFAULT_LOGDIR/1970_01_01_00_00_02_000__.txt", setlogfile( $mysync ),
+ "setlogfile: default is like $DEFAULT_LOGDIR/1970_01_01_00_00_02_000__.txt" ) ;
+
+ $mysync = {
+ timestart => 2,
+ user1 => 'user1',
+ user2 => 'user2',
+ abort => 1,
+ } ;
+
+ is( "$DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2_abort.txt", setlogfile( $mysync ),
+ "setlogfile: default is like $DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2_abort.txt" ) ;
+
+ $mysync = {
+ timestart => 2,
+ user1 => 'user1',
+ user2 => 'user2',
+ remote => 'zzz',
+ } ;
+
+ is( "$DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2_remote.txt", setlogfile( $mysync ),
+ "setlogfile: default is like $DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2_remote.txt" ) ;
+
+ $mysync = {
+ timestart => 2,
+ user1 => 'user1',
+ user2 => 'user2',
+ remote => 'zzz',
+ abort => 1,
+ } ;
+
+ is( "$DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2_remote_abort.txt", setlogfile( $mysync ),
+ "setlogfile: default is like $DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2_remote_abort.txt" ) ;
+
+
+ $mysync = {
+ timestart => 2,
+ user1 => 'user1',
+ user2 => 'user2',
+ } ;
+
+ is( "$DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2.txt", setlogfile( $mysync ),
+ "setlogfile: default is like $DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2.txt" ) ;
+
+ $mysync->{logdir} = undef ;
+ $mysync->{logfile} = undef ;
+ is( "$DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2.txt", setlogfile( $mysync ),
+ "setlogfile: logdir undef, $DEFAULT_LOGDIR/1970_01_01_00_00_02_000_user1_user2.txt" ) ;
+
+ $mysync->{logdir} = q{} ;
+ $mysync->{logfile} = undef ;
+ is( '1970_01_01_00_00_02_000_user1_user2.txt', setlogfile( $mysync ),
+ 'setlogfile: logdir empty, 1970_01_01_00_00_02_000_user1_user2.txt' ) ;
+
+ $mysync->{logdir} = 'vallogdir' ;
+ $mysync->{logfile} = undef ;
+ is( 'vallogdir/1970_01_01_00_00_02_000_user1_user2.txt', setlogfile( $mysync ),
+ 'setlogfile: logdir vallogdir, vallogdir/1970_01_01_00_00_02_000_user1_user2.txt' ) ;
+
+ $mysync = {
+ user1 => 'us/er1a*|?:"<>b',
+ user2 => 'u/ser2a*|?:"<>b',
+ } ;
+
+ is( "$DEFAULT_LOGDIR/1970_01_01_00_00_00_000_us_er1a_______b_u_ser2a_______b.txt", setlogfile( $mysync ),
+ "setlogfile: logdir undef, $DEFAULT_LOGDIR/1970_01_01_00_00_00_000_us_er1a_______b_u_ser2a_______b.txt" ) ;
+
+
+
+ } ;
+
+ note( 'Leaving tests_setlogfile()' ) ;
+ return ;
+}
+
+# setlogfile( $mysync )
+# Computes and stores the full log file path in $mysync->{logfile}.
+# When logfile is already set it is prefixed with logdir; otherwise a
+# timestamped name is generated from timestart and a sanitized
+# "user1_user2" suffix, plus "_remote" and/or "_abort" markers.
+# Side effects: mutates both $mysync->{logdir} (defaulted to
+# $DEFAULT_LOGDIR when undef) and $mysync->{logfile}.
+# Returns the final path.
+sub setlogfile
+{
+ my( $mysync ) = shift ;
+
+ # When aborting another process the log file name finishes with "_abort.txt"
+ my $abort_suffix = ( $mysync->{abort} ) ? '_abort' : q{} ;
+ # When acting as a proxy the log file name finishes with "_remote.txt"
+ # proxy mode is not done yet
+ my $remote_suffix = ( $mysync->{remote} ) ? '_remote' : q{} ;
+
+ my $suffix = (
+ filter_forbidden_characters( slash_to_underscore( $mysync->{user1} ) ) || q{} )
+ . '_'
+ . ( filter_forbidden_characters( slash_to_underscore( $mysync->{user2} ) ) || q{} )
+ . $remote_suffix . $abort_suffix ;
+
+ $mysync->{logdir} = defined $mysync->{logdir} ? $mysync->{logdir} : $DEFAULT_LOGDIR ;
+
+ $mysync->{logfile} = defined $mysync->{logfile}
+ ? "$mysync->{logdir}/$mysync->{logfile}"
+ : logfile( $mysync->{timestart}, $suffix, $mysync->{logdir} ) ;
+
+ return( $mysync->{logfile} ) ;
+}
+
+# Unit tests for logfile(): timestamped names under a forced GMT
+# timezone (skipped on Windows), including fractional-second
+# milliseconds, a suffix with blanks removed, and an optional directory.
+sub tests_logfile
+{
+ note( 'Entering tests_logfile()' ) ;
+
+ SKIP: {
+ # Too hard to have a well known timezone on Windows
+ skip( 'Too hard to have a well known timezone on Windows', 10 ) if ( 'MSWin32' eq $OSNAME ) ;
+
+ local $ENV{TZ} = 'GMT' ;
+ { POSIX::tzset unless ('MSWin32' eq $OSNAME) ;
+ is( '1970_01_01_00_00_00_000.txt', logfile( ), 'logfile: no args => 1970_01_01_00_00_00.txt' ) ;
+ is( '1970_01_01_00_00_00_000.txt', logfile( 0 ), 'logfile: 0 => 1970_01_01_00_00_00.txt' ) ;
+ is( '1970_01_01_00_01_01_000.txt', logfile( 61 ), 'logfile: 0 => 1970_01_01_00_01_01.txt' ) ;
+ is( '1970_01_01_00_01_01_234.txt', logfile( 61.234 ), 'logfile: 0 => 1970_01_01_00_01_01.txt' ) ;
+ is( '2010_08_24_14_00_00_000.txt', logfile( 1_282_658_400 ), 'logfile: 1_282_658_400 => 2010_08_24_14_00_00.txt' ) ;
+ is( '2010_08_24_14_01_01_000.txt', logfile( 1_282_658_461 ), 'logfile: 1_282_658_461 => 2010_08_24_14_01_01.txt' ) ;
+ is( '2010_08_24_14_01_01_000_poupinette.txt', logfile( 1_282_658_461, 'poupinette' ), 'logfile: 1_282_658_461 poupinette => 2010_08_24_14_01_01_poupinette.txt' ) ;
+ is( '2010_08_24_14_01_01_000_removeblanks.txt', logfile( 1_282_658_461, ' remove blanks ' ), 'logfile: 1_282_658_461 remove blanks => 2010_08_24_14_01_01_000_removeblanks' ) ;
+
+ is( '2010_08_24_14_01_01_234_poup.txt', logfile( 1_282_658_461.2347, 'poup' ),
+ 'logfile: 1_282_658_461.2347 poup => 2010_08_24_14_01_01_234_poup.txt' ) ;
+
+ is( 'dirdir/2010_08_24_14_01_01_234_poup.txt', logfile( 1_282_658_461.2347, 'poup', 'dirdir' ),
+ 'logfile: 1_282_658_461.2347 poup dirdir => dirdir/2010_08_24_14_01_01_234_poup.txt' ) ;
+
+
+
+ }
+ # Restore the process timezone after the local $ENV{TZ} block.
+ POSIX::tzset unless ('MSWin32' eq $OSNAME) ;
+ } ;
+
+ note( 'Leaving tests_logfile()' ) ;
+ return ;
+}
+
+
+# logfile( $time, $suffix, $dir )
+# Builds a log file name "YYYY_MM_DD_HH_MM_SS_mmm[_suffix].txt",
+# optionally prefixed with "$dir/". $time is an epoch (possibly
+# fractional; milliseconds are truncated, not rounded). Blanks in
+# $suffix are removed (tr/ //ds). Local time is used via strftime.
+sub logfile
+{
+ my ( $time, $suffix, $dir ) = @_ ;
+
+ $time ||= 0 ;
+ $suffix ||= q{} ;
+ $suffix =~ tr/ //ds ;
+ my $sep_suffix = ( $suffix ) ? '_' : q{} ;
+ $dir ||= q{} ;
+ my $sep_dir = ( $dir ) ? '/' : q{} ;
+
+ my $date_str = POSIX::strftime( '%Y_%m_%d_%H_%M_%S', localtime $time ) ;
+ # Because of ab tests or web accesses, more than one sync withing one second is possible
+ # so we add also milliseconds
+ $date_str .= sprintf "_%03d", ($time - int( $time ) ) * 1000 ; # without rounding
+ my $logfile = "${dir}${sep_dir}${date_str}${sep_suffix}${suffix}.txt" ;
+ return( $logfile ) ;
+}
+
+
+
+# Unit tests for slash_to_underscore(): undef passthrough and slash
+# replacement.
+sub tests_slash_to_underscore
+{
+ note( 'Entering tests_slash_to_underscore()' ) ;
+
+ is( undef, slash_to_underscore( ), 'slash_to_underscore: no parameters => undef' ) ;
+ is( '_', slash_to_underscore( '/' ), 'slash_to_underscore: / => _' ) ;
+ is( '_abc_def_', slash_to_underscore( '/abc/def/' ), 'slash_to_underscore: /abc/def/ => _abc_def_' ) ;
+ note( 'Leaving tests_slash_to_underscore()' ) ;
+ return ;
+}
+
+# slash_to_underscore( $string )
+# Returns a copy of $string with every "/" replaced by "_";
+# undef in, undef out. Used to make user names safe in file names.
+sub slash_to_underscore
+{
+ my $string = shift ;
+
+ if ( ! defined $string ) { return ; }
+
+ $string =~ tr{/}{_} ;
+
+ return( $string ) ;
+}
+
+
+
+
+# Scalability sanity test: builds a 900_000-key hash and verifies every
+# key is found by exists() — exercises big hash lookup, no sync logic.
+sub tests_million_folders_baby_2
+{
+ note( 'Entering tests_million_folders_baby_2()' ) ;
+
+ my %long ;
+ @long{ 1 .. 900_000 } = (1) x 900_000 ;
+ #myprint( %long, "\n" ) ;
+ my $pasglop = 0 ;
+ foreach my $elem ( 1 .. 900_000 ) {
+ #$debug and myprint( "$elem " ) ;
+ if ( not exists $long{ $elem } ) {
+ $pasglop++ ;
+ }
+ }
+ ok( 0 == $pasglop, 'tests_million_folders_baby_2: search among 900_000' ) ;
+ # myprint( "$pasglop\n" ) ;
+
+ note( 'Leaving tests_million_folders_baby_2()' ) ;
+ return ;
+}
+
+
+
+# Deliberately failing test, used to check that the failure-reporting
+# machinery (summary/report_failures in testsexit) actually works.
+sub tests_always_fail
+{
+ note( 'Entering tests_always_fail()' ) ;
+
+ is( 0, 1, 'always_fail: 0 is 1' ) ;
+
+ note( 'Leaving tests_always_fail()' ) ;
+ return ;
+}
+
+
+# Unit tests for logfileprepa(): undef arg, then a path under the W/tmp
+# test tree whose directory must be creatable.
+sub tests_logfileprepa
+{
+ note( 'Entering tests_logfileprepa()' ) ;
+
+ is( undef, logfileprepa( ), 'logfileprepa: no args => undef' ) ;
+ my $logfile = 'W/tmp/tests/tests_logfileprepa.txt' ;
+ is( 1, logfileprepa( $logfile ), 'logfileprepa: W/tmp/tests/tests_logfileprepa.txt => 1' ) ;
+
+ note( 'Leaving tests_logfileprepa()' ) ;
+ return ;
+}
+
+# logfileprepa( $logfile )
+# Ensures the directory of $logfile exists (via do_valid_directory).
+# Returns 1 on success, 0 when the directory can not be made valid,
+# undef when $logfile is undef.
+sub logfileprepa
+{
+ my $logfile = shift ;
+
+ if ( ! defined( $logfile ) )
+ {
+ return ;
+ }else
+ {
+ #myprint( "[$logfile]\n" ) ;
+ my $dirname = dirname( $logfile ) ;
+ do_valid_directory( $dirname ) || return( 0 ) ;
+ return( 1 ) ;
+ }
+}
+
+
+# Unit tests for teelaunch(): undef/empty guards, then a real IO::Tee
+# writing to a file under W/tmp and mirrored content read back.
+sub tests_teelaunch
+{
+ note( 'Entering tests_teelaunch()' ) ;
+
+ is( undef, teelaunch( ), 'teelaunch: no args => undef' ) ;
+ my $mysync = {} ;
+ is( undef, teelaunch( $mysync ), 'teelaunch: arg empty {} => undef' ) ;
+ $mysync->{logfile} = q{} ;
+ is( undef, teelaunch( $mysync ), 'teelaunch: logfile empty string => undef' ) ;
+ $mysync->{logfile} = 'W/tmp/tests/tests_teelaunch.txt' ;
+ isa_ok( my $tee = teelaunch( $mysync ), 'IO::Tee' , 'teelaunch: logfile W/tmp/tests/tests_teelaunch.txt' ) ;
+ is( 1, print( $tee "Hi!\n" ), 'teelaunch: write Hi!') ;
+ is( "Hi!\n", file_to_string( 'W/tmp/tests/tests_teelaunch.txt' ), 'teelaunch: reading W/tmp/tests/tests_teelaunch.txt is Hi!\n' ) ;
+ is( 1, print( $tee "Hoo\n" ), 'teelaunch: write Hoo') ;
+ is( "Hi!\nHoo\n", file_to_string( 'W/tmp/tests/tests_teelaunch.txt' ), 'teelaunch: reading W/tmp/tests/tests_teelaunch.txt is Hi!\nHoo\n' ) ;
+
+ note( 'Leaving tests_teelaunch()' ) ;
+ return ;
+}
+
+# teelaunch( $mysync )
+# Opens $mysync->{logfile} for writing (UTF-8) and builds an IO::Tee
+# that duplicates output to the log file and STDOUT, with autoflush.
+# Stores the raw handle in $mysync->{logfile_handle} and the tee in
+# $mysync->{tee}; returns the tee. Returns undef when $mysync or its
+# logfile is missing/empty; croaks when the log directory or file
+# can not be opened.
+sub teelaunch
+{
+ my $mysync = shift ;
+
+ if ( ! defined( $mysync ) )
+ {
+ return ;
+ }
+
+ my $logfile = $mysync->{logfile} ;
+
+ if ( ! $logfile )
+ {
+ return ;
+ }
+
+ logfileprepa( $logfile ) || croak "Error no valid directory to write log file $logfile : $OS_ERROR" ;
+
+ # This is a log file opened during the whole sync
+ ## no critic (InputOutput::RequireBriefOpen)
+ open my $logfile_handle, '>', $logfile
+ or croak( "Can not open $logfile for write: $OS_ERROR" ) ;
+ binmode $logfile_handle, ":encoding(UTF-8)" ;
+ my $tee = IO::Tee->new( $logfile_handle, \*STDOUT ) ;
+ $tee->autoflush( 1 ) ;
+ $mysync->{logfile_handle} = $logfile_handle ;
+ $mysync->{tee} = $tee ;
+ return $tee ;
+}
+
+# getpwuid_any_os( $uid )
+# Returns the login name for $uid: getlogin() on Windows (which ignores
+# $uid), getpwuid() elsewhere. Scalar context forced in both cases.
+sub getpwuid_any_os
+{
+ my $uid = shift ;
+
+ return( scalar getlogin ) if ( 'MSWin32' eq $OSNAME ) ; # Windows system
+ return( scalar getpwuid $uid ) ; # Unix system
+
+
+}
+
+# simulong( $max_seconds )
+# Simulates a long-running sync for roughly $max_seconds: prints a fake
+# ETA countdown 5 times per second.
+# NOTE(review): sleep( 1 / $division ) only sub-second-sleeps when
+# Time::HiRes's sleep is imported; with CORE::sleep the fractional value
+# truncates to 0 — confirm the file imports Time::HiRes qw(sleep).
+sub simulong
+{
+ my $max_seconds = shift ;
+ my $division = 5 ;
+ my $last_count = $division * $max_seconds ;
+ foreach my $i ( 1 .. ( $last_count ) ) {
+ myprint( "Are you still here ETA: " . ($last_count - $i) . "/$last_count msgs left\n" ) ;
+ #myprint( "Are you still here ETA: " . ($last_count - $i) . "/$last_count msgs left\n" . ( "Ah" x 40 . "\n") x 4000 ) ;
+ sleep( 1 / $division ) ;
+ }
+
+ return ;
+}
+
+
+
+# Prints every environment variable as "KEY => value", sorted by key,
+# between begin/end marker lines. Debug aid only.
+sub printenv
+{
+ myprint( "Environment variables listing:\n",
+ ( map { "$_ => $ENV{$_}\n" } sort keys %ENV),
+ "Environment variables listing end\n" ) ;
+ return ;
+}
+
+# testsexit( $mysync )
+# Entry point for --tests / --testsdebug / --testsunit: runs the test
+# suites, then inspects the Test::More builder. On any failed test or a
+# run/expected count mismatch, prints a summary plus the failed-test
+# list and exits with $EXIT_TESTS_FAILED. On success it cleans up test
+# side effects and exits — unless --testslive is set, in which case it
+# returns so the live sync can proceed (for coverage). No-op when none
+# of the test options is set.
+sub testsexit
+{
+ my $mysync = shift ;
+ if ( ! ( $mysync->{ tests } or $mysync->{ testsdebug } or $mysync->{ testsunit } ) ) {
+ return ;
+ }
+ my $test_builder = Test::More->builder ;
+ tests( $mysync ) ;
+ testsdebug( $mysync ) ;
+ testunitsession( $mysync ) ;
+
+ my @summary = $test_builder->summary() ;
+ my @details = $test_builder->details() ;
+ my $nb_tests_run = scalar( @summary ) ;
+ my $nb_tests_expected = $test_builder->expected_tests() ;
+ my $nb_tests_failed = count_0s( @summary ) ;
+ my $tests_failed = report_failures( @details ) ;
+ if ( $nb_tests_failed or ( $nb_tests_run != $nb_tests_expected ) ) {
+ #$test_builder->reset( ) ;
+ myprint( "Summary of tests: failed $nb_tests_failed tests, run $nb_tests_run tests, expected to run $nb_tests_expected tests.\n",
+ "List of failed tests:\n", $tests_failed ) ;
+ exit $EXIT_TESTS_FAILED ;
+ }
+
+ cleanup_mess_from_tests( ) ;
+ # Cover is larger with --tests --testslive
+ if ( ! $mysync->{ testslive } )
+ {
+ exit ;
+ }
+ return ;
+}
+
+# Resets global state polluted by the test suites (currently only the
+# @pipemess accumulator) so a following live run starts clean.
+sub cleanup_mess_from_tests
+{
+ undef @pipemess ;
+ return ;
+}
+
+# after_get_options( $mysync, $numopt )
+# Post-option-parsing hook: prints usage and exits when --help was given
+# or when no option at all was passed ($numopt is the option count).
+sub after_get_options
+{
+ my $mysync = shift ;
+ my $numopt = shift ;
+
+
+ # exit with --help option or no option at all
+ $mysync->{ debug } and myprint( "numopt:$numopt\n" ) ;
+
+ # $help is a file-level global set by the option parser.
+ if ( $help or not $numopt ) {
+ myprint( usage( $mysync ) ) ;
+ exit ;
+ }
+
+ return ;
+}
+
+# Unit tests for remove_edging_blanks(): undef passthrough, no-op on a
+# clean string, and stripping of leading/trailing spaces only.
+sub tests_remove_edging_blanks
+{
+ note( 'Entering tests_remove_edging_blanks()' ) ;
+
+ is( undef, remove_edging_blanks( ), 'remove_edging_blanks: no args => undef' ) ;
+ is( 'abcd', remove_edging_blanks( 'abcd' ), 'remove_edging_blanks: abcd => abcd' ) ;
+ is( 'ab cd', remove_edging_blanks( ' ab cd ' ), 'remove_edging_blanks: " ab cd " => "ab cd"' ) ;
+
+ note( 'Leaving tests_remove_edging_blanks()' ) ;
+ return ;
+}
+
+
+
# Strip leading and trailing space characters (ASCII space only, not
# all whitespace) from $string and return the result.
# Returns undef when called without a defined argument.
# \Z (not \z) keeps the original "$" semantics: a space before a final
# newline is also removed.
sub remove_edging_blanks
{
    my $string = shift ;
    return if ! defined $string ;
    $string =~ s{\A[ ]+}{}xms ;
    $string =~ s{[ ]+\Z}{}xms ;
    return $string ;
}
+
+
# Unit tests for sanitize().
# Fix: the note() banners wrongly said 'tests_remove_edging_blanks()'
# (copy-paste leftover from the sub above); they now name this function
# so the TAP trace points at the right test group.
sub tests_sanitize
{
    note( 'Entering tests_sanitize()' ) ;

    is( undef, sanitize( ), 'sanitize: no args => undef' ) ;
    my $mysync = {} ;

    $mysync->{ host1 } = ' example.com ' ;
    $mysync->{ user1 } = ' to to ' ;
    $mysync->{ password1 } = ' sex is good! ' ;
    is( undef, sanitize( $mysync ), 'sanitize: => undef' ) ;
    # sanitize() trims in place; inner blanks are kept.
    is( 'example.com', $mysync->{ host1 }, 'sanitize: host1 " example.com " => "example.com"' ) ;
    is( 'to to', $mysync->{ user1 }, 'sanitize: user1 " to to " => "to to"' ) ;
    is( 'sex is good!', $mysync->{ password1 }, 'sanitize: password1 " sex is good! " => "sex is good!"' ) ;
    note( 'Leaving tests_sanitize()' ) ;
    return ;
}
+
+
# Trim leading/trailing spaces, in place, from the six credential
# parameters of $mysync: host1/host2, user1/user2, password1/password2.
# Returns nothing; an undef $mysync is ignored.
sub sanitize
{
    my $mysync = shift ;
    return if ! defined $mysync ;

    for my $key ( qw( host1 host2 user1 user2 password1 password2 ) )
    {
        $mysync->{ $key } = remove_edging_blanks( $mysync->{ $key } ) ;
    }
    return ;
}
+
# Apply the "easy" provider presets selected on the command line
# (--gmail1/2, --office1/2, --exchange1/2, --domino1/2).
# Gmail on both sides is a special combined case that returns early;
# every other selected preset is applied in a fixed order.
sub easyany
{
    my $mysync = shift ;

    # Gmail
    if ( $mysync->{gmail1} and $mysync->{gmail2} ) {
        $mysync->{ debug } and myprint( "gmail1 gmail2\n") ;
        gmail12( $mysync ) ;
        return ;
    }
    if ( $mysync->{gmail1} ) {
        $mysync->{ debug } and myprint( "gmail1\n" ) ;
        gmail1( $mysync ) ;
    }
    if ( $mysync->{gmail2} ) {
        $mysync->{ debug } and myprint( "gmail2\n" ) ;
        gmail2( $mysync ) ;
    }

    # Office 365, Exchange, Domino: apply each selected preset, in order.
    my @presets = (
        [ office1   => \&office1   ],
        [ office2   => \&office2   ],
        [ exchange1 => \&exchange1 ],
        [ exchange2 => \&exchange2 ],
        [ domino1   => \&domino1   ],
        [ domino2   => \&domino2   ],
    ) ;
    foreach my $preset ( @presets ) {
        my ( $flag, $handler ) = @{ $preset } ;
        $handler->( $mysync ) if $mysync->{ $flag } ;
    }

    return ;
}
+
+# From and for https://imapsync.lamiral.info/FAQ.d/FAQ.Gmail.txt
# Preset for Gmail on both sides (--gmail1 --gmail2).
# Fills in Gmail-suitable defaults without overriding anything the user
# set explicitly (every assignment is ||= or a defined-check).
sub gmail12
{
    my $mysync = shift ;
    # Gmail at host1 and host2
    $mysync->{host1} ||= 'imap.gmail.com' ;
    $mysync->{ssl1} = ( defined $mysync->{ssl1} ) ? $mysync->{ssl1} : 1 ;
    $mysync->{host2} ||= 'imap.gmail.com' ;
    $mysync->{ssl2} = ( defined $mysync->{ssl2} ) ? $mysync->{ssl2} : 1 ;
    # Bandwidth throttling defaults.
    $mysync->{maxbytespersecond} ||= 20_000 ; # should be 10_000 when computed from Gmail documentation
    $mysync->{maxbytesafter} ||= 1_000_000_000 ;
    $mysync->{automap} = ( defined $mysync->{automap} ) ? $mysync->{automap} : 1 ;
    $mysync->{maxsleep} = ( defined $mysync->{maxsleep} ) ? $mysync->{maxsleep} : $MAX_SLEEP ; ;
    $skipcrossduplicates = ( defined $skipcrossduplicates ) ? $skipcrossduplicates : 0 ;
    # Gmail-to-Gmail: sync and resync labels by default.
    $mysync->{ synclabels } = ( defined $mysync->{ synclabels } ) ? $mysync->{ synclabels } : 1 ;
    $mysync->{ resynclabels } = ( defined $mysync->{ resynclabels } ) ? $mysync->{ resynclabels } : 1 ;
    # Skip the "[Gmail]" container folder itself; sync "All Mail" last.
    push @exclude, '\[Gmail\]$' ;
    push @folderlast, '[Gmail]/All Mail' ;
    return ;
}
+
+
# Preset for Gmail as host1 (--gmail1): Gmail-suitable defaults only,
# nothing the user set explicitly is overridden.
sub gmail1
{
    my $mysync = shift ;
    # Gmail at host1 (the old comment wrongly said host2)
    $mysync->{host1} ||= 'imap.gmail.com' ;
    $mysync->{ssl1} = ( defined $mysync->{ssl1} ) ? $mysync->{ssl1} : 1 ;
    $mysync->{maxbytespersecond} ||= 40_000 ; # should be 20_000 computed from by Gmail documentation
    $mysync->{maxbytesafter} ||= 2_500_000_000 ;
    $mysync->{automap} = ( defined $mysync->{automap} ) ? $mysync->{automap} : 1 ;
    $mysync->{maxsleep} = ( defined $mysync->{maxsleep} ) ? $mysync->{maxsleep} : $MAX_SLEEP ; ;
    $skipcrossduplicates = ( defined $skipcrossduplicates ) ? $skipcrossduplicates : 1 ;

    # Match messages by X-Gmail-Received first, then Message-Id.
    push @useheader, 'X-Gmail-Received', 'Message-Id' ;
    # Strip the "[Gmail]/" prefix from destination folder names.
    push @{ $mysync->{ regextrans2 } }, 's,\[Gmail\].,,' ;
    push @folderlast, '[Gmail]/All Mail' ;
    return ;
}
+
# Preset for Gmail as host2 (--gmail2): Gmail-suitable defaults only,
# nothing the user set explicitly is overridden.
sub gmail2
{
    my $mysync = shift ;
    # Gmail at host2
    $mysync->{host2} ||= 'imap.gmail.com' ;
    $mysync->{ssl2} = ( defined $mysync->{ssl2} ) ? $mysync->{ssl2} : 1 ;
    $mysync->{maxbytespersecond} ||= 20_000 ; # should be 10_000 computed from by Gmail documentation
    $mysync->{maxbytesafter} ||= 1_000_000_000 ; # In fact it is documented as half: 500_000_000

    $mysync->{automap} = ( defined $mysync->{automap} ) ? $mysync->{automap} : 1 ;
    #$skipcrossduplicates = ( defined $skipcrossduplicates ) ? $skipcrossduplicates : 1 ;
    $mysync->{ expunge1 } = ( defined $mysync->{ expunge1 } ) ? $mysync->{ expunge1 } : 1 ;
    $mysync->{addheader} = ( defined $mysync->{addheader} ) ? $mysync->{addheader} : 1 ;
    $mysync->{maxsleep} = ( defined $mysync->{maxsleep} ) ? $mysync->{maxsleep} : $MAX_SLEEP ; ;

    # presumably Gmail's maximum accepted message size — see $GMAIL_MAXSIZE
    $mysync->{maxsize} = ( defined $mysync->{maxsize} ) ? $mysync->{maxsize} : $GMAIL_MAXSIZE ;

    # Skip the "[Gmail]" container folder itself, unless --noexclude.
    if ( ! $mysync->{noexclude} ) {
        push @exclude, '\[Gmail\]$' ;
    }
    push @useheader, 'Message-Id' ;
    # Strip the "[Gmail]/" prefix from destination folder names.
    push @{ $mysync->{ regextrans2 } }, 's,\[Gmail\].,,' ;

    # push @{ $mysync->{ regextrans2 } }, 's/[ ]+/_/g' ; # is now replaced
    # by the two more specific following regexes,
    # they remove just the beginning and trailing blanks, not all.
    push @{ $mysync->{ regextrans2 } }, 's,^ +| +$,,g' ;
    push @{ $mysync->{ regextrans2 } }, 's,/ +| +/,/,g' ;
    # Replace quote/caret characters in folder names with underscores.
    push @{ $mysync->{ regextrans2 } }, q{s/['\\^"]/_/g} ; # Verified this
    push @folderlast, '[Gmail]/All Mail' ;
    return ;
}
+
+
+# From https://imapsync.lamiral.info/FAQ.d/FAQ.Exchange.txt
# Preset for Office 365 as host1 (--office1): host, ssl on, and
# exclusion of the reserved "Files" folder — each default only applies
# when the user did not decide otherwise.
sub office1
{
    # Office 365 at host1
    my $mysync = shift ;

    output( $mysync, q{Option --office1 is like: --host1 outlook.office365.com --ssl1 --exclude "^Files$"} . "\n" ) ;
    output( $mysync, "Option --office1 (cont) : unless overrided with --host1 otherhost --nossl1 --noexclude\n" ) ;
    $mysync->{host1} ||= 'outlook.office365.com' ;
    $mysync->{ssl1} = ( defined $mysync->{ssl1} ) ? $mysync->{ssl1} : 1 ;
    if ( ! $mysync->{noexclude} ) {
        push @exclude, '^Files$' ;
    }
    return ;
}
+
+
# Preset for Office 365 as host2 (--office2): host, ssl, size and rate
# limits, read-receipt disarming, long-line wrapping and renaming of
# the reserved "Files" folder — each skippable with its --no... option.
sub office2
{
    # Office 365 at host2
    my $mysync = shift ;
    output( $mysync, qq{Option --office2 is like: --host2 outlook.office365.com --ssl2 --maxsize 45_000_000 --maxmessagespersecond 4\n} ) ;
    output( $mysync, qq{Option --office2 (cont) : --disarmreadreceipts --regexmess "wrap 10500" --f1f2 "Files=Files_renamed_by_imapsync"\n} ) ;
    output( $mysync, qq{Option --office2 (cont) : unless overrided with --host2 otherhost --nossl2 ... --nodisarmreadreceipts --noregexmess\n} ) ;
    output( $mysync, qq{Option --office2 (cont) : and --nof1f2 to avoid Files folder renamed to Files_renamed_by_imapsync\n} ) ;
    $mysync->{host2} ||= 'outlook.office365.com' ;
    $mysync->{ssl2} = ( defined $mysync->{ssl2} ) ? $mysync->{ssl2} : 1 ;
    $mysync->{ maxsize } ||= 45_000_000 ;
    $mysync->{maxmessagespersecond} ||= 4 ;
    #push @regexflag, 's/\\\\Flagged//g' ; # No problem without! tested 2018_09_10
    $disarmreadreceipts = ( defined $disarmreadreceipts ) ? $disarmreadreceipts : 1 ;
    # I dislike double negation but here is one
    if ( ! $mysync->{noregexmess} )
    {
        # Hard-wrap very long lines (the "wrap 10500" advertised above).
        push @regexmess, 's,(.{10239}),$1\r\n,g' ;
    }
    # and another...
    if ( ! $mysync->{nof1f2} )
    {
        push @{ $mysync->{f1f2} }, 'Files=Files_renamed_by_imapsync' ;
    }
    return ;
}
+
# Preset for Exchange 2010/2013 as host1 (--exchange1).
# Currently a deliberate no-op kept for command-line symmetry with
# --exchange2: it only prints that it does nothing.
sub exchange1
{
    my $mysync = shift ;
    # Exchange 2010/2013 at host1
    output( $mysync, "Option --exchange1 does nothing (except printing this line...)\n" ) ;
    return ;
}
+
# Preset for Exchange 2010/2013 as host2 (--exchange2): size and rate
# limits, read-receipt disarming, dropping the \Flagged flag and long
# line wrapping — each skippable with its --no... option.
sub exchange2
{
    # Exchange 2010/2013 at host2
    my $mysync = shift ;
    output( $mysync, "Option --exchange2 is like: --maxsize 10_000_000 --maxmessagespersecond 4 --disarmreadreceipts\n" ) ;
    output( $mysync, "Option --exchange2 (cont) : --regexflag del Flagged --regexmess wrap 10500\n" ) ;
    output( $mysync, "Option --exchange2 (cont) : unless overrided with --maxsize xxx --nodisarmreadreceipts --noregexflag --noregexmess\n" ) ;
    $mysync->{ maxsize } ||= 10_000_000 ;
    $mysync->{maxmessagespersecond} ||= 4 ;
    $disarmreadreceipts = ( defined $disarmreadreceipts ) ? $disarmreadreceipts : 1 ;
    # I dislike double negation but here are two
    if ( ! $mysync->{noregexflag} ) {
        # Remove the \Flagged flag on transfer.
        push @regexflag, 's/\\\\Flagged//g' ;
    }
    if ( ! $mysync->{noregexmess} ) {
        # Hard-wrap very long lines (the "wrap 10500" advertised above).
        push @regexmess, 's,(.{10239}),$1\r\n,g' ;
    }
    return ;
}
+
# Preset for Domino as host1 (--domino1): backslash folder separator,
# empty prefix, and $messageidnodomain on by default.
sub domino1
{
    # Domino at host1
    my $mysync = shift ;

    $mysync->{ sep1 } = q{\\} ;
    $prefix1 = q{} ;
    $messageidnodomain = ( defined $messageidnodomain ) ? $messageidnodomain : 1 ;
    return ;
}
+
# Preset for Domino as host2 (--domino2): backslash folder separator,
# empty prefix, $messageidnodomain on by default, and a destination
# folder rename stripping a leading "Inbox\" (case-insensitive).
sub domino2
{
    # Domino at host2 (the old comment wrongly said host1)
    my $mysync = shift ;

    $mysync->{ sep2 } = q{\\} ;
    $prefix2 = q{} ;
    $messageidnodomain = ( defined $messageidnodomain ) ? $messageidnodomain : 1 ;
    push @{ $mysync->{ regextrans2 } }, 's,^Inbox\\\\(.*),$1,i' ;
    return ;
}
+
+
# Unit tests for resolv(). Network-dependent: they need working DNS,
# the /etc/hosts entries "localhost" and "ip6-localhost", and hardcode
# the current addresses of the lamiral.info test machines.
sub tests_resolv
{
    note( 'Entering tests_resolv()' ) ;

    # is( , resolv( ), 'resolv: => ' ) ;
    is( undef, resolv( ), 'resolv: no args => undef' ) ;
    is( undef, resolv( q{} ), 'resolv: empty string => undef' ) ;
    is( undef, resolv( 'hostnotexist' ), 'resolv: hostnotexist => undef' ) ;
    is( '127.0.0.1', resolv( '127.0.0.1' ), 'resolv: 127.0.0.1 => 127.0.0.1' ) ;
    is( '127.0.0.1', resolv( 'localhost' ), 'resolv: localhost => 127.0.0.1' ) ;
    is( '5.135.158.182', resolv( 'imapsync.lamiral.info' ), 'resolv: imapsync.lamiral.info => 5.135.158.182' ) ;

    # ip6-localhost ( in /etc/hosts )
    is( '::1', resolv( 'ip6-localhost' ), 'resolv: ip6-localhost => ::1' ) ;
    is( '::1', resolv( '::1' ), 'resolv: ::1 => ::1' ) ;
    # ks2
    is( '2001:41d0:8:d8b6::1', resolv( '2001:41d0:8:d8b6::1' ), 'resolv: 2001:41d0:8:d8b6::1 => 2001:41d0:8:d8b6::1' ) ;
    is( '2001:41d0:8:d8b6::1', resolv( 'ks2ipv6.lamiral.info' ), 'resolv: ks2ipv6.lamiral.info => 2001:41d0:8:d8b6::1' ) ;
    # ks3
    is( '2001:41d0:8:bebd::1', resolv( '2001:41d0:8:bebd::1' ), 'resolv: 2001:41d0:8:bebd::1 => 2001:41d0:8:bebd::1' ) ;
    is( '2001:41d0:8:bebd::1', resolv( 'ks3ipv6.lamiral.info' ), 'resolv: ks3ipv6.lamiral.info => 2001:41d0:8:bebd::1' ) ;


    note( 'Leaving tests_resolv()' ) ;
    return ;
}
+
+
+
# Resolve a hostname or IP literal to a numeric IP address string.
# Prefers Socket::getaddrinfo (IPv4 + IPv6) when this Perl's Socket
# provides it; falls back to the IPv4-only inet_aton/inet_ntoa pair.
# Returns undef on failure or on an empty/missing argument.
sub resolv
{
    my $host = shift @ARG ;
    return if ! $host ;

    if ( defined &Socket::getaddrinfo ) {
        return resolv_with_getaddrinfo( $host ) ;
    }

    # Old Socket module: IPv4 only.
    my $iaddr = inet_aton( $host ) or return ;
    return inet_ntoa( $iaddr ) ;
}
+
# Resolve $host with Socket::getaddrinfo and return the numeric address
# of the first record found, or undef on any failure (a diagnostic is
# printed). The reverse lookup at the end of the loop only feeds the
# debug trace; its result is otherwise unused.
sub resolv_with_getaddrinfo
{
    my $host = shift @ARG ;

    if ( ! $host ) { return ; }

    # NOTE(review): SOCK_RAW as socktype hint — presumably to limit
    # duplicate entries per address; confirm against Socket docs.
    my ( $err_getaddrinfo, @res ) = Socket::getaddrinfo( $host, "", { socktype => Socket::SOCK_RAW } ) ;
    if ( $err_getaddrinfo ) {
        myprint( "Cannot getaddrinfo of $host: $err_getaddrinfo\n" ) ;
        return ;
    }

    my @addr ;
    while( my $ai = shift @res ) {
        # Numeric form of the record's address.
        my ( $err_getnameinfo, $ipaddr ) = Socket::getnameinfo( $ai->{addr}, Socket::NI_NUMERICHOST(), Socket::NIx_NOSERV() ) ;
        if ( $err_getnameinfo ) {
            myprint( "Cannot getnameinfo of $host: $err_getnameinfo\n" ) ;
            return ;
        }
        $sync->{ debug } and myprint( "$host => $ipaddr\n" ) ;
        push @addr, $ipaddr ;
        my $reverse ;
        # Reverse lookup, for the debug trace only.
        ( $err_getnameinfo, $reverse ) = Socket::getnameinfo( $ai->{addr}, 0, Socket::NIx_NOSERV() ) ;
        $sync->{ debug } and myprint( "$host => $ipaddr => $reverse\n" ) ;
    }

    return $addr[0] ;
}
+
# Unit tests for resolvrev(). Network-dependent: they need working
# reverse DNS, the /etc/hosts entries "localhost" and "ip6-localhost",
# and hardcode the names of the lamiral.info test machines.
sub tests_resolvrev
{
    note( 'Entering tests_resolvrev()' ) ;

    # is( , resolvrev( ), 'resolvrev: => ' ) ;
    is( undef, resolvrev( ), 'resolvrev: no args => undef' ) ;
    is( undef, resolvrev( q{} ), 'resolvrev: empty string => undef' ) ;
    is( undef, resolvrev( 'hostnotexist' ), 'resolvrev: hostnotexist => undef' ) ;
    is( 'localhost', resolvrev( '127.0.0.1' ), 'resolvrev: 127.0.0.1 => localhost' ) ;
    is( 'localhost', resolvrev( 'localhost' ), 'resolvrev: localhost => localhost' ) ;
    is( 'ks.lamiral.info', resolvrev( 'imapsync.lamiral.info' ), 'resolvrev: imapsync.lamiral.info => ks.lamiral.info' ) ;

    # ip6-localhost ( in /etc/hosts )
    is( 'ip6-localhost', resolvrev( 'ip6-localhost' ), 'resolvrev: ip6-localhost => ip6-localhost' ) ;
    is( 'ip6-localhost', resolvrev( '::1' ), 'resolvrev: ::1 => ip6-localhost' ) ;
    # ks2
    is( 'ks2ipv6.lamiral.info', resolvrev( '2001:41d0:8:d8b6::1' ), 'resolvrev: 2001:41d0:8:d8b6::1 => ks2ipv6.lamiral.info' ) ;
    is( 'ks2ipv6.lamiral.info', resolvrev( 'ks2ipv6.lamiral.info' ), 'resolvrev: ks2ipv6.lamiral.info => ks2ipv6.lamiral.info' ) ;
    # ks3
    is( 'ks3ipv6.lamiral.info', resolvrev( '2001:41d0:8:bebd::1' ), 'resolvrev: 2001:41d0:8:bebd::1 => ks3ipv6.lamiral.info' ) ;
    is( 'ks3ipv6.lamiral.info', resolvrev( 'ks3ipv6.lamiral.info' ), 'resolvrev: ks3ipv6.lamiral.info => ks3ipv6.lamiral.info' ) ;


    note( 'Leaving tests_resolvrev()' ) ;
    return ;
}
+
# Reverse-resolve a host or IP literal to a name.
# Returns undef when the argument is empty, when the lookup fails, or
# when this Perl's Socket lacks getaddrinfo (there is no fallback).
sub resolvrev
{
    my $host = shift @ARG ;

    return if ! $host ;
    return if ! defined &Socket::getaddrinfo ;

    return resolvrev_with_getaddrinfo( $host ) ;
}
+
# Reverse-resolve $host: getaddrinfo() for its address records, then
# getnameinfo() on each record. Returns the first name found, or undef
# on any failure (a diagnostic is printed).
# Fix: the loop declared "my ( $err, $reverse )", shadowing the outer
# getaddrinfo $err; the variables now carry distinct names, matching
# the convention already used in resolv_with_getaddrinfo().
sub resolvrev_with_getaddrinfo
{
    my $host = shift @ARG ;

    if ( ! $host ) { return ; }

    my ( $err_getaddrinfo, @res ) = Socket::getaddrinfo( $host, "", { socktype => Socket::SOCK_RAW } ) ;
    if ( $err_getaddrinfo ) {
        myprint( "Cannot getaddrinfo of $host: $err_getaddrinfo\n" ) ;
        return ;
    }

    my @name ;
    while( my $ai = shift @res ) {
        my ( $err_getnameinfo, $reverse ) = Socket::getnameinfo( $ai->{addr}, 0, Socket::NIx_NOSERV() ) ;
        if ( $err_getnameinfo ) {
            myprint( "Cannot getnameinfo of $host: $err_getnameinfo\n" ) ;
            return ;
        }
        $sync->{ debug } and myprint( "$host => $reverse\n" ) ;
        push @name, $reverse ;
    }

    return $name[0] ;
}
+
+
+
# Unit tests for imapsping(). Network-dependent: expect the
# lamiral.info test servers to answer on the imaps port.
sub tests_imapsping
{
    note( 'Entering tests_imapsping()' ) ;

    is( undef, imapsping( ), 'imapsping: no args => undef' ) ;
    is( undef, imapsping( 'hostnotexist' ), 'imapsping: hostnotexist => undef' ) ;
    is( 1, imapsping( 'imapsync.lamiral.info' ), 'imapsping: imapsync.lamiral.info => 1' ) ;
    is( 1, imapsping( 'ks2ipv6.lamiral.info' ), 'imapsping: ks2ipv6.lamiral.info => 1' ) ;
    note( 'Leaving tests_imapsping()' ) ;
    return ;
}
+
# TCP "ping" of a host on the imaps port ($IMAP_SSL_PORT).
# Returns tcpping()'s result: 1 open, 0 closed, undef unusable host.
sub imapsping
{
    return tcpping( shift, $IMAP_SSL_PORT ) ;
}
+
# Unit tests for tcpping(). Network-dependent: probe open (993) and
# closed (888) ports on the lamiral.info test servers.
sub tests_tcpping
{
    note( 'Entering tests_tcpping()' ) ;

    # Argument validation: anything but ( host, port ) gives undef.
    is( undef, tcpping( ), 'tcpping: no args => undef' ) ;
    is( undef, tcpping( 'hostnotexist' ), 'tcpping: one arg => undef' ) ;
    is( undef, tcpping( undef, 888 ), 'tcpping: arg undef, port => undef' ) ;
    is( undef, tcpping( 'hostnotexist', 993 ), 'tcpping: hostnotexist 993 => undef' ) ;
    is( undef, tcpping( 'hostnotexist', 888 ), 'tcpping: hostnotexist 888 => undef' ) ;
    is( 1, tcpping( 'imapsync.lamiral.info', 993 ), 'tcpping: imapsync.lamiral.info 993 => 1' ) ;
    is( 0, tcpping( 'imapsync.lamiral.info', 888 ), 'tcpping: imapsync.lamiral.info 888 => 0' ) ;
    is( 1, tcpping( '5.135.158.182', 993 ), 'tcpping: 5.135.158.182 993 => 1' ) ;
    is( 0, tcpping( '5.135.158.182', 888 ), 'tcpping: 5.135.158.182 888 => 0' ) ;

    # Net::Ping supports ipv6 only after release 1.50
    # http://cpansearch.perl.org/src/RURBAN/Net-Ping-2.59/Changes
    # Anyway I plan to avoid Net-Ping for that too long standing feature
    # Net-Ping is integrated in Perl itself, who knows ipv6 for a long time
    is( 1, tcpping( '2001:41d0:8:d8b6::1', 993 ), 'tcpping: 2001:41d0:8:d8b6::1 993 => 1' ) ;
    is( 0, tcpping( '2001:41d0:8:d8b6::1', 888 ), 'tcpping: 2001:41d0:8:d8b6::1 888 => 0' ) ;

    note( 'Leaving tests_tcpping()' ) ;
    return ;
}
+
# Check whether TCP port $port answers on $host within
# $TCP_PING_TIMEOUT seconds, using Net::Ping's connect-based service
# check. Returns 1 (port open), 0 (closed), or undef for bad arguments
# or when the ping layer gives no answer (e.g. unresolvable host).
sub tcpping
{
    return if 2 != scalar( @ARG ) ;
    my ( $host, $port ) = @ARG ;
    return if ! $host ;
    return if ! $port ;

    my $mytimeout = $TCP_PING_TIMEOUT ;
    require Net::Ping ;
    my $pinger = Net::Ping->new( ) ;
    $pinger->{port_num} = $port ;
    $pinger->service_check( 1 ) ;
    $pinger->hires( 1 ) ;
    my ( $ping_ok, $rtt, $ip ) = $pinger->ping( $host, $mytimeout ) ;
    return if ! defined $ping_ok ;
    my $rtt_approx = sprintf( "%.3f", $rtt ) ;
    $sync->{ debug } and myprint( "Host $host timeout $mytimeout port $port ok $ping_ok ip $ip acked in $rtt_approx s\n" ) ;
    $pinger->close( ) ;
    return $ping_ok ? 1 : 0 ;
}
+
# Unit tests for sslcheck(). Network-dependent: probe the imaps port
# of the lamiral.info test servers.
sub tests_sslcheck
{
    note( 'Entering tests_sslcheck()' ) ;

    my $mysync ;

    # sslcheck off (or unset) => undef, nothing probed.
    is( undef, sslcheck( $mysync ), 'sslcheck: no sslcheck => undef' ) ;

    $mysync = {
        sslcheck => 1,
    } ;

    is( 0, sslcheck( $mysync ), 'sslcheck: no host => 0' ) ;

    # An explicit tls1 means the user already decided: no probing.
    $mysync = {
        sslcheck => 1,
        host1 => 'imapsync.lamiral.info',
        tls1 => 1,
    } ;

    is( 0, sslcheck( $mysync ), 'sslcheck: tls1 => 0' ) ;

    $mysync = {
        sslcheck => 1,
        host1 => 'imapsync.lamiral.info',
    } ;


    # One probed side with an open imaps port => 1, and ssl1 turned on.
    is( 1, sslcheck( $mysync ), 'sslcheck: imapsync.lamiral.info => 1' ) ;
    is( 1, $mysync->{ssl1}, 'sslcheck: imapsync.lamiral.info => ssl1 1' ) ;

    $mysync->{sslcheck} = 0 ;
    is( undef, sslcheck( $mysync ), 'sslcheck: sslcheck off => undef' ) ;

    $mysync = {
        sslcheck => 1,
        host1 => 'imapsync.lamiral.info',
        host2 => 'test2.lamiral.info',
    } ;

    is( 2, sslcheck( $mysync ), 'sslcheck: imapsync.lamiral.info + test2.lamiral.info => 2' ) ;

    $mysync = {
        sslcheck => 1,
        host1 => 'imapsync.lamiral.info',
        host2 => 'test2.lamiral.info',
        tls1 => 1,
    } ;

    is( 1, sslcheck( $mysync ), 'sslcheck: imapsync.lamiral.info + test2.lamiral.info + tls1 => 1' ) ;

    note( 'Leaving tests_sslcheck()' ) ;
    return ;
}
+
# Guess whether ssl should be used on each side by probing the imaps
# port, unless the user already decided (any of portN/tlsN/sslN set)
# or --nosslcheck was given.
# Returns undef when sslcheck is off, otherwise the number of sides
# (0, 1 or 2) where ssl was turned on.
# Refactor: the host1 and host2 branches were copy-pasted duplicates;
# they now share sslcheck_host(), with byte-identical output.
sub sslcheck
{
    my $mysync = shift ;

    if ( ! $mysync->{sslcheck} ) {
        return ;
    }
    $mysync->{ debug } and myprint( "sslcheck\n" ) ;
    my $nb_on = 0 ;
    $nb_on += sslcheck_host( $mysync, 1 ) ;
    $nb_on += sslcheck_host( $mysync, 2 ) ;
    return $nb_on ;
}

# Probe one side ($side is 1 or 2) of $mysync.
# Skips the probe (returns 0) when the user already chose portN, tlsN
# or sslN, or gave no hostN. Otherwise probes the imaps port: returns 1
# and sets sslN when it is open, 0 when it is not.
sub sslcheck_host
{
    my ( $mysync, $side ) = @_ ;

    return 0 if defined $mysync->{ "port$side" } ;
    return 0 if defined $mysync->{ "tls$side" } ;
    return 0 if defined $mysync->{ "ssl$side" } ;
    return 0 if ! defined $mysync->{ "host$side" } ;

    myprint( "Host$side: probing ssl on port $IMAP_SSL_PORT ( use --nosslcheck to avoid this ssl probe ) \n" ) ;
    if ( probe_imapssl( $mysync->{ "host$side" } ) ) {
        $mysync->{ "ssl$side" } = 1 ;
        myprint( "Host$side: sslcheck detected open ssl port $IMAP_SSL_PORT so turning ssl on (use --nossl$side --notls$side to turn off SSL and TLS wizardry)\n" ) ;
        return 1 ;
    }
    myprint( "Host$side: sslcheck did not detected open ssl port $IMAP_SSL_PORT. Will use standard $IMAP_PORT port.\n" ) ;
    return 0 ;
}
+
+
# Fill in the default test1/test2 account credentials on
# test1.lamiral.info / test2.lamiral.info for --testslive, keeping any
# value the user already supplied.
sub testslive_init
{
    my $mysync = shift ;

    my %defaults = (
        host1     => 'test1.lamiral.info',
        user1     => 'test1',
        password1 => 'secret1',
        host2     => 'test2.lamiral.info',
        user2     => 'test2',
        password2 => 'secret2',
    ) ;
    foreach my $key ( keys %defaults ) {
        $mysync->{ $key } ||= $defaults{ $key } ;
    }
    return ;
}
+
# Same test accounts as testslive_init() but both sides live on the
# IPv6-capable box ks2ipv6.lamiral.info (--testslive6). User-supplied
# values win over the defaults.
sub testslive6_init
{
    my $mysync = shift ;

    my %defaults = (
        host1     => 'ks2ipv6.lamiral.info',
        user1     => 'test1',
        password1 => 'secret1',
        host2     => 'ks2ipv6.lamiral.info',
        user2     => 'test2',
        password2 => 'secret2',
    ) ;
    while ( my ( $key, $value ) = each %defaults ) {
        $mysync->{ $key } ||= $value ;
    }
    return ;
}
+
+
# Unit tests for backslash_caret(): only a backslash that ends a line
# (or the string) becomes a caret; inner backslashes are untouched.
sub tests_backslash_caret
{
    note( 'Entering tests_backslash_caret()' ) ;

    is( "lalala", backslash_caret( "lalala" ), 'backslash_caret: lalala => lalala' ) ;
    is( "lalala\n", backslash_caret( "lalala\n" ), 'backslash_caret: lalala => lalala 2nd' ) ;
    is( '^', backslash_caret( '\\' ), 'backslash_caret: \\ => ^' ) ;
    is( "^\n", backslash_caret( "\\\n" ), 'backslash_caret: \\ => ^' ) ;
    is( "\\lalala", backslash_caret( "\\lalala" ), 'backslash_caret: \\lalala => \\lalala' ) ;
    is( "\\lal\\ala", backslash_caret( "\\lal\\ala" ), 'backslash_caret: \\lal\\ala => \\lal\\ala' ) ;
    is( "\\lalala\n", backslash_caret( "\\lalala\n" ), 'backslash_caret: \\lalala => \\lalala 2nd' ) ;
    is( "lalala^\n", backslash_caret( "lalala\\\n" ), 'backslash_caret: lalala\\\n => lalala^\n' ) ;
    is( "lalala^\nlalala^\n", backslash_caret( "lalala\\\nlalala\\\n" ), 'backslash_caret: lalala\\\nlalala\\\n => lalala^\nlalala^\n' ) ;
    is( "lal\\ala^\nlalala^\n", backslash_caret( "lal\\ala\\\nlalala\\\n" ), 'backslash_caret: lal\\ala\\\nlalala\\\n => lal\\ala^\nlalala^\n' ) ;

    note( 'Leaving tests_backslash_caret()' ) ;
    return ;
}
+
# Replace each backslash ending a line (or ending the string) with a
# caret — turns Unix shell continuations "\" into Windows "^" in the
# usage text. A final newline is preserved: "\\\n" becomes "^\n".
# Fix: guard against undef input, for consistency with
# remove_edging_blanks()/sanitize(); previously an undef argument
# produced an uninitialized-value warning.
sub backslash_caret
{
    my $string = shift ;

    if ( ! defined $string )
    {
        return ;
    }

    # /m makes $ match before every newline, not only at string end.
    $string =~ s{\\ $ }{^}gxms ;

    return $string ;
}
+
# Unit tests for split_around_equal().
sub tests_split_around_equal
{
    note( 'Entering tests_split_around_equal()' ) ;

    is( undef, split_around_equal( ), 'split_around_equal: no args => undef' ) ;
    is_deeply( { toto => 'titi' }, { split_around_equal( 'toto=titi' ) }, 'split_around_equal: toto=titi => toto => titi' ) ;
    # A string with several "=" yields several key/value pairs.
    is_deeply( { A => 'B', C => 'D' }, { split_around_equal( 'A=B=C=D' ) }, 'split_around_equal: toto=titi => toto => titi' ) ;
    is_deeply( { A => 'B', C => 'D' }, { split_around_equal( 'A=B', 'C=D' ) }, 'split_around_equal: A=B C=D => A => B, C=>D' ) ;

    note( 'Leaving tests_split_around_equal()' ) ;
    return ;
}
+
# Split every "key=value" string argument on "=" and return the
# flattened key/value list, suitable for assignment to a hash.
# A string with several "=" yields several pairs ("A=B=C=D" => A B C D).
# Returns the empty list when called without arguments.
sub split_around_equal
{
    return if ! @ARG ;
    my @pairs ;
    foreach my $item ( @ARG ) {
        push @pairs, split /=/mxs, $item ;
    }
    return @pairs ;
}
+
+
+
# Unit tests for sig_install(): install handlers for USR1/USR2, then
# kill() this very process and check tototo() was called the expected
# number of times (it counts in $mysync->{ tototo_calls }).
# Unix only: the signal part is skipped on MSWin32.
sub tests_sig_install
{
    note( 'Entering tests_sig_install()' ) ;

    my $mysync ;
    is( undef, sig_install( ), 'sig_install: no args => undef' ) ;
    is( undef, sig_install( $mysync ), 'sig_install: arg undef => undef' ) ;
    $mysync = { } ;
    is( undef, sig_install( $mysync ), 'sig_install: empty hash => undef' ) ;

    SKIP: {
        Readonly my $SKIP_15 => 15 ;
        if ( 'MSWin32' eq $OSNAME ) { skip( 'Tests only for Unix', $SKIP_15 ) ; }
        # Default to ignore USR1 USR2 in case future install fails
        local $SIG{ USR1 } = local $SIG{ USR2 } = sub { } ;
        kill( 'USR1', $PROCESS_ID ) ;

        $mysync->{ debugsig } = 1 ;
        # Assign USR1 to call sub tototo
        # Surely a better value than undef should be returned when doing real signal stuff
        is( undef, sig_install( $mysync, 'tototo', 'USR1' ), 'sig_install: USR1 tototo' ) ;

        is( 1, kill( 'USR1', $PROCESS_ID ), 'sig_install: kill USR1 myself 1' ) ;
        is( 1, $mysync->{ tototo_calls }, 'sig_install: tototo call nb 1' ) ;

        #return ;
        # Assign USR2 to call sub tototo
        is( undef, sig_install( $mysync, 'tototo', 'USR2' ), 'sig_install: USR2 tototo' ) ;

        is( 1, kill( 'USR2', $PROCESS_ID ), 'sig_install: kill USR2 myself 1' ) ;
        is( 2, $mysync->{ tototo_calls }, 'sig_install: tototo call nb 2' ) ;

        is( 1, kill( 'USR1', $PROCESS_ID ), 'sig_install: kill USR1 myself 2' ) ;
        is( 3, $mysync->{ tototo_calls }, 'sig_install: tototo call nb 3' ) ;

        # Re-ignore both signals: the counter must stop moving.
        local $SIG{ USR1 } = local $SIG{ USR2 } = sub { } ;
        is( 1, kill( 'USR1', $PROCESS_ID ), 'sig_install: kill USR1 myself 3' ) ;
        is( 3, $mysync->{ tototo_calls }, 'sig_install: tototo call still nb 3' ) ;

        # Assign USR1 + USR2 to call sub tototo
        is( undef, sig_install( $mysync, 'tototo', 'USR1', 'USR2' ), 'sig_install: USR1 USR2 tototo' ) ;
        is( 1, kill( 'USR1', $PROCESS_ID ), 'sig_install: kill USR1 myself 4' ) ;
        is( 4, $mysync->{ tototo_calls }, 'sig_install: tototo call now nb 4' ) ;

        is( 1, kill( 'USR2', $PROCESS_ID ), 'sig_install: kill USR1 myself 2' ) ;
        is( 5, $mysync->{ tototo_calls }, 'sig_install: tototo call now nb 5' ) ;
    }


    note( 'Leaving tests_sig_install()' ) ;
    return ;
}
+
+
+#
# Install signal handlers: make every signal name in the remaining
# arguments (e.g. 'USR1') call the sub named (or referenced) by
# $mysubname with ( $mysync, $signame ) as arguments. Announces the
# matching kill command via output(). Returns undef; silently a no-op
# when any argument is missing.
sub sig_install
{
    my $mysync = shift ;
    if ( ! $mysync ) { return ; }
    my $mysubname = shift ;
    if ( ! $mysubname ) { return ; }

    if ( ! @ARG ) { return ; }

    my @signals = @ARG ;

    # Works for both a sub name string (symbolic ref) and a code ref.
    my $mysub = \&$mysubname ;
    #$mysync->{ debugsig } = 1 ;
    $mysync->{ debugsig } and myprint( "In sig_install with sub $mysubname and signal @ARG\n" ) ;

    # Closure: Perl passes the signal name as the handler's first
    # argument; forward it together with $mysync to the target sub.
    my $subsignal = sub {
        my $signame = shift ;
        $mysync->{ debugsig } and myprint( "In subsignal with $signame and $mysubname\n" ) ;
        &$mysub( $mysync, $signame ) ;
    } ;

    foreach my $signal ( @signals ) {
        $mysync->{ debugsig } and myprint( "Installing signal $signal to call sub $mysubname\n") ;
        output( $mysync, "kill -$signal $PROCESS_ID # special behavior: call to sub $mysubname\n" ) ;
        ## no critic (RequireLocalizedPunctuationVars)
        $SIG{ $signal } = $subsignal ;
    }
    return ;
}
+
+
# Toy signal-handler target used by tests_sig_install(): prints the
# extra arguments it received and counts its own calls in
# $mysync->{ tototo_calls }.
sub tototo
{
    my $mysync = shift ;
    myprint("In tototo with @ARG\n" ) ;
    $mysync->{ tototo_calls }++ ;
    return ;
}
+
# Parent PID of this process on Unix; on MSWin32, where getppid() is
# not usable, an explanatory placeholder string instead.
sub mygetppid
{
    return 'unknown under MSWin32 (too complicated)' if 'MSWin32' eq $OSNAME ;
    # Unix
    return getppid( ) ;
}
+
+
+
# Unit tests for toggle_sleep(): direct calls first, then via a USR1
# signal handler installed with sig_install() (coderef variant).
# Unix only: the signal part is skipped on MSWin32.
sub tests_toggle_sleep
{
    note( 'Entering tests_toggle_sleep()' ) ;

    is( undef, toggle_sleep( ), 'toggle_sleep: no args => undef' ) ;
    my $mysync ;
    is( undef, toggle_sleep( $mysync ), 'toggle_sleep: undef => undef' ) ;
    $mysync = { } ;
    is( undef, toggle_sleep( $mysync ), 'toggle_sleep: no maxsleep => undef' ) ;

    # Any nonzero value toggles to 0, then 0 toggles to $MAX_SLEEP.
    $mysync->{maxsleep} = 3 ;
    is( 0, toggle_sleep( $mysync ), 'toggle_sleep: 3 => 0' ) ;

    is( $MAX_SLEEP, toggle_sleep( $mysync ), "toggle_sleep: 0 => $MAX_SLEEP" ) ;
    is( 0, toggle_sleep( $mysync ), "toggle_sleep: $MAX_SLEEP => 0" ) ;
    is( $MAX_SLEEP, toggle_sleep( $mysync ), "toggle_sleep: 0 => $MAX_SLEEP" ) ;
    is( 0, toggle_sleep( $mysync ), "toggle_sleep: $MAX_SLEEP => 0" ) ;

    SKIP: {
        Readonly my $SKIP_9 => 9 ;
        if ( 'MSWin32' eq $OSNAME ) { skip( 'Tests only for Unix', $SKIP_9 ) ; }
        # Default to ignore USR1 USR2 in case future install fails
        local $SIG{ USR1 } = sub { } ;
        kill( 'USR1', $PROCESS_ID ) ;

        $mysync->{ debugsig } = 1 ;
        # Assign USR1 to call sub toggle_sleep
        is( undef, sig_install( $mysync, \&toggle_sleep, 'USR1' ), 'toggle_sleep: install USR1 toggle_sleep' ) ;


        # Each self-kill flips maxsleep between 0 and $MAX_SLEEP.
        $mysync->{maxsleep} = 4 ;
        is( 1, kill( 'USR1', $PROCESS_ID ), 'toggle_sleep: kill USR1 myself' ) ;
        is( 0, $mysync->{ maxsleep }, 'toggle_sleep: toggle_sleep called => sleeps are 0s' ) ;

        is( 1, kill( 'USR1', $PROCESS_ID ), 'toggle_sleep: kill USR1 myself again' ) ;
        is( $MAX_SLEEP, $mysync->{ maxsleep }, "toggle_sleep: toggle_sleep called => sleeps are ${MAX_SLEEP}s" ) ;

        is( 1, kill( 'USR1', $PROCESS_ID ), 'toggle_sleep: kill USR1 myself' ) ;
        is( 0, $mysync->{ maxsleep }, 'toggle_sleep: toggle_sleep called => sleeps are 0s' ) ;

        is( 1, kill( 'USR1', $PROCESS_ID ), 'toggle_sleep: kill USR1 myself again' ) ;
        is( $MAX_SLEEP, $mysync->{ maxsleep }, "toggle_sleep: toggle_sleep called => sleeps are ${MAX_SLEEP}s" ) ;
    }

    note( 'Leaving tests_toggle_sleep()' ) ;
    return ;
}
+
+
# Flip $mysync->{ maxsleep } between 0 and $MAX_SLEEP; used as a signal
# handler target (see tests_toggle_sleep / sig_install) to pause or
# restore throttling at runtime. Returns the new value, or undef when
# $mysync or its maxsleep field is missing.
sub toggle_sleep
{
    my $mysync = shift ;

    myprint("In toggle_sleep with @ARG\n" ) ;

    return if ! defined( $mysync ) ;
    return if ! defined( $mysync->{maxsleep} ) ;

    $mysync->{ maxsleep } = max( 0, $MAX_SLEEP - $mysync->{maxsleep} ) ;
    myprint("Resetting maxsleep to ", $mysync->{maxsleep}, "s\n" ) ;
    return $mysync->{maxsleep} ;
}
+
# Render the NAME, VERSION, USAGE and OPTIONS sections of this script's
# POD onto the given filehandle via Pod::Usage, without exiting
# (NOEXIT) and without requiring perldoc (-noperldoc).
sub mypod2usage
{
    my $fh_pod2usage = shift ;

    pod2usage(
        -exitval => 'NOEXIT',
        -noperldoc => 1,
        # verbose 99 + -sections: print only the listed sections.
        -verbose => 99,
        -sections => [ qw(NAME VERSION USAGE OPTIONS) ],
        -indent => 1,
        -loose => 1,
        -output => $fh_pod2usage,
    ) ;

    return ;
}
+
# Build the full --help text: the POD-generated usage (via
# mypod2usage) followed by usage_footer(). On Windows the Unix shell
# continuation backslashes are converted to carets.
# Returns the text, or undef when $mysync is missing or the in-memory
# filehandle cannot be opened.
sub usage
{
    my $mysync = shift ;

    if ( ! defined $mysync ) { return ; }

    my $usage = q{} ;
    my $usage_from_pod ;
    my $usage_footer = usage_footer( $mysync ) ;

    # pod2usage writes on a filehandle only and I want a variable
    open my $fh_pod2usage, ">", \$usage_from_pod
        or do { warn $OS_ERROR ; return ; } ;
    mypod2usage( $fh_pod2usage ) ;
    close $fh_pod2usage ;

    if ( 'MSWin32' eq $OSNAME ) {
        $usage_from_pod = backslash_caret( $usage_from_pod ) ;
    }
    $usage = join( q{}, $usage_from_pod, $usage_footer ) ;

    return( $usage ) ;
}
+
# Unit tests for usage(): the generated text must contain the main
# sections, and a missing $mysync must yield undef.
sub tests_usage
{
    note( 'Entering tests_usage()' ) ;

    my $usage ;
    like( $usage = usage( $sync ), qr/Name:/, 'usage: contains Name:' ) ;
    myprint( $usage ) ;
    like( $usage, qr/Version:/, 'usage: contains Version:' ) ;
    like( $usage, qr/Usage:/, 'usage: contains Usage:' ) ;
    like( $usage, qr/imapsync/, 'usage: contains imapsync' ) ;

    is( undef, usage( ), 'usage: no args => undef' ) ;

    note( 'Leaving tests_usage()' ) ;
    return ;
}
+
+
# Build the trailer appended to the usage text: local host info, the
# version/RCS string, the release-check line (a live check only when
# --releasecheck is on, otherwise the static hint string) and the
# homepage URL.
sub usage_footer
{
    my $mysync = shift ;

    my $footer = q{} ;

    my $localhost_info = localhost_info( $mysync ) ;
    my $rcs = $mysync->{rcs} ;
    my $homepage = homepage( ) ;

    my $imapsync_release = $STR_use_releasecheck ;

    if ( $mysync->{releasecheck} ) {
        $imapsync_release = check_last_release( ) ;
    }

    $footer = qq{$localhost_info
$rcs
$imapsync_release
$homepage
} ;
    return( $footer ) ;
}
+
+
+
# Leftover option descriptions kept as a heredoc constant.
# Unused by the rest of the program; candidate for deletion.
sub usage_complete
{
    # Unused, I guess this function could be deleted
    my $usage = <<'EOF' ;
--skipheader reg : Don't take into account header keyword
 matching reg ex: --skipheader 'X.*'

--skipsize : Don't take message size into account to compare
 messages on both sides. On by default.
 Use --no-skipsize for using size comparaison.
--allowsizemismatch : allow RFC822.SIZE != fetched msg size
 consider also --skipsize to avoid duplicate messages
 when running syncs more than one time per mailbox

--reconnectretry1 int : reconnect to host1 if connection is lost up to
 int times per imap command (default is 3)
--reconnectretry2 int : same as --reconnectretry1 but for host2
--split1 int : split the requests in several parts on host1.
 int is the number of messages handled per request.
 default is like --split1 100.
--split2 int : same thing on host2.
--nofixInboxINBOX : Don't fix Inbox INBOX mapping.
EOF
    return( $usage ) ;
}
+
# Option parsing front-end. On the command line it simply delegates to
# Getopt::Long::GetOptionsFromArray. Under CGI it emulates Getopt::Long
# by reading each option from the CGI parameters, coercing values to
# the declared type (i/s/f) and shape (scalar, @ array, % hash), and
# recording the equivalent command line in $mysync->{ cmdcgi }.
# Returns 1 on success, undef on malformed option specs or missing CGI
# object.
sub myGetOptions
{

    # Started as a copy of Luke Ross Getopt::Long::CGI
    # https://metacpan.org/release/Getopt-Long-CGI
    # So this sub function is under the same license as Getopt-Long-CGI Luke Ross wants it,
    # which was Perl 5.6 or later licenses at the date of the copy.

    my $mysync = shift @ARG ;
    my $arguments_ref = shift @ARG ;
    my %options = @ARG ;

    my $mycgi = $mysync->{cgi} ;

    if ( not under_cgi_context() ) {

        # Not CGI - pass upstream for normal command line handling
        return Getopt::Long::GetOptionsFromArray( $arguments_ref, %options ) ;
    }

    # We must be in CGI context now
    if ( ! defined( $mycgi ) ) { return ; }

    my $badthings = 0 ;
    foreach my $key ( sort keys %options ) {
        my $val = $options{$key} ;

        # Getopt::Long-style spec: $1 = name(s), $2 = "=TYPE"/":TYPE"
        # with TYPE in i/s/f, $3 = shape modifier + ! @ %.
        if ( $key !~ m/^([\w\d\|]+)([=:][isf])?([\+!\@\%])?$/mxs ) {
            $badthings++ ;
            next ; # Unknown item
        }

        # First alternative of "name|alias|..." is the CGI parameter name.
        # NOTE(review): split '|', $1, 1 has a limit of 1 so no split
        # happens and aliased specs keep the full "name|alias" string —
        # confirm intended for specs that carry aliases.
        my $name = [ split '|', $1, 1 ]->[0] ;

        if ( ( $3 || q{} ) eq '+' ) {
            ${$val} = $mycgi->param( $name ) ; # "Incremental" integer
        }
        elsif ( $2 ) {
            # Typed option: may appear several times in the query string.
            my @values = $mycgi->multi_param( $name ) ;
            my $type = $2 ;

            #myprint( "type[$type]values[@values]\$3[", $3 || q{}, "]val[$val]ref(val)[", ref($val), "]\n" ) ;
            if ( ( $3 || q{} ) eq '%' or ref( $val ) eq 'HASH' ) {
                # Hash-shaped option: each value is a "key=value" string.
                my %values = map { split /=/mxs, $_ } @values ;

                # Coerce hash values to the declared type.
                if ( $type =~ m/i$/mxs ) {
                    foreach my $k ( keys %values ) {
                        $values{$k} = int $values{$k} ;
                    }
                }
                elsif ( $type =~ m/f$/mxs ) {
                    foreach my $k ( keys %values ) {
                        $values{$k} = 0 + $values{$k};
                    }
                }
                if ( 'REF' eq ref $val ) {
                    %{ ${$val} } = %values ;
                }
                else {
                    %{$val} = %values ;
                }
            }
            else {
                # Scalar or array shape: coerce each value to the type.
                if ( $type =~ m/i$/mxs ) {
                    @values = map { q{} ne $_ ? int $_ : undef } @values ;
                }
                elsif ( $type =~ m/f$/mxs ) {
                    @values = map { 0 + $_ } @values ;
                }
                if ( ( $3 || q{} ) eq '@' ) {
                    @{ ${$val} } = @values ;
                    # Mirror the values as "--name value" pairs.
                    my @option = map { +( "--$name", "$_" ) } @values ;
                    push @{ $mysync->{ cmdcgi } }, @option ;
                }
                elsif ( ref( $val ) eq 'ARRAY' ) {
                    @{$val} = @values ;
                }
                elsif ( my $value = $values[0] )
                {
                    ${$val} = $value ;
                    push @{ $mysync->{ cmdcgi } }, "--$name", $value ;
                }
                else
                {
                    # Absent or false value: leave the target untouched.
                }
            }
        }
        else
        {
            # Checkbox
            # Considers only --name
            # Should consider also --no-name and --noname
            my $value = $mycgi->param( $name ) ;
            if ( $value )
            {
                ${$val} = 1 ;
                push @{ $mysync->{ cmdcgi } }, "--$name" ;
            }
            else
            {
                ${$val} = undef ;
            }
        }
    }
    if ( $badthings ) {
        return ; # undef or ()
    }
    else {
        return ( 1 ) ;
    }
}
+
+
+my @blabla ; # just used to check get_options_cgi() with an array
+
+sub tests_get_options_cgi_context
+{
+ note( 'Entering tests_get_options_cgi()' ) ;
+
+# Temporary, have to think harder about testing CGI context in command line --tests
+ # API:
+ # * input arguments: two ways, command line or CGI
+ # * the program arguments
+ # * QUERY_STRING env variable
+ # * return
+ # * QUERY_STRING length
+
+ # CGI context
+ local $ENV{SERVER_SOFTWARE} = 'Votre serviteur' ;
+
+ # Real full test
+ # = 'host1=test1.lamiral.info&user1=test1&password1=secret1&host2=test2.lamiral.info&user2=test2&password2=secret2&debugenv=on'
+ my $mysync ;
+ is( undef, get_options( $mysync ), 'get_options cgi context: no CGI module => undef' ) ;
+
+ require CGI ;
+ CGI->import( qw( -no_debug -utf8 ) ) ;
+
+ is( undef, get_options( $mysync ), 'get_options cgi context: no CGI param => undef' ) ;
+ # Testing boolean
+ $mysync->{cgi} = CGI->new( 'version=on&debugenv=on' ) ;
+ local $ENV{'QUERY_STRING'} = 'version=on&debugenv=on' ;
+ is( 22, get_options( $mysync ), 'get_options cgi context: QUERY_STRING => 22' ) ;
+ is( 1, $mysync->{ version }, 'get_options cgi context: --version => 1' ) ;
+ # debugenv is not allowed in cgi context
+ is( undef, $mysync->{debugenv}, 'get_options cgi context: $mysync->{debugenv} => undef' ) ;
+
+ # QUERY_STRING in this test is only for return value of get_options
+ # Have to think harder, GET/POST context, is this return value a good thing?
+ local $ENV{'QUERY_STRING'} = 'host1=test1.lamiral.info&user1=test1' ;
+ $mysync->{cgi} = CGI->new( 'host1=test1.lamiral.info&user1=test1' ) ;
+ is( 36, get_options( $mysync, ), 'get_options cgi context: QUERY_STRING => 36' ) ;
+ is( 'test1', $mysync->{user1}, 'get_options cgi context: $mysync->{user1} => test1' ) ;
+ #local $ENV{'QUERY_STRING'} = undef ;
+
+ # Testing @
+ $mysync->{cgi} = CGI->new( 'blabla=fd1' ) ;
+ get_options( $mysync ) ;
+ is_deeply( [ 'fd1' ], [ @blabla ], 'get_options cgi context: @blabla => fd1' ) ;
+ $mysync->{cgi} = CGI->new( 'blabla=fd1&blabla=fd2' ) ;
+ get_options( $mysync ) ;
+ is_deeply( [ 'fd1', 'fd2' ], [ @blabla ], 'get_options cgi context: @blabla => fd1, fd2' ) ;
+
+ # Testing s@ as ref
+ $mysync->{cgi} = CGI->new( 'folder=fd1' ) ;
+ get_options( $mysync ) ;
+ is_deeply( [ 'fd1' ], $mysync->{ folder }, 'get_options cgi context: $mysync->{ folder } => fd1' ) ;
+ $mysync->{cgi} = CGI->new( 'folder=fd1&folder=fd2' ) ;
+ get_options( $mysync ) ;
+ is_deeply( [ 'fd1', 'fd2' ], $mysync->{ folder }, 'get_options cgi context: $mysync->{ folder } => fd1, fd2' ) ;
+
+ # Testing %
+ $mysync->{cgi} = CGI->new( 'f1f2h=s1=d1&f1f2h=s2=d2&f1f2h=s3=d3' ) ;
+ get_options( $mysync ) ;
+
+ is_deeply( { 's1' => 'd1', 's2' => 'd2', 's3' => 'd3' },
+ $mysync->{f1f2h}, 'get_options cgi context: f1f2h => s1=d1 s2=d2 s3=d3' ) ;
+
+ # Testing boolean ! with --noxxx, doesnot work
+ $mysync->{cgi} = CGI->new( 'nodry=on' ) ;
+ get_options( $mysync ) ;
+ is( undef, $mysync->{dry}, 'get_options cgi context: --nodry => $mysync->{dry} => undef' ) ;
+
+ $mysync->{cgi} = CGI->new( 'host1=example.com' ) ;
+ get_options( $mysync ) ;
+ is( 'example.com', $mysync->{host1}, 'get_options cgi context: --host1=example.com => $mysync->{host1} => example.com' ) ;
+
+ #myprint( Data::Dumper->Dump( [ $mysync ] ) ) ;
+ $mysync->{cgi} = CGI->new( 'simulong=' ) ;
+ get_options( $mysync ) ;
+ is( undef, $mysync->{simulong}, 'get_options cgi context: --simulong= => $mysync->{simulong} => undef' ) ;
+
+ $mysync->{cgi} = CGI->new( 'simulong' ) ;
+ get_options( $mysync ) ;
+ is( undef, $mysync->{simulong}, 'get_options cgi context: --simulong => $mysync->{simulong} => undef' ) ;
+
+ $mysync->{cgi} = CGI->new( 'simulong=4' ) ;
+ get_options( $mysync ) ;
+ is( 4, $mysync->{simulong}, 'get_options cgi context: --simulong=4 => $mysync->{simulong} => 4' ) ;
+ is( undef, $mysync->{ folder }, 'get_options cgi context: --simulong=4 => $mysync->{ folder } => undef' ) ;
+ #myprint( Data::Dumper->Dump( [ $mysync ] ) ) ;
+
+ $mysync ={} ;
+ $mysync->{cgi} = CGI->new( 'justfoldersizes=on' ) ;
+ get_options( $mysync ) ;
+ is( 1, $mysync->{ justfoldersizes }, 'get_options cgi context: --justfoldersizes=1 => justfoldersizes => 1' ) ;
+ myprint( Data::Dumper->Dump( [ $mysync ] ) ) ;
+
+ note( 'Leaving tests_get_options_cgi_context()' ) ;
+ return ;
+}
+
+
+
+sub get_options_cgi
+{
+ # In CGI context arguments are not in @ARGV but in QUERY_STRING variable (with GET).
+ my $mysync = shift @ARG ;
+ $mysync->{cgi} || return ;
+ my @arguments = @ARG ;
+ # final 0 is used to print usage when no option is given
+ my $numopt = length $ENV{'QUERY_STRING'} || 1 ;
+ $mysync->{f1f2h} = {} ;
+ my $opt_ret = myGetOptions(
+ $mysync,
+ \@arguments,
+ 'abort' => \$mysync->{abort},
+ 'host1=s' => \$mysync->{ host1 },
+ 'host2=s' => \$mysync->{ host2 },
+ 'user1=s' => \$mysync->{ user1 },
+ 'user2=s' => \$mysync->{ user2 },
+ 'password1=s' => \$mysync->{password1},
+ 'password2=s' => \$mysync->{password2},
+ 'dry!' => \$mysync->{dry},
+ 'version' => \$mysync->{version},
+ 'ssl1!' => \$mysync->{ssl1},
+ 'ssl2!' => \$mysync->{ssl2},
+ 'tls1!' => \$mysync->{tls1},
+ 'tls2!' => \$mysync->{tls2},
+ 'justlogin!' => \$mysync->{justlogin},
+ 'justconnect!' => \$mysync->{justconnect},
+ 'addheader!' => \$mysync->{addheader},
+ 'automap!' => \$mysync->{automap},
+ 'justautomap!' => \$mysync->{justautomap},
+ 'gmail1' => \$mysync->{gmail1},
+ 'gmail2' => \$mysync->{gmail2},
+ 'office1' => \$mysync->{office1},
+ 'office2' => \$mysync->{office2},
+ 'exchange1' => \$mysync->{exchange1},
+ 'exchange2' => \$mysync->{exchange2},
+ 'domino1' => \$mysync->{domino1},
+ 'domino2' => \$mysync->{domino2},
+ 'f1f2=s@' => \$mysync->{f1f2},
+ 'f1f2h=s%' => \$mysync->{f1f2h},
+ 'folder=s@' => \$mysync->{ folder },
+ 'blabla=s' => \@blabla,
+ 'testslive!' => \$mysync->{testslive},
+ 'testslive6!' => \$mysync->{testslive6},
+ 'releasecheck!' => \$mysync->{releasecheck},
+ 'simulong=i' => \$mysync->{simulong},
+ 'debugsleep=f' => \$mysync->{debugsleep},
+ 'subfolder1=s' => \$mysync->{ subfolder1 },
+ 'subfolder2=s' => \$mysync->{ subfolder2 },
+ 'justfolders!' => \$mysync->{ justfolders },
+ 'justfoldersizes!' => \$mysync->{ justfoldersizes },
+ 'delete1!' => \$mysync->{ delete1 },
+ 'delete2!' => \$mysync->{ delete2 },
+ 'delete2duplicates!' => \$mysync->{ delete2duplicates },
+ 'tail!' => \$mysync->{tail},
+
+# blabla and f1f2h=s% could be removed but
+# tests_get_options_cgi() should be split before
+# with a sub tests_myGetOptions()
+ ) ;
+
+ $mysync->{ debug } and output( $mysync, "get options: [$opt_ret][$numopt]\n" ) ;
+
+ if ( ! $opt_ret ) {
+ return ;
+ }
+ return $numopt ;
+}
+
+sub get_options_cmd
+{
+ my $mysync = shift @ARG ;
+ my @arguments = @ARG ;
+ my $mycgi = $mysync->{cgi} ;
+ # final 0 is used to print usage when no option is given on command line
+ my $numopt = scalar @arguments || 0 ;
+ my $argv = join "\x00", @arguments ;
+
+ if ( $argv =~ m/-delete\x002/x ) {
+ output( $mysync, "May be you mean --delete2 instead of --delete 2\n" ) ;
+ return ;
+ }
+ $mysync->{f1f2h} = {} ;
+ my $opt_ret = myGetOptions(
+ $mysync,
+ \@arguments,
+ 'debug!' => \$mysync->{ debug },
+ 'debuglist!' => \$debuglist,
+ 'debugcontent!' => \$debugcontent,
+ 'debugsleep=f' => \$mysync->{debugsleep},
+ 'debugflags!' => \$debugflags,
+ 'debugimap!' => \$debugimap,
+ 'debugimap1!' => \$debugimap1,
+ 'debugimap2!' => \$debugimap2,
+ 'debugdev!' => \$debugdev,
+ 'debugmemory!' => \$mysync->{debugmemory},
+ 'debugfolders!' => \$mysync->{debugfolders},
+ 'debugssl=i' => \$mysync->{debugssl},
+ 'debugcgi!' => \$debugcgi,
+ 'debugenv!' => \$mysync->{debugenv},
+ 'debugsig!' => \$mysync->{debugsig},
+ 'debuglabels!' => \$mysync->{debuglabels},
+ 'simulong=i' => \$mysync->{simulong},
+ 'abort' => \$mysync->{abort},
+ 'host1=s' => \$mysync->{ host1 },
+ 'host2=s' => \$mysync->{ host2 },
+ 'port1=i' => \$mysync->{port1},
+ 'port2=i' => \$mysync->{port2},
+ 'inet4|ipv4' => \$mysync->{inet4},
+ 'inet6|ipv6' => \$mysync->{inet6},
+ 'user1=s' => \$mysync->{ user1 },
+ 'user2=s' => \$mysync->{ user2 },
+ 'gmail1' => \$mysync->{gmail1},
+ 'gmail2' => \$mysync->{gmail2},
+ 'office1' => \$mysync->{office1},
+ 'office2' => \$mysync->{office2},
+ 'exchange1' => \$mysync->{exchange1},
+ 'exchange2' => \$mysync->{exchange2},
+ 'domino1' => \$mysync->{domino1},
+ 'domino2' => \$mysync->{domino2},
+ 'domain1=s' => \$domain1,
+ 'domain2=s' => \$domain2,
+ 'password1=s' => \$mysync->{password1},
+ 'password2=s' => \$mysync->{password2},
+ 'passfile1=s' => \$mysync->{ passfile1 },
+ 'passfile2=s' => \$mysync->{ passfile2 },
+ 'authmd5!' => \$authmd5,
+ 'authmd51!' => \$authmd51,
+ 'authmd52!' => \$authmd52,
+ 'sep1=s' => \$mysync->{ sep1 },
+ 'sep2=s' => \$mysync->{ sep2 },
+ 'sanitize!' => \$mysync->{ sanitize },
+ 'folder=s@' => \$mysync->{ folder },
+ 'folderrec=s' => \@folderrec,
+ 'include=s' => \@include,
+ 'exclude=s' => \@exclude,
+ 'noexclude' => \$mysync->{noexclude},
+ 'folderfirst=s' => \@folderfirst,
+ 'folderlast=s' => \@folderlast,
+ 'prefix1=s' => \$prefix1,
+ 'prefix2=s' => \$prefix2,
+ 'subfolder1=s' => \$mysync->{ subfolder1 },
+ 'subfolder2=s' => \$mysync->{ subfolder2 },
+ 'fixslash2!' => \$mysync->{ fixslash2 },
+ 'fixInboxINBOX!' => \$fixInboxINBOX,
+ 'regextrans2=s@' => \$mysync->{ regextrans2 },
+ 'mixfolders!' => \$mixfolders,
+ 'skipemptyfolders!' => \$mysync->{ skipemptyfolders },
+ 'regexmess=s' => \@regexmess,
+ 'noregexmess' => \$mysync->{noregexmess},
+ 'skipmess=s' => \@skipmess,
+ 'pipemess=s' => \@pipemess,
+ 'pipemesscheck!' => \$pipemesscheck,
+ 'disarmreadreceipts!' => \$disarmreadreceipts,
+ 'regexflag=s' => \@regexflag,
+ 'noregexflag' => \$mysync->{noregexflag},
+ 'filterflags!' => \$filterflags,
+ 'flagscase!' => \$flagscase,
+ 'syncflagsaftercopy!' => \$syncflagsaftercopy,
+ 'resyncflags!' => \$mysync->{ resyncflags },
+ 'synclabels!' => \$mysync->{ synclabels },
+ 'resynclabels!' => \$mysync->{ resynclabels },
+ 'delete|delete1!' => \$mysync->{ delete1 },
+ 'delete2!' => \$mysync->{ delete2 },
+ 'delete2duplicates!' => \$mysync->{ delete2duplicates },
+ 'delete2folders!' => \$delete2folders,
+ 'delete2foldersonly=s' => \$delete2foldersonly,
+ 'delete2foldersbutnot=s' => \$delete2foldersbutnot,
+ 'syncinternaldates!' => \$syncinternaldates,
+ 'idatefromheader!' => \$idatefromheader,
+ 'syncacls!' => \$syncacls,
+ 'maxsize=i' => \$mysync->{ maxsize },
+ 'appendlimit=i' => \$mysync->{ appendlimit },
+ 'truncmess=i' => \$mysync->{ truncmess },
+ 'minsize=i' => \$minsize,
+ 'maxage=f' => \$maxage,
+ 'minage=f' => \$minage,
+ 'search=s' => \$search,
+ 'search1=s' => \$mysync->{ search1 },
+ 'search2=s' => \$mysync->{ search2 },
+ 'foldersizes!' => \$mysync->{ foldersizes },
+ 'foldersizesatend!' => \$mysync->{ foldersizesatend },
+ 'dry!' => \$mysync->{dry},
+ 'expunge1|expunge!' => \$mysync->{ expunge1 },
+ 'expunge2!' => \$mysync->{ expunge2 },
+ 'uidexpunge2!' => \$mysync->{ uidexpunge2 },
+ 'subscribed' => \$subscribed,
+ 'subscribe!' => \$subscribe,
+ 'subscribeall|subscribe_all!' => \$subscribeall,
+ 'justbanner!' => \$justbanner,
+ 'justfolders!'=> \$mysync->{ justfolders },
+ 'justfoldersizes!' => \$mysync->{ justfoldersizes },
+ 'fast!' => \$fast,
+ 'version' => \$mysync->{version},
+ 'help' => \$help,
+ 'timeout=i' => \$timeout,
+ 'timeout1=i' => \$mysync->{h1}->{timeout},
+ 'timeout2=i' => \$mysync->{h2}->{timeout},
+ 'skipheader=s' => \$skipheader,
+ 'useheader=s' => \@useheader,
+ 'wholeheaderifneeded!' => \$wholeheaderifneeded,
+ 'messageidnodomain!' => \$messageidnodomain,
+ 'skipsize!' => \$skipsize,
+ 'allowsizemismatch!' => \$allowsizemismatch,
+ 'fastio1!' => \$fastio1,
+ 'fastio2!' => \$fastio2,
+ 'sslcheck!' => \$mysync->{sslcheck},
+ 'ssl1!' => \$mysync->{ssl1},
+ 'ssl2!' => \$mysync->{ssl2},
+ 'ssl1_ssl_version=s' => \$mysync->{h1}->{sslargs}->{SSL_version},
+ 'ssl2_ssl_version=s' => \$mysync->{h2}->{sslargs}->{SSL_version},
+ 'sslargs1=s%' => \$mysync->{h1}->{sslargs},
+ 'sslargs2=s%' => \$mysync->{h2}->{sslargs},
+ 'tls1!' => \$mysync->{tls1},
+ 'tls2!' => \$mysync->{tls2},
+ 'uid1!' => \$uid1,
+ 'uid2!' => \$uid2,
+ 'authmech1=s' => \$authmech1,
+ 'authmech2=s' => \$authmech2,
+ 'authuser1=s' => \$authuser1,
+ 'authuser2=s' => \$authuser2,
+ 'proxyauth1' => \$proxyauth1,
+ 'proxyauth2' => \$proxyauth2,
+ 'split1=i' => \$split1,
+ 'split2=i' => \$split2,
+ 'buffersize=i' => \$buffersize,
+ 'reconnectretry1=i' => \$reconnectretry1,
+ 'reconnectretry2=i' => \$reconnectretry2,
+ 'tests!' => \$mysync->{ tests },
+ 'testsdebug|tests_debug!' => \$mysync->{ testsdebug },
+ 'testsunit=s@' => \$mysync->{testsunit},
+ 'testslive!' => \$mysync->{testslive},
+ 'testslive6!' => \$mysync->{testslive6},
+ 'justlogin!' => \$mysync->{justlogin},
+ 'justconnect!' => \$mysync->{justconnect},
+ 'tmpdir=s' => \$mysync->{ tmpdir },
+ 'pidfile=s' => \$mysync->{pidfile},
+ 'pidfilelocking!' => \$mysync->{pidfilelocking},
+ 'sigexit=s@' => \$mysync->{ sigexit },
+ 'sigreconnect=s@' => \$mysync->{ sigreconnect },
+ 'sigignore=s@' => \$mysync->{ sigignore },
+ 'releasecheck!' => \$mysync->{releasecheck},
+ 'modulesversion|modules_version!' => \$modulesversion,
+ 'usecache!' => \$usecache,
+ 'cacheaftercopy!' => \$cacheaftercopy,
+ 'debugcache!' => \$debugcache,
+ 'useuid!' => \$useuid,
+ 'addheader!' => \$mysync->{addheader},
+ 'exitwhenover=i' => \$mysync->{ exitwhenover },
+ 'checkselectable!' => \$mysync->{ checkselectable },
+ 'checkfoldersexist!' => \$mysync->{ checkfoldersexist },
+ 'checkmessageexists!' => \$checkmessageexists,
+ 'expungeaftereach!' => \$mysync->{ expungeaftereach },
+ 'abletosearch!' => \$mysync->{abletosearch},
+ 'abletosearch1!' => \$mysync->{abletosearch1},
+ 'abletosearch2!' => \$mysync->{abletosearch2},
+ 'showpasswords!' => \$mysync->{showpasswords},
+ 'maxlinelength=i' => \$maxlinelength,
+ 'maxlinelengthcmd=s' => \$maxlinelengthcmd,
+ 'minmaxlinelength=i' => \$minmaxlinelength,
+ 'debugmaxlinelength!' => \$debugmaxlinelength,
+ 'fixcolonbug!' => \$fixcolonbug,
+ 'create_folder_old!' => \$create_folder_old,
+ 'maxmessagespersecond=f' => \$mysync->{maxmessagespersecond},
+ 'maxbytespersecond=i' => \$mysync->{maxbytespersecond},
+ 'maxbytesafter=i' => \$mysync->{maxbytesafter},
+ 'maxsleep=f' => \$mysync->{maxsleep},
+ 'skipcrossduplicates!' => \$skipcrossduplicates,
+ 'debugcrossduplicates!' => \$debugcrossduplicates,
+ 'log!' => \$mysync->{log},
+ 'tail!' => \$mysync->{tail},
+ 'logfile=s' => \$mysync->{logfile},
+ 'logdir=s' => \$mysync->{logdir},
+ 'errorsmax=i' => \$mysync->{errorsmax},
+ 'errorsdump!' => \$mysync->{errorsdump},
+ 'fetch_hash_set=s' => \$fetch_hash_set,
+ 'automap!' => \$mysync->{automap},
+ 'justautomap!' => \$mysync->{justautomap},
+ 'id!' => \$mysync->{id},
+ 'f1f2=s@' => \$mysync->{f1f2},
+ 'nof1f2' => \$mysync->{nof1f2},
+ 'f1f2h=s%' => \$mysync->{f1f2h},
+ 'justfolderlists!' => \$mysync->{justfolderlists},
+ 'delete1emptyfolders' => \$mysync->{delete1emptyfolders},
+ ) ;
+ #myprint( Data::Dumper->Dump( [ $mysync ] ) ) ;
+ $mysync->{ debug } and output( $mysync, "get options: [$opt_ret][$numopt]\n" ) ;
+ my $numopt_after = scalar @arguments ;
+ #myprint( "get options: [$opt_ret][$numopt][$numopt_after]\n" ) ;
+ if ( $numopt_after ) {
+ myprint(
+ "Extra arguments found: @arguments\n",
+ "It usually means a quoting issue in the command line ",
+ "or some misspelling options.\n",
+ ) ;
+ return ;
+ }
+ if ( ! $opt_ret ) {
+ return ;
+ }
+ return $numopt ;
+}
+
+
+
+sub tests_get_options
+{
+ note( 'Entering tests_get_options()' ) ;
+
+ # CAVEAT: still setting global variables, be careful
+ # with tests, the context increases! $debug stays on for example.
+ # API:
+ # * input arguments: two ways, command line or CGI
+ # * the program arguments
+ # * QUERY_STRING env variable
+ # * return
+ # * undef if bad things happened like
+ # * options not known
+ # * --delete 2 input
+ # * number of arguments or QUERY_STRING length
+ my $mysync = { } ;
+ is( undef, get_options( $mysync, qw( --noexist ) ), 'get_options: --noexist => undef' ) ;
+ is( undef, $mysync->{ noexist }, 'get_options: --noexist => undef' ) ;
+ $mysync = { } ;
+ is( undef, get_options( $mysync, qw( --lalala --noexist --version ) ), 'get_options: --lalala --noexist --version => undef' ) ;
+ is( 1, $mysync->{ version }, 'get_options: --version => 1' ) ;
+ is( undef, $mysync->{ noexist }, 'get_options: --noexist => undef' ) ;
+ $mysync = { } ;
+ is( 1, get_options( $mysync, qw( --delete2 ) ), 'get_options: --delete2 => 1' ) ;
+ is( 1, $mysync->{ delete2 }, 'get_options: --delete2 => var delete2 = 1' ) ;
+ $mysync = { } ;
+ is( undef, get_options( $mysync, qw( --delete 2 ) ), 'get_options: --delete 2 => var undef' ) ;
+ is( undef, $mysync->{ delete1 }, 'get_options: --delete 2 => var still undef ; good!' ) ;
+ $mysync = { } ;
+ is( undef, get_options( $mysync, "--delete 2" ), 'get_options: --delete 2 => undef' ) ;
+
+ is( 1, get_options( $mysync, "--version" ), 'get_options: --version => 1' ) ;
+ is( 1, get_options( $mysync, "--help" ), 'get_options: --help => 1' ) ;
+
+ is( undef, get_options( $mysync, qw( --noexist --version ) ), 'get_options: --debug --noexist --version => undef' ) ;
+ is( 1, get_options( $mysync, qw( --version ) ), 'get_options: --version => 1' ) ;
+ is( undef, get_options( $mysync, qw( extra ) ), 'get_options: extra => undef' ) ;
+ is( undef, get_options( $mysync, qw( extra1 --version extra2 ) ), 'get_options: extra1 --version extra2 => undef' ) ;
+
+ $mysync = { } ;
+ is( 2, get_options( $mysync, qw( --host1 HOST_01) ), 'get_options: --host1 HOST_01 => 1' ) ;
+ is( 'HOST_01', $mysync->{ host1 }, 'get_options: --host1 HOST_01 => HOST_01' ) ;
+ #myprint( Data::Dumper->Dump( [ $mysync ] ) ) ;
+
+ note( 'Leaving tests_get_options()' ) ;
+ return ;
+}
+
+
+
+sub get_options
+{
+    # Parse program options, dispatching to the CGI parser or the
+    # command-line parser depending on the execution context, then
+    # prune the sync hash: keys whose value is undef, or an empty
+    # array reference, are removed so later code can rely on exists().
+    # Returns whatever the selected parser returned.
+    my $mysync = shift @ARG ;
+    my @arguments = @ARG ;
+
+    my $ret = under_cgi_context( )
+        ? get_options_cgi( $mysync, @arguments )
+        : get_options_cmd( $mysync, @arguments ) ;
+
+    foreach my $key ( sort keys %{ $mysync } ) {
+        my $value = $mysync->{ $key } ;
+        if ( ! defined $value ) {
+            delete $mysync->{ $key } ;
+        }
+        elsif ( 'ARRAY' eq ref( $value ) and 0 == scalar( @{ $value } ) ) {
+            delete $mysync->{ $key } ;
+        }
+    }
+    return $ret ;
+}
+
+sub testunitsession
+{
+ # Run the unit-test functions requested via --testsunit (collected
+ # into $mysync->{ testsunit } by the option parser). Does nothing
+ # unless at least one function name was given.
+ my $mysync = shift ;
+
+ if ( ! $mysync ) { return ; }
+ if ( ! $mysync->{ testsunit } ) { return ; }
+
+ my @functions = @{ $mysync->{ testsunit } } ;
+
+ if ( ! @functions ) { return ; }
+
+ SKIP: {
+ # Guard kept for the Test::More SKIP idiom; @functions is known
+ # non-empty here because of the early return above.
+ if ( ! @functions ) { skip 'No test in normal run' ; }
+ testsunit( @functions ) ;
+ done_testing( ) ;
+ }
+ return ;
+}
+
+sub tests_count_0s
+{
+ # Unit tests for count_0s(). NB: the note() labels say
+ # "tests_count_zeros" while the sub is named tests_count_0s —
+ # historical mismatch in the log labels only.
+ note( 'Entering tests_count_zeros()' ) ;
+ is( 0, count_0s( ), 'count_0s: no parameters => 0' ) ;
+ is( 1, count_0s( 0 ), 'count_0s: 0 => 1' ) ;
+ is( 0, count_0s( 1 ), 'count_0s: 1 => 0' ) ;
+ is( 1, count_0s( 1, 0, 1 ), 'count_0s: 1, 0, 1 => 1' ) ;
+ is( 2, count_0s( 1, 0, 1, 0 ), 'count_0s: 1, 0, 1, 0 => 2' ) ;
+ note( 'Leaving tests_count_zeros()' ) ;
+ return ;
+}
+sub count_0s
+{
+    # Return how many of the given values compare numerically equal
+    # to zero. An empty argument list yields 0.
+    my @values = @_ ;
+
+    return 0 if ! @values ;
+    return scalar grep { 0 == $_ } @values ;
+}
+
+sub tests_report_failures
+{
+ # Unit tests for report_failures(): undef on no input, and one
+ # "nb <position> - <name>" line per failed ({ok => 0}) entry.
+ note( 'Entering tests_report_failures()' ) ;
+
+ is( undef, report_failures( ), 'report_failures: no parameters => undef' ) ;
+ is( "nb 1 - first\n", report_failures( ({'ok' => 0, name => 'first'}) ), 'report_failures: "first" failed => nb 1 - first' ) ;
+ is( q{}, report_failures( ( {'ok' => 1, name => 'first'} ) ), 'report_failures: "first" success =>' ) ;
+ is( "nb 2 - second\n", report_failures( ( {'ok' => 1, name => 'second'}, {'ok' => 0, name => 'second'} ) ), 'report_failures: "second" failed => nb 2 - second' ) ;
+ is( "nb 1 - first\nnb 2 - second\n", report_failures( ( {'ok' => 0, name => 'first'}, {'ok' => 0, name => 'second'} ) ), 'report_failures: both failed => nb 1 - first nb 2 - second' ) ;
+ note( 'Leaving tests_report_failures()' ) ;
+ return ;
+}
+
+sub report_failures
+{
+    # Build a human-readable summary of failed checks.
+    # Input: a list of hashrefs with keys 'ok' (truthy on success)
+    # and 'name'. Output: one "nb <position> - <name>\n" line per
+    # failed check ('NONAME' when the name is missing/empty), the
+    # empty string when nothing failed, undef when the list is empty.
+    my @checks = @_ ;
+
+    return if ! @checks ;
+
+    my $position = 0 ;
+    my $summary = q{} ;
+    for my $check ( @checks ) {
+        $position += 1 ;
+        next if $check->{ 'ok' } ;
+        my $label = $check->{ 'name' } || 'NONAME' ;
+        $summary .= "nb $position - $label\n" ;
+    }
+    return $summary ;
+}
+
+sub tests_true
+{
+ # Trivial always-passing test; used by tests_testsunit() as a
+ # known-good function name for testsunit() to dispatch to.
+ note( 'Entering tests_true()' ) ;
+
+ is( 1, 1, 'true: 1 is 1' ) ;
+ note( 'Leaving tests_true()' ) ;
+ return ;
+}
+
+sub tests_testsunit
+{
+ # Unit tests for testsunit(): it returns undef for every input,
+ # including missing/empty/unknown function names.
+ note( 'Entering tests_testunit()' ) ;
+
+ is( undef, testsunit( ), 'testsunit: no parameters => undef' ) ;
+ is( undef, testsunit( undef ), 'testsunit: an undef parameter => undef' ) ;
+ is( undef, testsunit( q{} ), 'testsunit: an empty parameter => undef' ) ;
+ is( undef, testsunit( 'idonotexist' ), 'testsunit: a do not exist function as parameter => undef' ) ;
+ is( undef, testsunit( 'tests_true' ), 'testsunit: tests_true => undef' ) ;
+ note( 'Leaving tests_testunit()' ) ;
+ return ;
+}
+
+sub testsunit
+{
+    # Call each named unit-test function in turn. Names that are
+    # empty, not present in the symbol table, or declared but not
+    # defined are skipped with a warning printed via myprint().
+    # Always returns undef.
+    my @names = @_ ;
+
+    if ( ! @names ) {
+        myprint( "testsunit warning: no argument given\n" ) ;
+        return ;
+    }
+
+    foreach my $name ( @names ) {
+        if ( ! $name ) {
+            myprint( "testsunit warning: argument is empty\n" ) ;
+            next ;
+        }
+        if ( ! exists &$name ) {
+            myprint( "testsunit warning: function $name does not exist\n" ) ;
+            next ;
+        }
+        if ( ! defined &$name ) {
+            myprint( "testsunit warning: function $name is not defined\n" ) ;
+            next ;
+        }
+        my $code_ref = \&{ $name } ;
+        $code_ref->() ;
+    }
+    return ;
+}
+
+sub testsdebug
+{
+ # Now a little obsolete since there is
+ # imapsync ... --testsunit "anyfunction"
+ my $mysync = shift ;
+ if ( ! $mysync->{ testsdebug } ) { return ; }
+ SKIP: {
+ if ( ! $mysync->{ testsdebug } ) {
+ skip 'No test in normal run' ;
+ }
+
+ note( 'Entering testsdebug()' ) ;
+ #ok( ( ( not -d 'W/tmp/tests' ) or rmtree( 'W/tmp/tests/' ) ), 'testsdebug: rmtree W/tmp/tests' ) ;
+ #tests_check_binary_embed_all_dyn_libs( ) ;
+ #tests_killpid_by_parent( ) ;
+ #tests_killpid_by_brother( ) ;
+ #tests_kill_zero( ) ;
+ #tests_connect_socket( ) ;
+ tests_probe_imapssl( ) ;
+ #tests_always_fail( ) ;
+
+ note( 'Leaving testsdebug()' ) ;
+ done_testing( ) ;
+ }
+ return ;
+}
+
+
+sub tests
+{
+ my $mysync = shift ;
+ if ( ! $mysync->{ tests } ) { return ; }
+
+ SKIP: {
+ skip 'No test in normal run' if ( ! $mysync->{ tests } ) ;
+ note( 'Entering tests()' ) ;
+ tests_folder_routines( ) ;
+ tests_compare_lists( ) ;
+ tests_regexmess( ) ;
+ tests_skipmess( ) ;
+ tests_flags_regex();
+ tests_ucsecond( ) ;
+ tests_permanentflags();
+ tests_flags_filter( ) ;
+ tests_separator_invert( ) ;
+ tests_imap2_folder_name( ) ;
+ tests_command_line_nopassword( ) ;
+ tests_good_date( ) ;
+ tests_max( ) ;
+ tests_remove_not_num();
+ tests_memory_consumption( ) ;
+ tests_is_a_release_number();
+ tests_imapsync_basename();
+ tests_list_keys_in_2_not_in_1();
+ tests_convert_sep_to_slash( ) ;
+ tests_match_a_cache_file( ) ;
+ tests_cache_map( ) ;
+ tests_get_cache( ) ;
+ tests_clean_cache( ) ;
+ tests_clean_cache_2( ) ;
+ tests_touch( ) ;
+ tests_flagscase( ) ;
+ tests_mkpath( ) ;
+ tests_extract_header( ) ;
+ tests_decompose_header( ) ;
+ tests_epoch( ) ;
+ tests_add_header( ) ;
+ tests_cache_dir_fix( ) ;
+ tests_cache_dir_fix_win( ) ;
+ tests_filter_forbidden_characters( ) ;
+ tests_cache_folder( ) ;
+ tests_time_remaining( ) ;
+ tests_decompose_regex( ) ;
+ tests_backtick( ) ;
+ tests_bytes_display_string( ) ;
+ tests_header_line_normalize( ) ;
+ tests_fix_Inbox_INBOX_mapping( ) ;
+ tests_max_line_length( ) ;
+ tests_subject( ) ;
+ tests_msgs_from_maxmin( ) ;
+ tests_tmpdir_has_colon_bug( ) ;
+ tests_sleep_max_messages( ) ;
+ tests_sleep_max_bytes( ) ;
+ tests_logfile( ) ;
+ tests_setlogfile( ) ;
+ tests_jux_utf8_old( ) ;
+ tests_jux_utf8( ) ;
+ tests_pipemess( ) ;
+ tests_jux_utf8_list( ) ;
+ tests_guess_prefix( ) ;
+ tests_guess_separator( ) ;
+ tests_format_for_imap_arg( ) ;
+ tests_imapsync_id( ) ;
+ tests_date_from_rcs( ) ;
+ tests_quota_extract_storage_limit_in_bytes( ) ;
+ tests_quota_extract_storage_current_in_bytes( ) ;
+ tests_guess_special( ) ;
+ tests_do_valid_directory( ) ;
+ tests_delete1emptyfolders( ) ;
+ tests_message_for_host2( ) ;
+ tests_length_ref( ) ;
+ tests_firstline( ) ;
+ tests_diff_or_NA( ) ;
+ tests_match_number( ) ;
+ tests_all_defined( ) ;
+ tests_special_from_folders_hash( ) ;
+ tests_notmatch( ) ;
+ tests_match( ) ;
+ tests_get_options( ) ;
+ tests_get_options_cgi_context( ) ;
+ tests_rand32( ) ;
+ tests_hashsynclocal( ) ;
+ tests_hashsync( ) ;
+ tests_output( ) ;
+ tests_output_reset_with( ) ;
+ tests_output_start( ) ;
+ tests_check_last_release( ) ;
+ tests_loadavg( ) ;
+ tests_cpu_number( ) ;
+ tests_load_and_delay( ) ;
+ #tests_imapsping( ) ;
+ #tests_tcpping( ) ;
+ tests_sslcheck( ) ;
+ tests_not_long_imapsync_version_public( ) ;
+ tests_reconnect_if_needed( ) ;
+ tests_reconnect_12_if_needed( ) ;
+ tests_sleep_if_needed( ) ;
+ tests_string_to_file( ) ;
+ tests_file_to_string( ) ;
+ tests_under_cgi_context( ) ;
+ tests_umask( ) ;
+ tests_umask_str( ) ;
+ tests_set_umask( ) ;
+ tests_createhashfileifneeded( ) ;
+ tests_slash_to_underscore( ) ;
+ tests_testsunit( ) ;
+ tests_count_0s( ) ;
+ tests_report_failures( ) ;
+ tests_min( ) ;
+ #tests_connect_socket( ) ;
+ #tests_resolvrev( ) ;
+ tests_usage( ) ;
+ tests_version_from_rcs( ) ;
+ tests_backslash_caret( ) ;
+ #tests_mailimapclient_connect_bug( ) ; # it fails with Mail-IMAPClient <= 3.39
+ tests_write_pidfile( ) ;
+ tests_remove_pidfile_not_running( ) ;
+ tests_match_a_pid_number( ) ;
+ tests_prefix_seperator_invertion( ) ;
+ tests_is_an_integer( ) ;
+ tests_integer_or_1( ) ;
+ tests_is_number( ) ;
+ tests_sig_install( ) ;
+ tests_template( ) ;
+ tests_split_around_equal( ) ;
+ tests_toggle_sleep( ) ;
+ tests_labels( ) ;
+ tests_synclabels( ) ;
+ tests_uidexpunge_or_expunge( ) ;
+ tests_appendlimit_from_capability( ) ;
+ tests_maxsize_setting( ) ;
+ tests_mock_capability( ) ;
+ tests_appendlimit( ) ;
+ tests_capability_of( ) ;
+ tests_search_in_array( ) ;
+ tests_operators_and_exclam_precedence( ) ;
+ tests_teelaunch( ) ;
+ tests_logfileprepa( ) ;
+ tests_useheader_suggestion( ) ;
+ tests_nb_messages_in_2_not_in_1( ) ;
+ tests_labels_add_subfolder2( ) ;
+ tests_labels_remove_subfolder1( ) ;
+ tests_resynclabels( ) ;
+ tests_labels_remove_special( ) ;
+ tests_uniq( ) ;
+ tests_remove_from_requested_folders( ) ;
+ tests_errors_log( ) ;
+ tests_add_subfolder1_to_folderrec( ) ;
+ tests_sanitize_subfolder( ) ;
+ tests_remove_edging_blanks( ) ;
+ tests_sanitize( ) ;
+ tests_remove_last_char_if_is( ) ;
+ tests_check_binary_embed_all_dyn_libs( ) ;
+ tests_nthline( ) ;
+ tests_secondline( ) ;
+ tests_tail( ) ;
+ tests_truncmess( ) ;
+ tests_eta( ) ;
+ tests_timesince( ) ;
+ tests_timenext( ) ;
+ tests_foldersize( ) ;
+ tests_imapsync_context( ) ;
+ tests_abort( ) ;
+ tests_probe_imapssl( ) ;
+ tests_mailimapclient_connect( ) ;
+ #tests_resolv( ) ;
+
+ # Those three are for later use, when webserver will be inside imapsync
+ # or will be deleted them if I abandon the project.
+ #tests_killpid_by_parent( ) ;
+ #tests_killpid_by_brother( ) ;
+ #tests_kill_zero( ) ;
+
+ #tests_always_fail( ) ;
+ done_testing( 1496 ) ;
+ note( 'Leaving tests()' ) ;
+ }
+ return ;
+}
+
+sub tests_template
+{
+ # Tautological assertions kept as a skeleton for writing new
+ # tests_*() subs; asserts nothing meaningful by design.
+ note( 'Entering tests_template()' ) ;
+
+ is( undef, undef, 'template: no args => undef' ) ;
+ is_deeply( {}, {}, 'template: a hash is a hash' ) ;
+ is_deeply( [], [], 'template: an array is an array' ) ;
+ note( 'Leaving tests_template()' ) ;
+ return ;
+}
+
+
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync_cron.pl b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync_cron.pl
new file mode 100644
index 0000000..746b1d4
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync_cron.pl
@@ -0,0 +1,180 @@
+#!/usr/bin/perl
+
+use DBI;
+use LockFile::Simple qw(lock trylock unlock);
+use Proc::ProcessTable;
+use Data::Dumper qw(Dumper);
+use IPC::Run 'run';
+use File::Temp;
+use Try::Tiny;
+use sigtrap 'handler' => \&sig_handler, qw(INT TERM KILL QUIT);
+
+# Return a copy of the argument with leading and trailing whitespace removed.
+sub trim {
+    my $text = shift ;
+    $text =~ s/\A\s+//xms ;
+    $text =~ s/\s+\z//xms ;
+    return $text ;
+};
+my $t = Proc::ProcessTable->new;
+my $imapsync_running = grep { $_->{cmndline} =~ /imapsync\s/i } @{$t->table};
+if ($imapsync_running ge 1)
+{
+ print "imapsync is active, exiting...";
+ exit;
+}
+
+# Split a raw "--opt value --flag ..." string into a flat word list.
+# Each "--" starts a new chunk; a chunk is cut into at most two fields
+# (the option and its optional value, which may itself contain spaces).
+# Empty fields are dropped and every kept field is trimmed.
+sub qqw($) {
+    my @words = ();
+    foreach my $chunk (split(/(?=--)/, $_[0])) {
+        foreach my $field (split(/ /, $chunk, 2)) {
+            if ($field ne '') {
+                push @words, $field;
+            }
+        }
+    }
+    @words = map { trim($_) } @words;
+    return @words;
+}
+
+$run_dir="/tmp";
+$dsn = 'DBI:mysql:database=__DBNAME__;mysql_socket=/var/run/mysqld/mysqld.sock';
+$lock_file = $run_dir . "/imapsync_busy";
+$lockmgr = LockFile::Simple->make(-autoclean => 1, -max => 1);
+$lockmgr->lock($lock_file) || die "can't lock ${lock_file}";
+$dbh = DBI->connect($dsn, '__DBUSER__', '__DBPASS__', {
+ mysql_auto_reconnect => 1,
+ mysql_enable_utf8mb4 => 1
+});
+$dbh->do("UPDATE imapsync SET is_running = 0");
+
+sub sig_handler {
+ # Installed via sigtrap for INT/TERM/KILL/QUIT (see the use line at
+ # the top of this script).
+ # Send die to force exception in "run"
+ # The die unwinds IPC::Run's run(), so the surrounding Try::Tiny
+ # catch/finally blocks record the failure and clear is_running.
+ die "sig_handler received signal, preparing to exit...\n";
+};
+
+open my $file, '<', "/etc/sogo/sieve.creds";
+my $creds = <$file>;
+close $file;
+my ($master_user, $master_pass) = split /:/, $creds;
+my $sth = $dbh->prepare("SELECT id,
+ user1,
+ user2,
+ host1,
+ authmech1,
+ password1,
+ exclude,
+ port1,
+ enc1,
+ delete2duplicates,
+ maxage,
+ subfolder2,
+ delete1,
+ delete2,
+ automap,
+ skipcrossduplicates,
+ maxbytespersecond,
+ custom_params,
+ subscribeall,
+ timeout1,
+ timeout2
+ FROM imapsync
+ WHERE active = 1
+ AND is_running = 0
+ AND (
+ UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(last_run) > mins_interval * 60
+ OR
+ last_run IS NULL)
+ ORDER BY last_run");
+
+$sth->execute();
+my $row;
+
+while ($row = $sth->fetchrow_arrayref()) {
+
+ $id = @$row[0];
+ $user1 = @$row[1];
+ $user2 = @$row[2];
+ $host1 = @$row[3];
+ $authmech1 = @$row[4];
+ $password1 = @$row[5];
+ $exclude = @$row[6];
+ $port1 = @$row[7];
+ $enc1 = @$row[8];
+ $delete2duplicates = @$row[9];
+ $maxage = @$row[10];
+ $subfolder2 = @$row[11];
+ $delete1 = @$row[12];
+ $delete2 = @$row[13];
+ $automap = @$row[14];
+ $skipcrossduplicates = @$row[15];
+ $maxbytespersecond = @$row[16];
+ $custom_params = @$row[17];
+ $subscribeall = @$row[18];
+ $timeout1 = @$row[19];
+ $timeout2 = @$row[20];
+
+ if ($enc1 eq "TLS") { $enc1 = "--tls1"; } elsif ($enc1 eq "SSL") { $enc1 = "--ssl1"; } else { undef $enc1; }
+
+ my $template = $run_dir . '/imapsync.XXXXXXX';
+ my $passfile1 = File::Temp->new(TEMPLATE => $template);
+ my $passfile2 = File::Temp->new(TEMPLATE => $template);
+
+ print $passfile1 "$password1\n";
+ print $passfile2 trim($master_pass) . "\n";
+
+ my @custom_params_a = qqw($custom_params);
+ my $custom_params_ref = \@custom_params_a;
+
+  # Assemble the imapsync invocation. Options controlled by DB columns are
+  # injected conditionally: each ternary contributes either an empty list
+  # (option unset/disabled) or the option with its value.
+  my $generated_cmds = [ "/usr/local/bin/imapsync",
+    "--tmpdir", "/tmp",
+    "--nofoldersizes",
+    # BUG FIX: the timeout ternaries were inverted — they dropped
+    # --timeout1/--timeout2 exactly when a positive timeout was
+    # configured and passed them when the value was <= 0. Pass the
+    # option only for a positive configured value.
+    ($timeout1 gt "0" ? ('--timeout1', $timeout1) : ()),
+    ($timeout2 gt "0" ? ('--timeout2', $timeout2) : ()),
+    ($exclude eq "" ? () : ("--exclude", $exclude)),
+    ($subfolder2 eq "" ? () : ('--subfolder2', $subfolder2)),
+    ($maxage eq "0" ? () : ('--maxage', $maxage)),
+    ($maxbytespersecond eq "0" ? () : ('--maxbytespersecond', $maxbytespersecond)),
+    ($delete2duplicates ne "1" ? () : ('--delete2duplicates')),
+    ($subscribeall ne "1" ? () : ('--subscribeall')),
+    ($delete1 ne "1" ? () : ('--delete')),
+    ($delete2 ne "1" ? () : ('--delete2')),
+    ($automap ne "1" ? () : ('--automap')),
+    ($skipcrossduplicates ne "1" ? () : ('--skipcrossduplicates')),
+    (!defined($enc1) ? () : ($enc1)),
+    "--host1", $host1,
+    "--user1", $user1,
+    "--passfile1", $passfile1->filename,
+    "--port1", $port1,
+    "--host2", "localhost",
+    # Dovecot master-user login: "<user>*<masteruser>" from sieve.creds.
+    "--user2", $user2 . '*' . trim($master_user),
+    "--passfile2", $passfile2->filename,
+    '--no-modulesversion',
+    '--noreleasecheck'];
+
+ try {
+ $is_running = $dbh->prepare("UPDATE imapsync SET is_running = 1 WHERE id = ?");
+ $is_running->bind_param( 1, ${id} );
+ $is_running->execute();
+
+ run [@$generated_cmds, @$custom_params_ref], '&>', \my $stdout;
+
+ $update = $dbh->prepare("UPDATE imapsync SET returned_text = ? WHERE id = ?");
+ $update->bind_param( 1, ${stdout} );
+ $update->bind_param( 2, ${id} );
+ $update->execute();
+ } catch {
+ $update = $dbh->prepare("UPDATE imapsync SET returned_text = 'Could not start or finish imapsync' WHERE id = ?");
+ $update->bind_param( 1, ${id} );
+ $update->execute();
+ } finally {
+ $update = $dbh->prepare("UPDATE imapsync SET last_run = NOW(), is_running = 0 WHERE id = ?");
+ $update->bind_param( 1, ${id} );
+ $update->execute();
+ };
+
+
+}
+
+$sth->finish();
+$dbh->disconnect();
+
+$lockmgr->unlock($lock_file);
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/maildir_gc.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/maildir_gc.sh
new file mode 100755
index 0000000..24c1e46
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/maildir_gc.sh
@@ -0,0 +1,2 @@
#!/bin/bash
# Garbage-collect deleted maildirs: remove any directory directly below
# /var/vmail/_garbage/ whose ctime is older than MAILDIR_GC_TIME minutes.
# Fix: the first line was "#/bin/bash" (a plain comment, not a shebang).
[ -d /var/vmail/_garbage/ ] && /usr/bin/find /var/vmail/_garbage/ -mindepth 1 -maxdepth 1 -type d -cmin +${MAILDIR_GC_TIME} -exec rm -r {} \;
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/postlogin.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/postlogin.sh
new file mode 100755
index 0000000..01a45f3
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/postlogin.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+export MASTER_USER=$USER
+exec "$@"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quarantine_notify.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quarantine_notify.py
new file mode 100755
index 0000000..adf3171
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quarantine_notify.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python3
+
+import smtplib
+import os
+import mysql.connector
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.utils import COMMASPACE, formatdate
+import cgi
+import jinja2
+from jinja2 import Template
+import json
+import redis
+import time
+import html2text
+import socket
+
# Block until Redis is reachable; all runtime settings (templates, limits)
# are read from it.
while True:
  try:
    r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0)
    r.ping()
  except Exception as ex:
    print('%s - trying again...' % (ex))
    time.sleep(3)
  else:
    break

time_now = int(time.time())
mailcow_hostname = '__MAILCOW_HOSTNAME__'

# Q_MAX_SCORE falls back to 9999.0 when unset or empty ("" is falsy).
# Fix: the old `if max_score == "":` check was dead code (max_score is
# already a float here); guard the conversion itself instead.
try:
  max_score = float(r.get('Q_MAX_SCORE') or "9999.0")
except ValueError:
  max_score = 9999.0
+
def query_mysql(query, headers = True, update = False):
  # Run a query against the mailcow DB over the local unix socket.
  # The __DBUSER__/__DBPASS__/__DBNAME__ placeholders are substituted when
  # the container is built/started. The connection is retried forever.
  # NOTE(review): callers build queries with % string formatting; this is
  # only safe while every interpolated value is trusted — consider
  # parameterized queries.
  while True:
    try:
      cnx = mysql.connector.connect(unix_socket = '/var/run/mysqld/mysqld.sock', user='__DBUSER__', passwd='__DBPASS__', database='__DBNAME__', charset="utf8")
    except Exception as ex:
      print('%s - trying again...' % (ex))
      time.sleep(3)
    else:
      break
  cur = cnx.cursor()
  cur.execute(query)
  if not update:
    # SELECT path: return rows as dicts keyed by column name (headers=True)
    # or as plain tuples.
    result = []
    columns = tuple( [d[0] for d in cur.description] )
    for row in cur:
      if headers:
        result.append(dict(list(zip(columns, row))))
      else:
        result.append(row)
    cur.close()
    cnx.close()
    return result
  else:
    # UPDATE/INSERT path: commit and return None.
    cnx.commit()
    cur.close()
    cnx.close()
+
def notify_rcpt(rcpt, msg_count, quarantine_acl, category):
  """Send a quarantine digest mail to rcpt and mark its items as notified.

  rcpt           -- recipient mailbox address
  msg_count      -- item count from the caller (recomputed from the query below)
  quarantine_acl -- ACL flag passed through to the template
  category       -- action filter; "add_header" is normalized, "all" matches everything
  """
  if category == "add_header": category = "add header"
  meta_query = query_mysql('SELECT SHA2(CONCAT(id, qid), 256) AS qhash, id, subject, score, sender, created, action FROM quarantine WHERE notified = 0 AND rcpt = "%s" AND score < %f AND (action = "%s" OR "all" = "%s")' % (rcpt, max_score, category, category))
  print("%s: %d of %d messages qualify for notification" % (rcpt, len(meta_query), msg_count))
  if len(meta_query) == 0:
    return
  msg_count = len(meta_query)
  # Prefer the admin-supplied template from Redis, fall back to the bundled one
  if r.get('Q_HTML'):
    try:
      template = Template(r.get('Q_HTML'))
    except:
      print("Error: Cannot parse quarantine template, falling back to default template.")
      with open('/templates/quarantine.tpl') as file_:
        template = Template(file_.read())
  else:
    with open('/templates/quarantine.tpl') as file_:
      template = Template(file_.read())
  html = template.render(meta=meta_query, username=rcpt, counter=msg_count, hostname=mailcow_hostname, quarantine_acl=quarantine_acl)
  text = html2text.html2text(html)
  count = 0
  # Retry delivery up to 15 times
  while count < 15:
    count += 1
    try:
      server = smtplib.SMTP('postfix', 590, 'quarantine')
      server.ehlo()
      msg = MIMEMultipart('alternative')
      msg_from = r.get('Q_SENDER') or "quarantine@localhost"
      # Remove non-ascii chars from field
      msg['From'] = ''.join([i if ord(i) < 128 else '' for i in msg_from])
      msg['Subject'] = r.get('Q_SUBJ') or "Spam Quarantine Notification"
      msg['Date'] = formatdate(localtime = True)
      text_part = MIMEText(text, 'plain', 'utf-8')
      html_part = MIMEText(html, 'html', 'utf-8')
      msg.attach(text_part)
      msg.attach(html_part)
      msg['To'] = str(rcpt)
      bcc = r.get('Q_BCC') or ""
      redirect = r.get('Q_REDIRECT') or ""
      # Fix: keep the rendered plain-text body in `text`; the old code
      # overwrote it with the full RFC822 message, so a retry rebuilt the
      # mail with the wrong body.
      msg_text = msg.as_string()
      if bcc == '':
        if redirect == '':
          server.sendmail(msg['From'], str(rcpt), msg_text)
        else:
          server.sendmail(msg['From'], str(redirect), msg_text)
      else:
        if redirect == '':
          server.sendmail(msg['From'], [str(rcpt)] + [str(bcc)], msg_text)
        else:
          server.sendmail(msg['From'], [str(redirect)] + [str(bcc)], msg_text)
      server.quit()
      for res in meta_query:
        query_mysql('UPDATE quarantine SET notified = 1 WHERE id = "%d"' % (res['id']), update = True)
      # Fix: use the rcpt argument, not the module-level loop variable
      # `record`, which only happens to be in scope when called from the
      # main loop.
      r.hset('Q_LAST_NOTIFIED', rcpt, time_now)
      break
    except Exception as ex:
      # Fix: `server` may not exist if smtplib.SMTP() itself failed; don't
      # mask the original error with a NameError.
      try:
        server.quit()
      except Exception:
        pass
      print('%s' % (ex))
      time.sleep(3)
+
# All recipients with un-notified quarantine items below the score cap,
# joined with their quarantine ACL.
records = query_mysql('SELECT IFNULL(user_acl.quarantine, 0) AS quarantine_acl, count(id) AS counter, rcpt FROM quarantine LEFT OUTER JOIN user_acl ON user_acl.username = rcpt WHERE notified = 0 AND score < %f AND rcpt in (SELECT username FROM mailbox) GROUP BY rcpt' % (max_score))

for record in records:
  attrs = ''
  attrs_json = ''
  # Notification policy name -> minimum interval in seconds
  time_trans = {
    "hourly": 3600,
    "daily": 86400,
    "weekly": 604800
  }
  try:
    last_notification = int(r.hget('Q_LAST_NOTIFIED', record['rcpt']))
    if last_notification > time_now:
      print('Last notification is > time now, assuming never')
      last_notification = 0
  except Exception as ex:
    # Missing/non-numeric hash entry: treat as never notified
    print('Could not determine last notification for %s, assuming never' % (record['rcpt']))
    last_notification = 0
  attrs_json = query_mysql('SELECT attributes FROM mailbox WHERE username = "%s"' % (record['rcpt']))
  attrs = attrs_json[0]['attributes']
  if isinstance(attrs, str):
    # if attr is str then just load it
    attrs = json.loads(attrs)
  else:
    # if it's bytes then decode and load it
    attrs = json.loads(attrs.decode('utf-8'))
  # Skip mailboxes whose policy disables notifications
  if attrs['quarantine_notification'] not in ('hourly', 'daily', 'weekly'):
    continue
  # Notify when never notified before or the policy interval has elapsed
  if last_notification == 0 or (last_notification + time_trans[attrs['quarantine_notification']]) < time_now:
    print("Notifying %s: Considering %d new items in quarantine (policy: %s)" % (record['rcpt'], record['counter'], attrs['quarantine_notification']))
    notify_rcpt(record['rcpt'], record['counter'], record['quarantine_acl'], attrs['quarantine_category'])
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quota_notify.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quota_notify.py
new file mode 100755
index 0000000..fdfda30
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quota_notify.py
@@ -0,0 +1,72 @@
#!/usr/bin/python3
# Dovecot quota-warning script: render a warning mail for <username> at
# <percent> usage and deliver it locally via dovecot-lda.

import smtplib
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import jinja2
from jinja2 import Template
import redis
import time
import sys
import html2text
from subprocess import Popen, PIPE, STDOUT

# Usage: quota_notify.py <percent> <username>
if len(sys.argv) > 2:
  percent = int(sys.argv[1])
  username = str(sys.argv[2])
else:
  print("Args missing")
  sys.exit(1)

# Block until Redis is reachable
while True:
  try:
    r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0)
    r.ping()
  except Exception as ex:
    print('%s - trying again...' % (ex))
    time.sleep(3)
  else:
    break

# Prefer admin-supplied template from Redis, fall back to the bundled default
if r.get('QW_HTML'):
  try:
    template = Template(r.get('QW_HTML'))
  except:
    print("Error: Cannot parse quarantine template, falling back to default template.")
    with open('/templates/quota.tpl') as file_:
      template = Template(file_.read())
else:
  with open('/templates/quota.tpl') as file_:
    template = Template(file_.read())

html = template.render(username=username, percent=percent)
text = html2text.html2text(html)

try:
  # Build a multipart/alternative mail and hand it to dovecot-lda on stdin
  msg = MIMEMultipart('alternative')
  msg['From'] = r.get('QW_SENDER') or "quota-warning@localhost"
  msg['Subject'] = r.get('QW_SUBJ') or "Quota warning"
  msg['Date'] = formatdate(localtime = True)
  text_part = MIMEText(text, 'plain', 'utf-8')
  html_part = MIMEText(html, 'html', 'utf-8')
  msg.attach(text_part)
  msg.attach(html_part)
  msg['To'] = username
  # NOTE(review): the embedded double quotes in the -o value are passed to
  # dovecot-lda verbatim (no shell is involved in list-form Popen) —
  # confirm dovecot-lda tolerates/strips them.
  p = Popen(['/usr/lib/dovecot/dovecot-lda', '-d', username, '-o', '"plugin/quota=maildir:User quota:noenforcing"'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
  p.communicate(input=bytes(msg.as_string(), 'utf-8'))

except Exception as ex:
  print('Failed to send quota notification: %s' % (ex))
  sys.exit(1)

# Close stdio quietly before exiting
try:
  sys.stdout.close()
except:
  pass

try:
  sys.stderr.close()
except:
  pass
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/repl_health.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/repl_health.sh
new file mode 100755
index 0000000..05d8914
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/repl_health.sh
@@ -0,0 +1,28 @@
#!/bin/bash
# Publish Dovecot replication health to Redis (DOVECOT_REPL_HEALTH).
# 1 = healthy; any higher value = number of failed replicator jobs.

source /source_env.sh

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379"
fi

# Is replication active?
# grep on file is less expensive than doveconf
if ! grep -qi mail_replica /etc/dovecot/dovecot.conf; then
  ${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null
  exit
fi

FAILED_SYNCS=$(doveadm replicator status | grep "Waiting 'failed' requests" | grep -oE '[0-9]+')
# Fix: if doveadm output could not be parsed, FAILED_SYNCS was empty and
# reached printf %d / the Redis SET as an empty string; default to 0.
FAILED_SYNCS=${FAILED_SYNCS:-0}

# Set amount of failed jobs as DOVECOT_REPL_HEALTH
# 1 failed job for mailcow.local is expected and healthy
if [[ "${FAILED_SYNCS}" != 0 ]] && [[ "${FAILED_SYNCS}" != 1 ]]; then
  printf "Dovecot replicator has %d failed jobs\n" "${FAILED_SYNCS}"
  ${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH "${FAILED_SYNCS}" > /dev/null
else
  ${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null
fi
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/report-ham.sieve b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/report-ham.sieve
new file mode 100644
index 0000000..80c7f44
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/report-ham.sieve
@@ -0,0 +1,11 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve", "environment", "variables"];

# Capture the name of the mailbox this IMAP event happened in.
if environment :matches "imap.mailbox" "*" {
  set "mailbox" "${1}";
}

# Moves into Trash are deletions, not ham corrections — do nothing.
if string "${mailbox}" "Trash" {
  stop;
}

# Hand a copy of the message to the ham-learning pipe script.
pipe :copy "rspamd-pipe-ham";
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/report-spam.sieve b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/report-spam.sieve
new file mode 100644
index 0000000..d44cb9a
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/report-spam.sieve
@@ -0,0 +1,3 @@
require ["vnd.dovecot.pipe", "copy"];

# Hand a copy of the message to the spam-learning pipe script.
pipe :copy "rspamd-pipe-spam";
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/rspamd-pipe-ham b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/rspamd-pipe-ham
new file mode 100755
index 0000000..732af85
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/rspamd-pipe-ham
@@ -0,0 +1,10 @@
#!/bin/bash
# Read one message from stdin and feed it to rspamd's ham-learning and
# fuzzy endpoints over the local unix socket.
# Fix: mktemp replaces the predictable /tmp/mail$$ name (tempfile race),
# and the cleanup trap is installed before the file is written.
FILE=$(mktemp /tmp/mail.XXXXXXXX)
trap "/bin/rm -f $FILE" 0 1 2 3 13 15
cat > "${FILE}"

# Flag headers select the fuzzy storage; values mirror the spam pipe script.
/usr/bin/curl -H "Flag: 11" -s --data-binary "@${FILE}" --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzydel
/usr/bin/curl -s --data-binary "@${FILE}" --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnham
/usr/bin/curl -H "Flag: 13" -s --data-binary "@${FILE}" --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd

exit 0
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/rspamd-pipe-spam b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/rspamd-pipe-spam
new file mode 100755
index 0000000..a4b91a0
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/rspamd-pipe-spam
@@ -0,0 +1,10 @@
#!/bin/bash
# Read one message from stdin and feed it to rspamd's spam-learning and
# fuzzy endpoints over the local unix socket.
# Fix: mktemp replaces the predictable /tmp/mail$$ name (tempfile race),
# and the cleanup trap is installed before the file is written.
FILE=$(mktemp /tmp/mail.XXXXXXXX)
trap "/bin/rm -f $FILE" 0 1 2 3 13 15
cat > "${FILE}"

# Flag headers select the fuzzy storage; values mirror the ham pipe script.
/usr/bin/curl -H "Flag: 13" -s --data-binary "@${FILE}" --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzydel
/usr/bin/curl -s --data-binary "@${FILE}" --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnspam
/usr/bin/curl -H "Flag: 11" -s --data-binary "@${FILE}" --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd

exit 0
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/sa-rules.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/sa-rules.sh
new file mode 100755
index 0000000..1bfc8cc
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/sa-rules.sh
@@ -0,0 +1,43 @@
#!/bin/bash
# Fetch third-party SpamAssassin rulesets, merge them into
# /etc/rspamd/custom/sa-rules, and restart rspamd-mailcow via dockerapi
# when the merged file changed.

# Create temp directories
[[ ! -d /tmp/sa-rules-heinlein ]] && mkdir -p /tmp/sa-rules-heinlein
#[[ ! -d /tmp/sa-rules-schaal ]] && mkdir -p /tmp/sa-rules-schaal

# Hash current SA rules
if [[ ! -f /etc/rspamd/custom/sa-rules ]]; then
  HASH_SA_RULES=0
else
  HASH_SA_RULES=$(cat /etc/rspamd/custom/sa-rules | md5sum | cut -d' ' -f1)
fi

# Deploy
## Heinlein — the current ruleset version is published as a DNS TXT record
curl --connect-timeout 15 --retry 10 --max-time 30 http://www.spamassassin.heinlein-support.de/$(dig txt 1.4.3.spamassassin.heinlein-support.de +short | tr -d '"').tar.gz --output /tmp/sa-rules-heinlein.tar.gz
# gzip -t guards against deploying a truncated download or an error page
if gzip -t /tmp/sa-rules-heinlein.tar.gz; then
  tar xfvz /tmp/sa-rules-heinlein.tar.gz -C /tmp/sa-rules-heinlein
  cat /tmp/sa-rules-heinlein/*cf > /etc/rspamd/custom/sa-rules
fi
## Schaal
#curl --connect-timeout 15 --max-time 30 http://sa.schaal-it.net/$(dig txt 1.4.3.sa.schaal-it.net +short | tr -d '"').tar.gz --output /tmp/sa-rules-schaal.tar.gz
#if gzip -t /tmp/sa-rules-schaal.tar.gz; then
# tar xfvz /tmp/sa-rules-schaal.tar.gz -C /tmp/sa-rules-schaal
# # Append, do not overwrite
# cat /tmp/sa-rules-schaal/*cf >> /etc/rspamd/custom/sa-rules
#fi

# Escape unescaped "$" (unless followed by "/") in the merged rules
# NOTE(review): intent inferred from the pattern — confirm against rspamd's
# sa-rules parsing before changing.
sed -i -e 's/\([^\\]\)\$\([^\/]\)/\1\\$\2/g' /etc/rspamd/custom/sa-rules

# Restart rspamd-mailcow only when the merged ruleset actually changed
if [[ "$(cat /etc/rspamd/custom/sa-rules | md5sum | cut -d' ' -f1)" != "${HASH_SA_RULES}" ]]; then
  CONTAINER_NAME=rspamd-mailcow
  CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | \
    jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | \
    jq -rc "select( .name | tostring | contains(\"${CONTAINER_NAME}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
  if [[ ! -z ${CONTAINER_ID} ]]; then
    curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://dockerapi/containers/${CONTAINER_ID}/restart
  fi
fi

# Cleanup
rm -rf /tmp/sa-rules-heinlein /tmp/sa-rules-heinlein.tar.gz
#rm -rf /tmp/sa-rules-schaal /tmp/sa-rules-schaal.tar.gz
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/stop-supervisor.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/stop-supervisor.sh
new file mode 100755
index 0000000..5394490
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/stop-supervisor.sh
@@ -0,0 +1,8 @@
#!/bin/bash
# Supervisor eventlistener: signal readiness per the eventlistener protocol,
# then send SIGQUIT (kill -3) to supervisord on ANY received event so the
# whole container stops when a supervised program dies.

printf "READY\n";

while read line; do
  echo "Processing Event: $line" >&2;
  kill -3 $(cat "/var/run/supervisord.pid")
done < /dev/stdin
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/supervisord.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/supervisord.conf
new file mode 100644
index 0000000..2d91b55
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/supervisord.conf
@@ -0,0 +1,24 @@
[supervisord]
nodaemon=true
user=root
pidfile=/var/run/supervisord.pid

; Ship syslog to the container's stdout/stderr (no size rotation)
[program:syslog-ng]
command=/usr/sbin/syslog-ng --foreground --no-caps
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true

[program:dovecot]
command=/usr/sbin/dovecot -F
autorestart=true

[program:cron]
command=/usr/sbin/cron -f
autorestart=true

; stop-supervisor.sh kills supervisord when any program stops/exits/fails,
; so the container exits and can be restarted by Docker
[eventlistener:processes]
command=/usr/local/sbin/stop-supervisor.sh
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf
new file mode 100644
index 0000000..335cbfe
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf
@@ -0,0 +1,45 @@
# syslog-ng config used when running against a Redis slave setup:
# `REDIS_SLAVEOF_IP`/`REDIS_SLAVEOF_PORT` placeholders are substituted at
# container start.
@version: 3.19
@include "scl.conf"
options {
  chain_hostnames(off);
  flush_lines(0);
  use_dns(no);
  use_fqdn(no);
  owner("root"); group("adm"); perm(0640);
  stats_freq(0);
  bad_hostname("^gconfd$");
};
source s_src {
  unix-stream("/dev/log");
  internal();
};
destination d_stdout { pipe("/dev/stdout"); };
# Mail log entries for the mailcow UI, as JSON in a Redis list
destination d_redis_ui_log {
  redis(
    host("`REDIS_SLAVEOF_IP`")
    persist-name("redis1")
    port(`REDIS_SLAVEOF_PORT`)
    command("LPUSH" "DOVECOT_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
# Raw messages for the netfilter (fail2ban-like) container
destination d_redis_f2b_channel {
  redis(
    host("`REDIS_SLAVEOF_IP`")
    persist-name("redis2")
    port(`REDIS_SLAVEOF_PORT`)
    command("PUBLISH" "F2B_CHANNEL" "$MESSAGE")
  );
};
filter f_mail { facility(mail); };
# Drop noisy replication messages for mailboxes without a replica
filter f_replica {
  not match("User has no mail_replica in userdb" value("MESSAGE"));
  not match("Error: sync: Unknown user in remote" value("MESSAGE"));
};
log {
  source(s_src);
  filter(f_replica);
  destination(d_stdout);
  filter(f_mail);
  destination(d_redis_ui_log);
  destination(d_redis_f2b_channel);
};
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng.conf
new file mode 100644
index 0000000..f0489ea
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng.conf
@@ -0,0 +1,45 @@
# Default syslog-ng config: log to stdout and to the local redis-mailcow
# service (UI log list + fail2ban pub/sub channel).
@version: 3.19
@include "scl.conf"
options {
  chain_hostnames(off);
  flush_lines(0);
  use_dns(no);
  use_fqdn(no);
  owner("root"); group("adm"); perm(0640);
  stats_freq(0);
  bad_hostname("^gconfd$");
};
source s_src {
  unix-stream("/dev/log");
  internal();
};
destination d_stdout { pipe("/dev/stdout"); };
# Mail log entries for the mailcow UI, as JSON in a Redis list
destination d_redis_ui_log {
  redis(
    host("redis-mailcow")
    persist-name("redis1")
    port(6379)
    command("LPUSH" "DOVECOT_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
# Raw messages for the netfilter (fail2ban-like) container
destination d_redis_f2b_channel {
  redis(
    host("redis-mailcow")
    persist-name("redis2")
    port(6379)
    command("PUBLISH" "F2B_CHANNEL" "$MESSAGE")
  );
};
filter f_mail { facility(mail); };
# Drop noisy replication messages for mailboxes without a replica
filter f_replica {
  not match("User has no mail_replica in userdb" value("MESSAGE"));
  not match("Error: sync: Unknown user in remote" value("MESSAGE"));
};
log {
  source(s_src);
  filter(f_replica);
  destination(d_stdout);
  filter(f_mail);
  destination(d_redis_ui_log);
  destination(d_redis_f2b_channel);
};
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/trim_logs.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/trim_logs.sh
new file mode 100755
index 0000000..2993a4c
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/trim_logs.sh
@@ -0,0 +1,25 @@
#!/bin/bash
# Trim mailcow's Redis-backed log lists to the configured number of lines
# (__LOG_LINES__ is substituted at deploy time).
catch_non_zero() {
  # Run the given command string, discard stdout, report non-zero exits.
  CMD=${1}
  ${CMD} > /dev/null
  EC=$?
  if [ ${EC} -ne 0 ]; then
    echo "Command ${CMD} failed to execute, exit code was ${EC}"
  fi
}
source /source_env.sh
# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379"
fi
catch_non_zero "${REDIS_CMDLINE} LTRIM ACME_LOG 0 __LOG_LINES__"
catch_non_zero "${REDIS_CMDLINE} LTRIM POSTFIX_MAILLOG 0 __LOG_LINES__"
catch_non_zero "${REDIS_CMDLINE} LTRIM DOVECOT_MAILLOG 0 __LOG_LINES__"
catch_non_zero "${REDIS_CMDLINE} LTRIM SOGO_LOG 0 __LOG_LINES__"
catch_non_zero "${REDIS_CMDLINE} LTRIM NETFILTER_LOG 0 __LOG_LINES__"
catch_non_zero "${REDIS_CMDLINE} LTRIM AUTODISCOVER_LOG 0 __LOG_LINES__"
catch_non_zero "${REDIS_CMDLINE} LTRIM API_LOG 0 __LOG_LINES__"
catch_non_zero "${REDIS_CMDLINE} LTRIM RL_LOG 0 __LOG_LINES__"
catch_non_zero "${REDIS_CMDLINE} LTRIM WATCHDOG_LOG 0 __LOG_LINES__"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/Dockerfile
new file mode 100644
index 0000000..42aafd4
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/Dockerfile
@@ -0,0 +1,15 @@
FROM alpine:3.11

# Use "=" label form; the legacy space-separated form is deprecated
LABEL maintainer="Andre Peters <andre.peters@servercow.de>"

ENV XTABLES_LIBDIR=/usr/lib/xtables
ENV PYTHON_IPTABLES_XTABLES_VERSION=12
ENV IPTABLES_LIBDIR=/usr/lib

# Build deps are removed in the same layer; --no-cache avoids baking the apk
# index into the image (replaces the old "-U" update), and --no-cache-dir
# keeps pip's download cache out of the layer.
RUN apk add --no-cache --virtual .build-deps gcc python3-dev libffi-dev openssl-dev \
 && apk add --no-cache python3 iptables ip6tables tzdata musl-dev \
 && pip3 install --no-cache-dir --upgrade pip python-iptables redis ipaddress dnspython \
# && pip3 install --no-cache-dir --upgrade pip python-iptables==0.13.0 redis ipaddress dnspython \
 && apk del .build-deps

COPY server.py /
CMD ["python3", "-u", "/server.py"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/server.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/server.py
new file mode 100644
index 0000000..36565db
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/server.py
@@ -0,0 +1,557 @@
+#!/usr/bin/env python3
+
+import re
+import os
+import time
+import atexit
+import signal
+import ipaddress
+from collections import Counter
+from random import randint
+from threading import Thread
+from threading import Lock
+import redis
+import json
+import iptc
+import dns.resolver
+import dns.exception
+
# Connect to Redis: either the configured slave or the in-network
# redis-mailcow instance (IPV4_NETWORK.249). Retries forever.
while True:
  try:
    redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
    redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
    if "".__eq__(redis_slaveof_ip):
      r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
    else:
      r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
    r.ping()
  except Exception as ex:
    print('%s - trying again in 3 seconds' % (ex))
    time.sleep(3)
  else:
    break

pubsub = r.pubsub()

# Networks/hosts never to ban / always to ban (maintained elsewhere)
WHITELIST = []
BLACKLIST= []

# net (CIDR string) -> {'attempts': int, 'last_attempt': float}
bans = {}

# Cooperative shutdown flag checked by all worker loops
quit_now = False
# Serializes all iptables/ip6tables mutations across threads
lock = Lock()
+
def log(priority, message):
  """Push a structured entry onto the NETFILTER_LOG Redis list and echo it."""
  entry = {
    'time': int(round(time.time())),
    'priority': priority,
    'message': message,
  }
  r.lpush('NETFILTER_LOG', json.dumps(entry, ensure_ascii=False))
  print(message)

def logWarn(message):
  """Log *message* with 'warn' priority."""
  log('warn', message)

def logCrit(message):
  """Log *message* with 'crit' priority."""
  log('crit', message)

def logInfo(message):
  """Log *message* with 'info' priority."""
  log('info', message)
+
def refreshF2boptions():
  """Load fail2ban-style options from Redis into the global f2boptions dict.

  When F2B_OPTIONS is unset, defaults are assembled from the individual
  F2B_* keys and persisted back; otherwise the stored JSON is parsed.
  Malformed JSON sets quit_now so the container restarts cleanly.
  """
  global f2boptions
  global quit_now
  if not r.get('F2B_OPTIONS'):
    # Fix: dropped the old `f2boptions['x'] = int` lines — they assigned the
    # *type* `int` and were immediately overwritten (dead code).
    # Values read from Redis may be strings; consumers int() them.
    f2boptions = {}
    f2boptions['ban_time'] = r.get('F2B_BAN_TIME') or 1800
    f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') or 10
    f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') or 600
    f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') or 32
    f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') or 128
    r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
  else:
    try:
      f2boptions = json.loads(r.get('F2B_OPTIONS'))
    except ValueError:
      print('Error loading F2B options: F2B_OPTIONS is not json')
      quit_now = True
+
def refreshF2bregex():
  # Load the ban-trigger regexes from Redis into the global f2bregex dict,
  # seeding defaults on first run. Group 1 of each regex must capture the
  # offending IP address.
  # NOTE: defaults are built with int keys, but after the JSON round trip
  # (r.set/json.loads) keys are strings — consumers must not rely on ints.
  global f2bregex
  global quit_now
  if not r.get('F2B_REGEX'):
    f2bregex = {}
    f2bregex[1] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed'
    f2bregex[2] = '-login: Disconnected \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
    f2bregex[3] = '-login: Aborted login \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
    f2bregex[4] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
    f2bregex[5] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
    f2bregex[6] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
    f2bregex[7] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
    f2bregex[8] = '-login: Aborted login \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
    r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
  else:
    try:
      f2bregex = {}
      f2bregex = json.loads(r.get('F2B_REGEX'))
    except ValueError:
      print('Error loading F2B options: F2B_REGEX is not json')
      quit_now = True
+
# Migrate the log list key from pre-rename versions
if r.exists('F2B_LOG'):
  r.rename('F2B_LOG', 'NETFILTER_LOG')
+
def mailcowChainOrder():
  # Watchdog thread: every 10s verify that both FORWARD and INPUT chains
  # (IPv4 and IPv6 filter tables) jump to MAILCOW within the first three
  # rules; otherwise request a container restart via quit_now.
  global lock
  global quit_now
  while not quit_now:
    time.sleep(10)
    with lock:
      filter4_table = iptc.Table(iptc.Table.FILTER)
      filter6_table = iptc.Table6(iptc.Table6.FILTER)
      filter4_table.refresh()
      filter6_table.refresh()
      for f in [filter4_table, filter6_table]:
        forward_chain = iptc.Chain(f, 'FORWARD')
        input_chain = iptc.Chain(f, 'INPUT')
        for chain in [forward_chain, input_chain]:
          target_found = False
          for position, item in enumerate(chain.rules):
            if item.target.name == 'MAILCOW':
              target_found = True
              # Position > 2 means other rules were inserted before ours
              if position > 2:
                logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position))
                quit_now = True
          if not target_found:
            logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name))
            quit_now = True
+
def ban(address):
  # Register a failed attempt for *address* and, once MAX_ATTEMPTS within
  # the retry window is reached, insert a REJECT rule into the MAILCOW
  # chain (IPv4 or IPv6) and record the ban in Redis.
  global lock
  refreshF2boptions()
  BAN_TIME = int(f2boptions['ban_time'])
  MAX_ATTEMPTS = int(f2boptions['max_attempts'])
  RETRY_WINDOW = int(f2boptions['retry_window'])
  NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
  NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])

  ip = ipaddress.ip_address(address)
  # Treat IPv4-mapped IPv6 addresses as plain IPv4
  if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
    ip = ip.ipv4_mapped
    address = str(ip)
  if ip.is_private or ip.is_loopback:
    return

  self_network = ipaddress.ip_network(address)

  with lock:
    temp_whitelist = set(WHITELIST)

  if temp_whitelist:
    for wl_key in temp_whitelist:
      wl_net = ipaddress.ip_network(wl_key, False)
      if wl_net.overlaps(self_network):
        logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
        return

  # Bans apply to the configured netban prefix, not the single address
  net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
  net = str(net)

  if not net in bans or time.time() - bans[net]['last_attempt'] > RETRY_WINDOW:
    bans[net] = { 'attempts': 0 }
    active_window = RETRY_WINDOW
  else:
    active_window = time.time() - bans[net]['last_attempt']

  bans[net]['attempts'] += 1
  bans[net]['last_attempt'] = time.time()

  # NOTE(review): active_window is recomputed here (always ~0) and never
  # read afterwards — appears to be dead code.
  active_window = time.time() - bans[net]['last_attempt']

  if bans[net]['attempts'] >= MAX_ATTEMPTS:
    cur_time = int(round(time.time()))
    logCrit('Banning %s for %d minutes' % (net, BAN_TIME / 60))
    if type(ip) is ipaddress.IPv4Address:
      with lock:
        chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
        rule = iptc.Rule()
        rule.src = net
        target = iptc.Target(rule, "REJECT")
        rule.target = target
        if rule not in chain.rules:
          chain.insert_rule(rule)
    else:
      with lock:
        chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
        rule = iptc.Rule6()
        rule.src = net
        target = iptc.Target(rule, "REJECT")
        rule.target = target
        if rule not in chain.rules:
          chain.insert_rule(rule)
    # Expiry timestamp; autopurge()/UI use this hash
    r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + BAN_TIME)
  else:
    logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
+
def unban(net):
  # Remove the REJECT rule for *net* from the MAILCOW chain (v4 or v6) and
  # clear its Redis ban/queue entries plus the in-memory attempt counter.
  global lock
  if not net in bans:
    logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
    r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
    return
  logInfo('Unbanning %s' % net)
  if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
    with lock:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
      rule = iptc.Rule()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule in chain.rules:
        chain.delete_rule(rule)
  else:
    with lock:
      chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
      rule = iptc.Rule6()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule in chain.rules:
        chain.delete_rule(rule)
  r.hdel('F2B_ACTIVE_BANS', '%s' % net)
  r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
  if net in bans:
    del bans[net]
+
def permBan(net, unban=False):
  # Add (or, with unban=True, remove) a permanent blacklist REJECT rule for
  # *net* in the MAILCOW chain, mirrored into the F2B_PERM_BANS Redis hash.
  global lock
  if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
    with lock:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
      rule = iptc.Rule()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule not in chain.rules and not unban:
        logCrit('Add host/network %s to blacklist' % net)
        chain.insert_rule(rule)
        r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
      elif rule in chain.rules and unban:
        logCrit('Remove host/network %s from blacklist' % net)
        chain.delete_rule(rule)
        r.hdel('F2B_PERM_BANS', '%s' % net)
  else:
    with lock:
      chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
      rule = iptc.Rule6()
      rule.src = net
      target = iptc.Target(rule, "REJECT")
      rule.target = target
      if rule not in chain.rules and not unban:
        logCrit('Add host/network %s to blacklist' % net)
        chain.insert_rule(rule)
        r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
      elif rule in chain.rules and unban:
        logCrit('Remove host/network %s from blacklist' % net)
        chain.delete_rule(rule)
        r.hdel('F2B_PERM_BANS', '%s' % net)
+
def quit(signum, frame):
  # Signal handler: request a clean shutdown of all worker loops.
  global quit_now
  quit_now = True
+
def clear():
  # Full teardown: unban everything, flush and delete the MAILCOW chains
  # (v4+v6) including the jump rules in FORWARD/INPUT, clear the Redis ban
  # hashes and unsubscribe from the pub/sub channel.
  global lock
  logInfo('Clearing all bans')
  for net in bans.copy():
    unban(net)
  with lock:
    filter4_table = iptc.Table(iptc.Table.FILTER)
    filter6_table = iptc.Table6(iptc.Table6.FILTER)
    for filter_table in [filter4_table, filter6_table]:
      # Batch all deletions into one commit per table
      filter_table.autocommit = False
      forward_chain = iptc.Chain(filter_table, "FORWARD")
      input_chain = iptc.Chain(filter_table, "INPUT")
      mailcow_chain = iptc.Chain(filter_table, "MAILCOW")
      if mailcow_chain in filter_table.chains:
        for rule in mailcow_chain.rules:
          mailcow_chain.delete_rule(rule)
        for rule in forward_chain.rules:
          if rule.target.name == 'MAILCOW':
            forward_chain.delete_rule(rule)
        for rule in input_chain.rules:
          if rule.target.name == 'MAILCOW':
            input_chain.delete_rule(rule)
        filter_table.delete_chain("MAILCOW")
      filter_table.commit()
      filter_table.refresh()
      filter_table.autocommit = True
  r.delete('F2B_ACTIVE_BANS')
  r.delete('F2B_PERM_BANS')
  pubsub.unsubscribe()
+
def watch():
  # Main loop: consume log lines published on F2B_CHANNEL, match them
  # against the configured regexes and feed the captured IP to ban().
  logInfo('Watching Redis channel F2B_CHANNEL')
  pubsub.subscribe('F2B_CHANNEL')

  while not quit_now:
    for item in pubsub.listen():
      # Regexes are re-read per message so UI changes apply immediately
      refreshF2bregex()
      for rule_id, rule_regex in f2bregex.items():
        if item['data'] and item['type'] == 'message':
          try:
            result = re.search(rule_regex, item['data'])
          except re.error:
            result = False
          if result:
            # Group 1 of every rule captures the offending address
            addr = result.group(1)
            ip = ipaddress.ip_address(addr)
            if ip.is_private or ip.is_loopback:
              continue
            logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
            ban(addr)
+
def snat4(snat_target):
  """Keep a POSTROUTING SNAT rule for the mailcow IPv4 network pinned at
  position 0 of the nat table, re-checking every 10 seconds.

  snat_target -- the source address handed to the SNAT target
  """
  global lock
  global quit_now

  def get_snat4_rule():
    # Rebuilt on every use so comparisons work against freshly-read rules
    rule = iptc.Rule()
    rule.src = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24'
    rule.dst = '!' + rule.src
    target = rule.create_target("SNAT")
    target.to_source = snat_target
    return rule

  while not quit_now:
    time.sleep(10)
    with lock:
      try:
        table = iptc.Table('nat')
        table.refresh()
        chain = iptc.Chain(table, 'POSTROUTING')
        table.autocommit = False
        if get_snat4_rule() not in chain.rules:
          logCrit('Added POSTROUTING rule for source network %s to SNAT target %s' % (get_snat4_rule().src, snat_target))
          chain.insert_rule(get_snat4_rule())
          table.commit()
        else:
          # Rule exists but not first: delete it; it is re-inserted at
          # position 0 on the next pass
          for position, item in enumerate(chain.rules):
            if item == get_snat4_rule():
              if position != 0:
                chain.delete_rule(get_snat4_rule())
                table.commit()
        table.autocommit = True
      # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt
      except Exception:
        print('Error running SNAT4, retrying...')
+
def snat6(snat_target):
  """Keep a POSTROUTING SNAT rule for the mailcow IPv6 network pinned at
  position 0 of the nat table, re-checking every 10 seconds.

  snat_target -- the source address handed to the SNAT target
  """
  global lock
  global quit_now

  def get_snat6_rule():
    # Rebuilt on every use so comparisons work against freshly-read rules
    rule = iptc.Rule6()
    rule.src = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64')
    rule.dst = '!' + rule.src
    target = rule.create_target("SNAT")
    target.to_source = snat_target
    return rule

  while not quit_now:
    time.sleep(10)
    with lock:
      try:
        table = iptc.Table6('nat')
        table.refresh()
        chain = iptc.Chain(table, 'POSTROUTING')
        table.autocommit = False
        if get_snat6_rule() not in chain.rules:
          logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (get_snat6_rule().src, snat_target))
          chain.insert_rule(get_snat6_rule())
          table.commit()
        else:
          # Rule exists but not first: delete it; it is re-inserted at
          # position 0 on the next pass
          for position, item in enumerate(chain.rules):
            if item == get_snat6_rule():
              if position != 0:
                chain.delete_rule(get_snat6_rule())
                table.commit()
        table.autocommit = True
      # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt
      except Exception:
        print('Error running SNAT6, retrying...')
+
+def autopurge():
+ while not quit_now:
+ time.sleep(10)
+ refreshF2boptions()
+ BAN_TIME = int(f2boptions['ban_time'])
+ MAX_ATTEMPTS = int(f2boptions['max_attempts'])
+ QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
+ if QUEUE_UNBAN:
+ for net in QUEUE_UNBAN:
+ unban(str(net))
+ for net in bans.copy():
+ if bans[net]['attempts'] >= MAX_ATTEMPTS:
+ if time.time() - bans[net]['last_attempt'] > BAN_TIME:
+ unban(net)
+
+def isIpNetwork(address):
+ try:
+ ipaddress.ip_network(address, False)
+ except ValueError:
+ return False
+ return True
+
+
+def genNetworkList(list):
+ resolver = dns.resolver.Resolver()
+ hostnames = []
+ networks = []
+ for key in list:
+ if isIpNetwork(key):
+ networks.append(key)
+ else:
+ hostnames.append(key)
+ for hostname in hostnames:
+ hostname_ips = []
+ for rdtype in ['A', 'AAAA']:
+ try:
+ answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
+ except dns.exception.Timeout:
+ logInfo('Hostname %s timedout on resolve' % hostname)
+ break
+ except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+ continue
+ except dns.exception.DNSException as dnsexception:
+ logInfo('%s' % dnsexception)
+ continue
+ for rdata in answer:
+ hostname_ips.append(rdata.to_text())
+ networks.extend(hostname_ips)
+ return set(networks)
+
+def whitelistUpdate():
+ global lock
+ global quit_now
+ global WHITELIST
+ while not quit_now:
+ start_time = time.time()
+ list = r.hgetall('F2B_WHITELIST')
+ new_whitelist = []
+ if list:
+ new_whitelist = genNetworkList(list)
+ with lock:
+ if Counter(new_whitelist) != Counter(WHITELIST):
+ WHITELIST = new_whitelist
+ logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
+ time.sleep(60.0 - ((time.time() - start_time) % 60.0))
+
+def blacklistUpdate():
+ global quit_now
+ global BLACKLIST
+ while not quit_now:
+ start_time = time.time()
+ list = r.hgetall('F2B_BLACKLIST')
+ new_blacklist = []
+ if list:
+ new_blacklist = genNetworkList(list)
+ if Counter(new_blacklist) != Counter(BLACKLIST):
+ addban = set(new_blacklist).difference(BLACKLIST)
+ delban = set(BLACKLIST).difference(new_blacklist)
+ BLACKLIST = new_blacklist
+ logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
+ if addban:
+ for net in addban:
+ permBan(net=net)
+ if delban:
+ for net in delban:
+ permBan(net=net, unban=True)
+ time.sleep(60.0 - ((time.time() - start_time) % 60.0))
+
+def initChain():
+ # Is called before threads start, no locking
+ print("Initializing mailcow netfilter chain")
+ # IPv4
+ if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in iptc.Table(iptc.Table.FILTER).chains:
+ iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW")
+ for c in ['FORWARD', 'INPUT']:
+ chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
+ rule = iptc.Rule()
+ rule.src = '0.0.0.0/0'
+ rule.dst = '0.0.0.0/0'
+ target = iptc.Target(rule, "MAILCOW")
+ rule.target = target
+ if rule not in chain.rules:
+ chain.insert_rule(rule)
+ # IPv6
+ if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), "MAILCOW") in iptc.Table6(iptc.Table6.FILTER).chains:
+ iptc.Table6(iptc.Table6.FILTER).create_chain("MAILCOW")
+ for c in ['FORWARD', 'INPUT']:
+ chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c)
+ rule = iptc.Rule6()
+ rule.src = '::/0'
+ rule.dst = '::/0'
+ target = iptc.Target(rule, "MAILCOW")
+ rule.target = target
+ if rule not in chain.rules:
+ chain.insert_rule(rule)
+
+if __name__ == '__main__':
+
+ # In case a previous session was killed without cleanup
+ clear()
+ # Reinit MAILCOW chain
+ initChain()
+
+ watch_thread = Thread(target=watch)
+ watch_thread.daemon = True
+ watch_thread.start()
+
+ if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
+ try:
+ snat_ip = os.getenv('SNAT_TO_SOURCE')
+ snat_ipo = ipaddress.ip_address(snat_ip)
+ if type(snat_ipo) is ipaddress.IPv4Address:
+ snat4_thread = Thread(target=snat4,args=(snat_ip,))
+ snat4_thread.daemon = True
+ snat4_thread.start()
+ except ValueError:
+ print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')
+
+ if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n':
+ try:
+ snat_ip = os.getenv('SNAT6_TO_SOURCE')
+ snat_ipo = ipaddress.ip_address(snat_ip)
+ if type(snat_ipo) is ipaddress.IPv6Address:
+ snat6_thread = Thread(target=snat6,args=(snat_ip,))
+ snat6_thread.daemon = True
+ snat6_thread.start()
+ except ValueError:
+ print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address')
+
+ autopurge_thread = Thread(target=autopurge)
+ autopurge_thread.daemon = True
+ autopurge_thread.start()
+
+ mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
+ mailcowchainwatch_thread.daemon = True
+ mailcowchainwatch_thread.start()
+
+ blacklistupdate_thread = Thread(target=blacklistUpdate)
+ blacklistupdate_thread.daemon = True
+ blacklistupdate_thread.start()
+
+ whitelistupdate_thread = Thread(target=whitelistUpdate)
+ whitelistupdate_thread.daemon = True
+ whitelistupdate_thread.start()
+
+ signal.signal(signal.SIGTERM, quit)
+ atexit.register(clear)
+
+ while not quit_now:
+ time.sleep(0.5)
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/Dockerfile
new file mode 100644
index 0000000..05ffcc2
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/Dockerfile
@@ -0,0 +1,21 @@
+FROM alpine:3.11
+LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
+
+WORKDIR /app
+
+#RUN addgroup -S olefy && adduser -S olefy -G olefy \
+RUN apk add --virtual .build-deps gcc python3-dev musl-dev libffi-dev openssl-dev \
+ && apk add --update --no-cache python3 openssl tzdata libmagic \
+ && pip3 install --upgrade pip \
+ && pip3 install --upgrade asyncio python-magic \
+ && pip3 install --upgrade https://github.com/HeinleinSupport/oletools/archive/master.zip \
+ && apk del .build-deps
+# && sed -i 's/decompress_stream(bytearray(compressed_code))/bytes2str(decompress_stream(bytearray(compressed_code)))/g' /usr/lib/python3.8/site-packages/oletools/olevba.py
+
+ADD https://raw.githubusercontent.com/HeinleinSupport/olefy/master/olefy.py /app/
+
+RUN chown -R nobody:nobody /app /tmp
+
+USER nobody
+
+CMD ["python3", "-u", "/app/olefy.py"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/Dockerfile
new file mode 100644
index 0000000..5a2d578
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/Dockerfile
@@ -0,0 +1,91 @@
+FROM php:7.4-fpm-alpine3.11
+LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
+
+ENV APCU_PECL 5.1.18
+ENV IMAGICK_PECL 3.4.4
+# Mailparse is pulled from master branch
+#ENV MAILPARSE_PECL 3.0.2
+ENV MEMCACHED_PECL 3.1.5
+ENV REDIS_PECL 5.3.1
+
+RUN apk add -U --no-cache autoconf \
+ aspell-dev \
+ aspell-libs \
+ bash \
+ c-client \
+ cyrus-sasl-dev \
+ freetype \
+ freetype-dev \
+ g++ \
+ git \
+ gettext-dev \
+ gmp-dev \
+ gnupg \
+ icu-dev \
+ icu-libs \
+ imagemagick \
+ imagemagick-dev \
+ imap-dev \
+ jq \
+ libjpeg-turbo \
+ libjpeg-turbo-dev \
+ libmemcached-dev \
+ libpng \
+ libpng-dev \
+ libressl \
+ libressl-dev \
+ librsvg \
+ libtool \
+ libwebp-dev \
+ libxml2-dev \
+ libxpm-dev \
+ libzip-dev \
+ make \
+ mysql-client \
+ openldap-dev \
+ pcre-dev \
+ re2c \
+ redis \
+ samba-client \
+ zlib-dev \
+ tzdata \
+ && git clone https://github.com/php/pecl-mail-mailparse \
+ && cd pecl-mail-mailparse \
+ && pecl install package.xml \
+ && cd .. \
+ && rm -r pecl-mail-mailparse \
+ && pecl install redis-${REDIS_PECL} memcached-${MEMCACHED_PECL} APCu-${APCU_PECL} imagick-${IMAGICK_PECL} \
+ && docker-php-ext-enable apcu imagick memcached mailparse redis \
+ && pecl clear-cache \
+ && docker-php-ext-configure intl \
+ && docker-php-ext-configure exif \
+ && docker-php-ext-configure gd --with-freetype=/usr/include/ \
+ --with-jpeg=/usr/include/ \
+ && docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets xmlrpc zip bcmath gmp \
+ && docker-php-ext-configure imap --with-imap --with-imap-ssl \
+ && docker-php-ext-install -j 4 imap \
+ && curl --silent --show-error https://getcomposer.org/installer | php \
+ && mv composer.phar /usr/local/bin/composer \
+ && chmod +x /usr/local/bin/composer \
+ && apk del --purge autoconf \
+ aspell-dev \
+ cyrus-sasl-dev \
+ freetype-dev \
+ g++ \
+ icu-dev \
+ imagemagick-dev \
+ imap-dev \
+ libjpeg-turbo-dev \
+ libpng-dev \
+ libressl-dev \
+ libwebp-dev \
+ libxml2-dev \
+ make \
+ pcre-dev \
+ zlib-dev
+
+COPY ./docker-entrypoint.sh /
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+CMD ["php-fpm"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/docker-entrypoint.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/docker-entrypoint.sh
new file mode 100755
index 0000000..80df768
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/docker-entrypoint.sh
@@ -0,0 +1,182 @@
+#!/bin/bash
+
+function array_by_comma { local IFS=","; echo "$*"; }
+
+# Wait for the SQL container to come up (Redis is waited on below)
+while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+ echo "Waiting for SQL..."
+ sleep 2
+done
+
+# Do not attempt to write to slave
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+ REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+else
+ REDIS_CMDLINE="redis-cli -h redis -p 6379"
+fi
+
+until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
+ echo "Waiting for Redis..."
+ sleep 2
+done
+
+# Check mysql_upgrade (master and slave)
+CONTAINER_ID=
+until [[ ! -z "${CONTAINER_ID}" ]] && [[ "${CONTAINER_ID}" =~ ^[[:alnum:]]*$ ]]; do
+ CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"mysql-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
+done
+echo "MySQL @ ${CONTAINER_ID}"
+SQL_LOOP_C=0
+SQL_CHANGED=0
+until [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; do
+ if [ ${SQL_LOOP_C} -gt 4 ]; then
+ echo "Tried to upgrade MySQL and failed, giving up after ${SQL_LOOP_C} retries and starting container (oops, not good)"
+ break
+ fi
+ SQL_FULL_UPGRADE_RETURN=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_upgrade"}' --silent -H 'Content-type: application/json')
+ SQL_UPGRADE_STATUS=$(echo ${SQL_FULL_UPGRADE_RETURN} | jq -r .type)
+ SQL_LOOP_C=$((SQL_LOOP_C+1))
+ echo "SQL upgrade iteration #${SQL_LOOP_C}"
+ if [[ ${SQL_UPGRADE_STATUS} == 'warning' ]]; then
+ SQL_CHANGED=1
+ echo "MySQL applied an upgrade, debug output:"
+ echo ${SQL_FULL_UPGRADE_RETURN}
+ sleep 3
+ while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+ echo "Waiting for SQL to return, please wait"
+ sleep 2
+ done
+ continue
+ elif [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; then
+ echo "MySQL is up-to-date - debug output:"
+ echo ${SQL_FULL_UPGRADE_RETURN}
+ else
+ echo "No valid reponse for mysql_upgrade was received, debug output:"
+ echo ${SQL_FULL_UPGRADE_RETURN}
+ fi
+done
+
+# Run post-installation steps if SQL was upgraded (master and slave)
+if [ ${SQL_CHANGED} -eq 1 ]; then
+ POSTFIX=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
+ if [[ -z "${POSTFIX}" ]] || ! [[ "${POSTFIX}" =~ ^[[:alnum:]]*$ ]]; then
+ echo "Could not determine Postfix container ID, skipping Postfix restart."
+ else
+ echo "Restarting Postfix"
+ curl -X POST --silent --insecure https://dockerapi/containers/${POSTFIX}/restart | jq -r '.msg'
+ echo "Sleeping 5 seconds..."
+ sleep 5
+ fi
+fi
+
+# Check mysql tz import (master and slave)
+TZ_CHECK=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC') AS time;" -BN 2> /dev/null)
+if [[ -z ${TZ_CHECK} ]] || [[ "${TZ_CHECK}" == "NULL" ]]; then
+ SQL_FULL_TZINFO_IMPORT_RETURN=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_tzinfo_to_sql"}' --silent -H 'Content-type: application/json')
+ echo "MySQL mysql_tzinfo_to_sql - debug output:"
+ echo ${SQL_FULL_TZINFO_IMPORT_RETURN}
+fi
+
+if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ echo "We are master, preparing..."
+ # Set a default release format
+ if [[ -z $(${REDIS_CMDLINE} --raw GET Q_RELEASE_FORMAT) ]]; then
+ ${REDIS_CMDLINE} --raw SET Q_RELEASE_FORMAT raw
+ fi
+
+ # Set max age of q items - if unset
+ if [[ -z $(${REDIS_CMDLINE} --raw GET Q_MAX_AGE) ]]; then
+ ${REDIS_CMDLINE} --raw SET Q_MAX_AGE 365
+ fi
+
+ # Trigger db init
+ echo "Running DB init..."
+ php -c /usr/local/etc/php -f /web/inc/init_db.inc.php
+
+ # Recreating domain map
+ echo "Rebuilding domain map in Redis..."
+ declare -a DOMAIN_ARR
+ ${REDIS_CMDLINE} DEL DOMAIN_MAP > /dev/null
+ while read line
+ do
+ DOMAIN_ARR+=("$line")
+ done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain" -Bs)
+ while read line
+ do
+ DOMAIN_ARR+=("$line")
+ done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT alias_domain FROM alias_domain" -Bs)
+
+ if [[ ! -z ${DOMAIN_ARR} ]]; then
+ for domain in "${DOMAIN_ARR[@]}"; do
+ ${REDIS_CMDLINE} HSET DOMAIN_MAP ${domain} 1 > /dev/null
+ done
+ fi
+
+ # Set API options if env vars are not empty
+ if [[ ${API_ALLOW_FROM} != "invalid" ]] && [[ ! -z ${API_ALLOW_FROM} ]]; then
+ IFS=',' read -r -a API_ALLOW_FROM_ARR <<< "${API_ALLOW_FROM}"
+ declare -a VALIDATED_API_ALLOW_FROM_ARR
+ REGEX_IP6='^([0-9a-fA-F]{0,4}:){1,7}[0-9a-fA-F]{0,4}(/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$'
+ REGEX_IP4='^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(/([0-9]|[1-2][0-9]|3[0-2]))?$'
+ for IP in "${API_ALLOW_FROM_ARR[@]}"; do
+ if [[ ${IP} =~ ${REGEX_IP6} ]] || [[ ${IP} =~ ${REGEX_IP4} ]]; then
+ VALIDATED_API_ALLOW_FROM_ARR+=("${IP}")
+ fi
+ done
+ VALIDATED_IPS=$(array_by_comma ${VALIDATED_API_ALLOW_FROM_ARR[*]})
+ if [[ ! -z ${VALIDATED_IPS} ]]; then
+ if [[ ${API_KEY} != "invalid" ]] && [[ ! -z ${API_KEY} ]]; then
+ mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+DELETE FROM api WHERE access = 'rw';
+INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY}", "1", "${VALIDATED_IPS}", "rw");
+EOF
+ fi
+ if [[ ${API_KEY_READ_ONLY} != "invalid" ]] && [[ ! -z ${API_KEY_READ_ONLY} ]]; then
+ mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+DELETE FROM api WHERE access = 'ro';
+INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY_READ_ONLY}", "1", "${VALIDATED_IPS}", "ro");
+EOF
+ fi
+ fi
+ fi
+
+ # Create events (master only, STATUS for event on slave will be SLAVESIDE_DISABLED)
+ mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+DROP EVENT IF EXISTS clean_spamalias;
+DELIMITER //
+CREATE EVENT clean_spamalias
+ON SCHEDULE EVERY 1 DAY DO
+BEGIN
+ DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP();
+END;
+//
+DELIMITER ;
+DROP EVENT IF EXISTS clean_oauth2;
+DELIMITER //
+CREATE EVENT clean_oauth2
+ON SCHEDULE EVERY 1 DAY DO
+BEGIN
+ DELETE FROM oauth_refresh_tokens WHERE expires < NOW();
+ DELETE FROM oauth_access_tokens WHERE expires < NOW();
+ DELETE FROM oauth_authorization_codes WHERE expires < NOW();
+END;
+//
+DELIMITER ;
+EOF
+fi
+
+# Create placeholder CSS file for custom overrides of the mailcow style
+[[ ! -f /web/css/build/0081-custom-mailcow.css ]] && echo '/* Autogenerated by mailcow */' > /web/css/build/0081-custom-mailcow.css
+
+# Fix permissions for global filters
+chown -R 82:82 /global_sieve/*
+
+# Run hooks
+for file in /hooks/*; do
+ if [ -x "${file}" ]; then
+ echo "Running hook ${file}"
+ "${file}"
+ fi
+done
+
+exec "$@"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/Dockerfile
new file mode 100644
index 0000000..8b913af
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/Dockerfile
@@ -0,0 +1,64 @@
+FROM debian:buster-slim
+LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
+
+ARG DEBIAN_FRONTEND=noninteractive
+ENV LC_ALL C
+
+RUN dpkg-divert --local --rename --add /sbin/initctl \
+ && ln -sf /bin/true /sbin/initctl \
+ && dpkg-divert --local --rename --add /usr/bin/ischroot \
+ && ln -sf /bin/true /usr/bin/ischroot
+
+# Add groups and users before installing Postfix to not break compatibility
+RUN groupadd -g 102 postfix \
+ && groupadd -g 103 postdrop \
+ && useradd -g postfix -u 101 -d /var/spool/postfix -s /usr/sbin/nologin postfix \
+ && apt-get update && apt-get install -y --no-install-recommends \
+ ca-certificates \
+ curl \
+ dirmngr \
+ dnsutils \
+ gnupg \
+ libsasl2-modules \
+ mariadb-client \
+ perl \
+ postfix \
+ postfix-mysql \
+ postfix-pcre \
+ redis-tools \
+ sasl2-bin \
+ sudo \
+ supervisor \
+ syslog-ng \
+ syslog-ng-core \
+ syslog-ng-mod-redis \
+ tzdata \
+ && rm -rf /var/lib/apt/lists/* \
+ && touch /etc/default/locale \
+ && printf '#!/bin/bash\n/usr/sbin/postconf -c /opt/postfix/conf "$@"' > /usr/local/sbin/postconf \
+ && chmod +x /usr/local/sbin/postconf
+
+COPY supervisord.conf /etc/supervisor/supervisord.conf
+COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
+COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
+COPY postfix.sh /opt/postfix.sh
+COPY rspamd-pipe-ham /usr/local/bin/rspamd-pipe-ham
+COPY rspamd-pipe-spam /usr/local/bin/rspamd-pipe-spam
+COPY whitelist_forwardinghosts.sh /usr/local/bin/whitelist_forwardinghosts.sh
+COPY smtpd_last_login.sh /usr/local/bin/smtpd_last_login.sh
+COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
+COPY docker-entrypoint.sh /docker-entrypoint.sh
+
+RUN chmod +x /opt/postfix.sh \
+ /usr/local/bin/rspamd-pipe-ham \
+ /usr/local/bin/rspamd-pipe-spam \
+ /usr/local/bin/whitelist_forwardinghosts.sh \
+ /usr/local/bin/smtpd_last_login.sh \
+ /usr/local/sbin/stop-supervisor.sh
+RUN rm -rf /tmp/* /var/tmp/*
+
+EXPOSE 588
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/docker-entrypoint.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/docker-entrypoint.sh
new file mode 100755
index 0000000..c97b128
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/docker-entrypoint.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# Run hooks
+for file in /hooks/*; do
+ if [ -x "${file}" ]; then
+ echo "Running hook ${file}"
+ "${file}"
+ fi
+done
+
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+ cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
+fi
+
+exec "$@"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/postfix.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/postfix.sh
new file mode 100755
index 0000000..3b18de4
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/postfix.sh
@@ -0,0 +1,379 @@
+#!/bin/bash
+
+trap "postfix stop" EXIT
+
+[[ ! -d /opt/postfix/conf/sql/ ]] && mkdir -p /opt/postfix/conf/sql/
+
+# Wait for MySQL to warm up
+while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+ echo "Waiting for database to come up..."
+ sleep 2
+done
+
+until dig +short mailcow.email @unbound > /dev/null; do
+ echo "Waiting for DNS..."
+ sleep 1
+done
+
+cat <<EOF > /etc/aliases
+# Autogenerated by mailcow
+null: /dev/null
+watchdog: /dev/null
+ham: "|/usr/local/bin/rspamd-pipe-ham"
+spam: "|/usr/local/bin/rspamd-pipe-spam"
+EOF
+newaliases;
+
+# Create the TLS SNI map from per-domain certificate directories
+echo -n "" > /opt/postfix/conf/sni.map;
+for cert_dir in /etc/ssl/mail/*/ ; do
+ if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
+ continue;
+ fi
+ IFS=" " read -r -a domains <<< "$(cat "${cert_dir}domains")"
+ for domain in "${domains[@]}"; do
+ echo -n "${domain} ${cert_dir}key.pem ${cert_dir}cert.pem" >> /opt/postfix/conf/sni.map;
+ echo "" >> /opt/postfix/conf/sni.map;
+ done
+done
+postmap -F hash:/opt/postfix/conf/sni.map;
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_relay_ne.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT IF(EXISTS(SELECT address, domain FROM alias
+ WHERE address = '%s'
+ AND domain IN (
+ SELECT domain FROM domain
+ WHERE backupmx = '1'
+ AND relay_all_recipients = '1'
+ AND relay_unknown_only = '1')
+
+ ), 'lmtp:inet:dovecot:24', NULL) AS 'transport'
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_relay_recipient_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT DISTINCT
+ CASE WHEN '%d' IN (
+ SELECT domain FROM domain
+ WHERE relay_all_recipients=1
+ AND domain='%d'
+ AND backupmx=1
+ )
+ THEN '%s' ELSE (
+ SELECT goto FROM alias WHERE address='%s' AND active='1'
+ )
+ END AS result;
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_tls_policy_override_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT CONCAT(policy, ' ', parameters) AS tls_policy FROM tls_policy_override WHERE active = '1' AND dest = '%s'
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_tls_enforce_in_policy.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT IF(EXISTS(
+ SELECT 'TLS_ACTIVE' FROM alias
+ LEFT OUTER JOIN mailbox ON mailbox.username = alias.goto
+ WHERE (address='%s'
+ OR address IN (
+ SELECT CONCAT('%u', '@', target_domain) FROM alias_domain
+ WHERE alias_domain='%d'
+ )
+ ) AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.tls_enforce_in')) = '1' AND mailbox.active = '1'
+ ), 'reject_plaintext_session', NULL) AS 'tls_enforce_in';
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_sender_dependent_default_transport_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT GROUP_CONCAT(transport SEPARATOR '') AS transport_maps
+ FROM (
+ SELECT IF(EXISTS(SELECT 'smtp_type' FROM alias
+ LEFT OUTER JOIN mailbox ON mailbox.username = alias.goto
+ WHERE (address = '%s'
+ OR address IN (
+ SELECT CONCAT('%u', '@', target_domain) FROM alias_domain
+ WHERE alias_domain = '%d'
+ )
+ )
+ AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.tls_enforce_out')) = '1'
+ AND mailbox.active = '1'
+ ), 'smtp_enforced_tls:', 'smtp:') AS 'transport'
+ UNION ALL
+ SELECT hostname AS transport FROM relayhosts
+ LEFT OUTER JOIN domain ON domain.relayhost = relayhosts.id
+ WHERE relayhosts.active = '1'
+ AND domain = '%d'
+ OR domain IN (
+ SELECT target_domain FROM alias_domain
+ WHERE alias_domain = '%d'
+ )
+ )
+ AS transport_view;
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_transport_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT CONCAT('smtp_via_transport_maps:', nexthop) AS transport FROM transports
+ WHERE active = '1'
+ AND destination = '%s';
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_resource_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT 'null@localhost' FROM mailbox
+ WHERE kind REGEXP 'location|thing|group' AND username = '%s';
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_passwd_maps_sender_dependent.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT CONCAT_WS(':', username, password) AS auth_data FROM relayhosts
+ WHERE id IN (
+ SELECT relayhost FROM domain
+ WHERE CONCAT('@', domain) = '%s'
+ OR domain IN (
+ SELECT target_domain FROM alias_domain WHERE CONCAT('@', alias_domain) = '%s'
+ )
+ )
+ AND active = '1'
+ AND username != '';
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_passwd_maps_transport_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT CONCAT_WS(':', username, password) AS auth_data FROM transports
+ WHERE nexthop = '%s'
+ AND active = '1'
+ AND username != ''
+ LIMIT 1;
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_alias_domain_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT username FROM mailbox, alias_domain
+ WHERE alias_domain.alias_domain = '%d'
+ AND mailbox.username = CONCAT('%u', '@', alias_domain.target_domain)
+ AND (mailbox.active = '1' OR mailbox.active = '2')
+ AND alias_domain.active='1'
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_alias_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT goto FROM alias
+ WHERE address='%s'
+ AND (active='1' OR active='2');
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_recipient_bcc_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT bcc_dest FROM bcc_maps
+ WHERE local_dest='%s'
+ AND type='rcpt'
+ AND active='1';
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_sender_bcc_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT bcc_dest FROM bcc_maps
+ WHERE local_dest='%s'
+ AND type='sender'
+ AND active='1';
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_recipient_canonical_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT new_dest FROM recipient_maps
+ WHERE old_dest='%s'
+ AND active='1';
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_domains_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT alias_domain from alias_domain WHERE alias_domain='%s' AND active='1'
+ UNION
+ SELECT domain FROM domain
+ WHERE domain='%s'
+ AND active = '1'
+ AND backupmx = '0'
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_mailbox_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%u/') FROM mailbox WHERE username='%s' AND (active = '1' OR active = '2')
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_relay_domain_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT domain FROM domain WHERE domain='%s' AND backupmx = '1' AND active = '1'
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_sender_acl.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+# First select queries domain and alias_domain to determine if domains are active.
+query = SELECT goto FROM alias
+ WHERE address='%s'
+ AND active='1'
+ AND (domain IN
+ (SELECT domain FROM domain
+ WHERE domain='%d'
+ AND active='1')
+ OR domain in (
+ SELECT alias_domain FROM alias_domain
+ WHERE alias_domain='%d'
+ AND active='1'
+ )
+ )
+ UNION
+ SELECT logged_in_as FROM sender_acl
+ WHERE send_as='@%d'
+ OR send_as='%s'
+ OR send_as='*'
+ OR send_as IN (
+ SELECT CONCAT('@',target_domain) FROM alias_domain
+ WHERE alias_domain = '%d')
+ OR send_as IN (
+ SELECT CONCAT('%u','@',target_domain) FROM alias_domain
+ WHERE alias_domain = '%d')
+ AND logged_in_as NOT IN (
+ SELECT goto FROM alias
+ WHERE address='%s')
+ UNION
+ SELECT username FROM mailbox, alias_domain
+ WHERE alias_domain.alias_domain = '%d'
+ AND mailbox.username = CONCAT('%u','@',alias_domain.target_domain)
+ AND (mailbox.active = '1' OR mailbox.active ='2')
+ AND alias_domain.active='1'
+EOF
+
+# Reject SASL usernames whose SMTP access is disabled
+cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_access_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT 'REJECT' FROM mailbox WHERE username = '%u' AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.smtp_access')) = '0';
+EOF
+
+cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_spamalias_maps.cf
+# Autogenerated by mailcow
+user = ${DBUSER}
+password = ${DBPASS}
+hosts = unix:/var/run/mysqld/mysqld.sock
+dbname = ${DBNAME}
+query = SELECT goto FROM spamalias
+ WHERE address='%s'
+ AND validity >= UNIX_TIMESTAMP()
+EOF
+
+sed -i '/User overrides/q' /opt/postfix/conf/main.cf
+echo >> /opt/postfix/conf/main.cf
+touch /opt/postfix/conf/extra.cf
+sed -i '/myhostname/d' /opt/postfix/conf/extra.cf
+echo -e "myhostname = ${MAILCOW_HOSTNAME}\n$(cat /opt/postfix/conf/extra.cf)" > /opt/postfix/conf/extra.cf
+
+cat /opt/postfix/conf/extra.cf >> /opt/postfix/conf/main.cf
+
+if [ ! -f /opt/postfix/conf/custom_transport.pcre ]; then
+ echo "Creating dummy custom_transport.pcre"
+ touch /opt/postfix/conf/custom_transport.pcre
+fi
+
+if [[ ! -f /opt/postfix/conf/custom_postscreen_whitelist.cidr ]]; then
+ echo "Creating dummy custom_postscreen_whitelist.cidr"
+ echo '# Autogenerated by mailcow' > /opt/postfix/conf/custom_postscreen_whitelist.cidr
+fi
+
+# Fix SMTP last login on slaves
+sed -i "s/__REDIS_SLAVEOF_IP__/${REDIS_SLAVEOF_IP}/g" /usr/local/bin/smtpd_last_login.sh
+
+# Fix Postfix permissions
+chown -R root:postfix /opt/postfix/conf/sql/ /opt/postfix/conf/custom_transport.pcre
+chmod 640 /opt/postfix/conf/sql/*.cf /opt/postfix/conf/custom_transport.pcre
+chgrp -R postdrop /var/spool/postfix/public
+chgrp -R postdrop /var/spool/postfix/maildrop
+postfix set-permissions
+
+# Check Postfix configuration
+postconf -c /opt/postfix/conf > /dev/null
+
+if [[ $? != 0 ]]; then
+ echo "Postfix configuration error, refusing to start."
+ exit 1
+else
+ postfix -c /opt/postfix/conf start
+ sleep 126144000
+fi
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/rspamd-pipe-ham b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/rspamd-pipe-ham
new file mode 100755
index 0000000..9b26817
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/rspamd-pipe-ham
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Spool the piped-in message to a temp file; remove it on exit or signal.
+FILE=/tmp/mail$$
+cat > $FILE
+trap "/bin/rm -f $FILE" 0 1 2 3 13 15
+/usr/bin/curl -s --data-binary @${FILE} --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnham
+/usr/bin/curl -H "Flag: 13" -s --data-binary @${FILE} --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd
+
+exit 0
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/rspamd-pipe-spam b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/rspamd-pipe-spam
new file mode 100755
index 0000000..d06aa91
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/rspamd-pipe-spam
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Spool the piped-in message to a temp file; remove it on exit or signal.
+FILE=/tmp/mail$$
+cat > $FILE
+trap "/bin/rm -f $FILE" 0 1 2 3 13 15
+/usr/bin/curl -s --data-binary @${FILE} --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnspam
+/usr/bin/curl -H "Flag: 11" -s --data-binary @${FILE} --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd
+
+exit 0
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/smtpd_last_login.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/smtpd_last_login.sh
new file mode 100755
index 0000000..9d249af
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/smtpd_last_login.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+REDIS_SLAVEOF_IP=__REDIS_SLAVEOF_IP__ # placeholder; replaced via sed by the postfix entrypoint at container start
+
+# Do not attempt to write to slave
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}" # NOTE(review): only the IP placeholder is substituted; REDIS_SLAVEOF_PORT must come from the spawn environment — confirm Postfix exports it
+else
+  REDIS_CMDLINE="redis-cli -h redis -p 6379"
+fi
+
+# Postfix policy-service loop: reads attribute lines, answers each request with an action.
+while read QUERY; do
+  QUERY=($QUERY)
+  # If nothing matched, end here - Postfix last line will be empty
+  if [[ -z "$(echo ${QUERY[0]} | tr -d '\040\011\012\015')" ]]; then
+    echo -ne "action=dunno\n\n"
+  # We found a username, log and return
+  elif [[ "${QUERY[0]}" =~ sasl_username ]]; then
+    MUSER=$(printf "%q" ${QUERY[0]#sasl_username=}) # %q shell-quotes the username so it is safe inside the Redis key
+    ${REDIS_CMDLINE} SET "last-login/smtp/$MUSER" "$(date +%s)"
+    echo -ne "action=dunno\n\n"
+  fi
+done
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/stop-supervisor.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/stop-supervisor.sh
new file mode 100755
index 0000000..5394490
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/stop-supervisor.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+printf "READY\n"; # supervisord event-listener protocol handshake: signal readiness for events
+
+while read line; do
+  echo "Processing Event: $line" >&2;
+  kill -3 $(cat "/var/run/supervisord.pid") # SIGQUIT supervisord so the whole container exits when any program stops
+done < /dev/stdin
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/supervisord.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/supervisord.conf
new file mode 100644
index 0000000..134a6c6
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/supervisord.conf
@@ -0,0 +1,24 @@
+[supervisord]
+pidfile=/var/run/supervisord.pid
+nodaemon=true
+user=root
+
+[program:syslog-ng]
+command=/usr/sbin/syslog-ng --foreground --no-caps
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autostart=true
+
+[program:postfix]
+command=/opt/postfix.sh
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autorestart=true
+
+[eventlistener:processes]
+command=/usr/local/sbin/stop-supervisor.sh
+events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng-redis_slave.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng-redis_slave.conf
new file mode 100644
index 0000000..609ee55
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng-redis_slave.conf
@@ -0,0 +1,53 @@
+@version: 3.19
+@include "scl.conf"
+options {
+ chain_hostnames(off);
+ flush_lines(0);
+ use_dns(no);
+ dns_cache(no);
+ use_fqdn(no);
+ owner("root"); group("adm"); perm(0640);
+ stats_freq(0);
+ bad_hostname("^gconfd$");
+};
+source s_src {
+ unix-stream("/dev/log");
+ internal();
+};
+destination d_stdout { pipe("/dev/stdout"); };
+destination d_redis_ui_log {
+ redis(
+ host("`REDIS_SLAVEOF_IP`")
+ persist-name("redis1")
+ port(`REDIS_SLAVEOF_PORT`)
+ command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
+ );
+};
+destination d_redis_f2b_channel {
+ redis(
+ host("`REDIS_SLAVEOF_IP`")
+ persist-name("redis2")
+ port(`REDIS_SLAVEOF_PORT`)
+ command("PUBLISH" "F2B_CHANNEL" "$MESSAGE")
+ );
+};
+filter f_mail { facility(mail); };
+# start
+# overriding warnings are still displayed when the entrypoint runs its initial check
+# warnings logged by postfix-mailcow to syslog are hidden to reduce repeating msgs
+# Some other warnings are ignored
+filter f_ignore {
+ not match("overriding earlier entry" value("MESSAGE"));
+ not match("TLS SNI from checks.mailcow.email" value("MESSAGE"));
+ not match("no SASL support" value("MESSAGE"));
+ not facility (local0, local1, local2, local3, local4, local5, local6, local7);
+};
+# end
+log {
+ source(s_src);
+ filter(f_ignore);
+ destination(d_stdout);
+ filter(f_mail);
+ destination(d_redis_ui_log);
+ destination(d_redis_f2b_channel);
+};
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng.conf
new file mode 100644
index 0000000..9e14fe1
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng.conf
@@ -0,0 +1,53 @@
+@version: 3.19
+@include "scl.conf"
+options {
+ chain_hostnames(off);
+ flush_lines(0);
+ use_dns(no);
+ dns_cache(no);
+ use_fqdn(no);
+ owner("root"); group("adm"); perm(0640);
+ stats_freq(0);
+ bad_hostname("^gconfd$");
+};
+source s_src {
+ unix-stream("/dev/log");
+ internal();
+};
+destination d_stdout { pipe("/dev/stdout"); };
+destination d_redis_ui_log {
+ redis(
+ host("redis-mailcow")
+ persist-name("redis1")
+ port(6379)
+ command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
+ );
+};
+destination d_redis_f2b_channel {
+ redis(
+ host("redis-mailcow")
+ persist-name("redis2")
+ port(6379)
+ command("PUBLISH" "F2B_CHANNEL" "$MESSAGE")
+ );
+};
+filter f_mail { facility(mail); };
+# start
+# overriding warnings are still displayed when the entrypoint runs its initial check
+# warnings logged by postfix-mailcow to syslog are hidden to reduce repeating msgs
+# Some other warnings are ignored
+filter f_ignore {
+ not match("overriding earlier entry" value("MESSAGE"));
+ not match("TLS SNI from checks.mailcow.email" value("MESSAGE"));
+ not match("no SASL support" value("MESSAGE"));
+ not facility (local0, local1, local2, local3, local4, local5, local6, local7);
+};
+# end
+log {
+ source(s_src);
+ filter(f_ignore);
+ destination(d_stdout);
+ filter(f_mail);
+ destination(d_redis_ui_log);
+ destination(d_redis_f2b_channel);
+};
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/whitelist_forwardinghosts.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/whitelist_forwardinghosts.sh
new file mode 100755
index 0000000..4ad5ab3
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/whitelist_forwardinghosts.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Postfix tcp_table client: answers "get <host>" lookups via the nginx forwardinghosts endpoint.
+while read QUERY; do
+  QUERY=($QUERY)
+  if [ "${QUERY[0]}" != "get" ]; then
+    echo "500 dunno"
+    continue
+  fi
+  result=$(curl -s "http://nginx:8081/forwardinghosts.php?host=${QUERY[1]}")
+  logger -t whitelist_forwardinghosts -p mail.info "Look up ${QUERY[1]} on whitelist, result $result"
+  echo ${result}
+done
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/Dockerfile
new file mode 100644
index 0000000..888bdcb
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/Dockerfile
@@ -0,0 +1,33 @@
+FROM debian:buster-slim
+LABEL maintainer="Andre Peters <andre.peters@servercow.de>"
+
+ARG DEBIAN_FRONTEND=noninteractive
+ARG CODENAME=buster
+ENV LC_ALL=C
+
+RUN apt-get update && apt-get install -y \
+  tzdata \
+  ca-certificates \
+  gnupg2 \
+  apt-transport-https \
+  dnsutils \
+  netcat \
+  && apt-key adv --fetch-keys https://rspamd.com/apt-stable/gpg.key \
+  && echo "deb [arch=amd64] https://rspamd.com/apt-stable/ $CODENAME main" > /etc/apt/sources.list.d/rspamd.list \
+  && apt-get update \
+  && apt-get --no-install-recommends -y install rspamd redis-tools \
+  && rm -rf /var/lib/apt/lists/* \
+  && apt-get autoremove --purge \
+  && apt-get clean \
+  && mkdir -p /run/rspamd \
+  && chown _rspamd:_rspamd /run/rspamd
+
+COPY settings.conf /etc/rspamd/settings.conf
+COPY metadata_exporter.lua /usr/share/rspamd/plugins/metadata_exporter.lua
+COPY docker-entrypoint.sh /docker-entrypoint.sh
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+STOPSIGNAL SIGTERM
+
+CMD ["/usr/bin/rspamd", "-f", "-u", "_rspamd", "-g", "_rspamd"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/docker-entrypoint.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/docker-entrypoint.sh
new file mode 100755
index 0000000..203a196
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/docker-entrypoint.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+until nc phpfpm 9001 -z; do
+ echo "Waiting for PHP on port 9001..."
+ sleep 3
+done
+
+until nc phpfpm 9002 -z; do
+ echo "Waiting for PHP on port 9002..."
+ sleep 3
+done
+
+mkdir -p /etc/rspamd/plugins.d \
+ /etc/rspamd/custom
+
+touch /etc/rspamd/rspamd.conf.local \
+ /etc/rspamd/rspamd.conf.override
+
+chmod 755 /var/lib/rspamd
+
+
+[[ ! -f /etc/rspamd/override.d/worker-controller-password.inc ]] && echo '# Autogenerated by mailcow' > /etc/rspamd/override.d/worker-controller-password.inc
+
+echo ${IPV4_NETWORK}.0/24 > /etc/rspamd/custom/mailcow_networks.map
+echo ${IPV6_NETWORK} >> /etc/rspamd/custom/mailcow_networks.map
+
+DOVECOT_V4=
+DOVECOT_V6=
+until [[ ! -z ${DOVECOT_V4} ]]; do
+ DOVECOT_V4=$(dig a dovecot +short)
+ DOVECOT_V6=$(dig aaaa dovecot +short)
+ [[ ! -z ${DOVECOT_V4} ]] && break;
+ echo "Waiting for Dovecot..."
+ sleep 3
+done
+echo ${DOVECOT_V4}/32 > /etc/rspamd/custom/dovecot_trusted.map
+if [[ ! -z ${DOVECOT_V6} ]]; then
+ echo ${DOVECOT_V6}/128 >> /etc/rspamd/custom/dovecot_trusted.map
+fi
+
+RSPAMD_V4=
+RSPAMD_V6=
+until [[ ! -z ${RSPAMD_V4} ]]; do
+ RSPAMD_V4=$(dig a rspamd +short)
+ RSPAMD_V6=$(dig aaaa rspamd +short)
+ [[ ! -z ${RSPAMD_V4} ]] && break;
+ echo "Waiting for Rspamd..."
+ sleep 3
+done
+echo ${RSPAMD_V4}/32 > /etc/rspamd/custom/rspamd_trusted.map
+if [[ ! -z ${RSPAMD_V6} ]]; then
+ echo ${RSPAMD_V6}/128 >> /etc/rspamd/custom/rspamd_trusted.map
+fi
+
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+ cat <<EOF > /etc/rspamd/local.d/redis.conf
+read_servers = "redis:6379";
+write_servers = "${REDIS_SLAVEOF_IP}:${REDIS_SLAVEOF_PORT}";
+timeout = 10;
+EOF
+ until [[ $(redis-cli -h redis-mailcow PING) == "PONG" ]]; do
+ echo "Waiting for Redis @redis-mailcow..."
+ sleep 2
+ done
+ until [[ $(redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} PING) == "PONG" ]]; do
+ echo "Waiting for Redis @${REDIS_SLAVEOF_IP}..."
+ sleep 2
+ done
+ redis-cli -h redis-mailcow SLAVEOF ${REDIS_SLAVEOF_IP} ${REDIS_SLAVEOF_PORT}
+else
+ cat <<EOF > /etc/rspamd/local.d/redis.conf
+servers = "redis:6379";
+timeout = 10;
+EOF
+ until [[ $(redis-cli -h redis-mailcow PING) == "PONG" ]]; do
+ echo "Waiting for Redis slave..."
+ sleep 2
+ done
+ redis-cli -h redis-mailcow SLAVEOF NO ONE
+fi
+
+chown -R _rspamd:_rspamd /var/lib/rspamd \
+ /etc/rspamd/local.d \
+ /etc/rspamd/override.d \
+ /etc/rspamd/rspamd.conf.local \
+ /etc/rspamd/rspamd.conf.override \
+ /etc/rspamd/plugins.d
+
+# Fix missing default global maps, if any
+# These exists in mailcow UI and should not be removed
+touch /etc/rspamd/custom/global_mime_from_blacklist.map \
+ /etc/rspamd/custom/global_rcpt_blacklist.map \
+ /etc/rspamd/custom/global_smtp_from_blacklist.map \
+ /etc/rspamd/custom/global_mime_from_whitelist.map \
+ /etc/rspamd/custom/global_rcpt_whitelist.map \
+ /etc/rspamd/custom/global_smtp_from_whitelist.map \
+ /etc/rspamd/custom/bad_languages.map \
+ /etc/rspamd/custom/sa-rules \
+ /etc/rspamd/custom/dovecot_trusted.map \
+ /etc/rspamd/custom/rspamd_trusted.map \
+ /etc/rspamd/custom/mailcow_networks.map \
+ /etc/rspamd/custom/ip_wl.map \
+ /etc/rspamd/custom/fishy_tlds.map \
+ /etc/rspamd/custom/bad_words.map \
+ /etc/rspamd/custom/bad_asn.map \
+ /etc/rspamd/custom/bad_words_de.map \
+ /etc/rspamd/custom/bulk_header.map
+
+# www-data (82) group needs to write to these files
+chown _rspamd:_rspamd /etc/rspamd/custom/
+chmod 0755 /etc/rspamd/custom/.
+chown -R 82:82 /etc/rspamd/custom/*
+chmod 644 -R /etc/rspamd/custom/*
+
+# Run hooks
+for file in /hooks/*; do
+ if [ -x "${file}" ]; then
+ echo "Running hook ${file}"
+ "${file}"
+ fi
+done
+
+exec "$@"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/metadata_exporter.lua b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/metadata_exporter.lua
new file mode 100644
index 0000000..48a5ffc
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/metadata_exporter.lua
@@ -0,0 +1,632 @@
+--[[
+Copyright (c) 2016, Andrew Lewis <nerf@judo.za.org>
+Copyright (c) 2016, Vsevolod Stakhov <vsevolod@highsecure.ru>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+]]--
+
+if confighelp then
+ return
+end
+
+-- A plugin that pushes metadata (or whole messages) to external services
+
+local redis_params
+local lua_util = require "lua_util"
+local rspamd_http = require "rspamd_http"
+local rspamd_util = require "rspamd_util"
+local rspamd_logger = require "rspamd_logger"
+local ucl = require "ucl"
+local E = {}
+local N = 'metadata_exporter'
+
+local settings = {
+ pusher_enabled = {},
+ pusher_format = {},
+ pusher_select = {},
+ mime_type = 'text/plain',
+ defer = false,
+ mail_from = '',
+ mail_to = 'postmaster@localhost',
+ helo = 'rspamd',
+ email_template = [[From: "Rspamd" <$mail_from>
+To: $mail_to
+Subject: Spam alert
+Date: $date
+MIME-Version: 1.0
+Message-ID: <$our_message_id>
+Content-type: text/plain; charset=utf-8
+Content-Transfer-Encoding: 8bit
+
+Authenticated username: $user
+IP: $ip
+Queue ID: $qid
+SMTP FROM: $from
+SMTP RCPT: $rcpt
+MIME From: $header_from
+MIME To: $header_to
+MIME Date: $header_date
+Subject: $header_subject
+Message-ID: $message_id
+Action: $action
+Score: $score
+Symbols: $symbols]],
+}
+
+local function get_general_metadata(task, flatten, no_content)
+ local r = {}
+ local ip = task:get_from_ip()
+ if ip and ip:is_valid() then
+ r.ip = tostring(ip)
+ else
+ r.ip = 'unknown'
+ end
+ r.user = task:get_user() or 'unknown'
+ r.qid = task:get_queue_id() or 'unknown'
+ r.subject = task:get_subject() or 'unknown'
+ r.action = task:get_metric_action('default')
+
+ local s = task:get_metric_score('default')[1]
+ r.score = flatten and string.format('%.2f', s) or s
+
+ local fuzzy = task:get_mempool():get_variable("fuzzy_hashes", "fstrings")
+ if fuzzy and #fuzzy > 0 then
+ local fz = {}
+ for _,h in ipairs(fuzzy) do
+ table.insert(fz, h)
+ end
+ if not flatten then
+ r.fuzzy = fz
+ else
+ r.fuzzy = table.concat(fz, ', ')
+ end
+ else
+ r.fuzzy = 'unknown'
+ end
+
+ local rcpt = task:get_recipients('smtp')
+ if rcpt then
+ local l = {}
+ for _, a in ipairs(rcpt) do
+ table.insert(l, a['addr'])
+ end
+ if not flatten then
+ r.rcpt = l
+ else
+ r.rcpt = table.concat(l, ', ')
+ end
+ else
+ r.rcpt = 'unknown'
+ end
+ local from = task:get_from('smtp')
+ if ((from or E)[1] or E).addr then
+ r.from = from[1].addr
+ else
+ r.from = 'unknown'
+ end
+ local syminf = task:get_symbols_all()
+ if flatten then
+ local l = {}
+ for _, sym in ipairs(syminf) do
+ local txt
+ if sym.options then
+ local topt = table.concat(sym.options, ', ')
+ txt = sym.name .. '(' .. string.format('%.2f', sym.score) .. ')' .. ' [' .. topt .. ']'
+ else
+ txt = sym.name .. '(' .. string.format('%.2f', sym.score) .. ')'
+ end
+ table.insert(l, txt)
+ end
+ r.symbols = table.concat(l, '\n\t')
+ else
+ r.symbols = syminf
+ end
+ local function process_header(name)
+ local hdr = task:get_header_full(name)
+ if hdr then
+ local l = {}
+ for _, h in ipairs(hdr) do
+ table.insert(l, h.decoded)
+ end
+ if not flatten then
+ return l
+ else
+ return table.concat(l, '\n')
+ end
+ else
+ return 'unknown'
+ end
+ end
+ if not no_content then
+ r.header_from = process_header('from')
+ r.header_to = process_header('to')
+ r.header_subject = process_header('subject')
+ r.header_date = process_header('date')
+ r.message_id = task:get_message_id()
+ end
+ return r
+end
+
+local formatters = {
+  default = function(task)
+    return task:get_content(), {}
+  end,
+  email_alert = function(task, rule, extra)
+    local meta = get_general_metadata(task, true)
+    local display_emails = {}
+    local mail_targets = {}
+    meta.mail_from = rule.mail_from or settings.mail_from
+    local mail_rcpt = rule.mail_to or settings.mail_to
+    if type(mail_rcpt) ~= 'table' then
+      table.insert(display_emails, string.format('<%s>', mail_rcpt))
+      table.insert(mail_targets, mail_rcpt)
+    else
+      for _, e in ipairs(mail_rcpt) do
+        table.insert(display_emails, string.format('<%s>', e))
+        table.insert(mail_targets, e) -- insert each address, not the whole mail_rcpt table
+      end
+    end
+    if rule.email_alert_sender then
+      local x = task:get_from('smtp')
+      if x and string.len(x[1].addr) > 0 then
+        table.insert(mail_targets, x)
+        table.insert(display_emails, string.format('<%s>', x[1].addr))
+      end
+    end
+    if rule.email_alert_user then
+      local x = task:get_user()
+      if x then
+        table.insert(mail_targets, x)
+        table.insert(display_emails, string.format('<%s>', x))
+      end
+    end
+    if rule.email_alert_recipients then
+      local x = task:get_recipients('smtp')
+      if x then
+        for _, e in ipairs(x) do
+          if string.len(e.addr) > 0 then
+            table.insert(mail_targets, e.addr)
+            table.insert(display_emails, string.format('<%s>', e.addr))
+          end
+        end
+      end
+    end
+    meta.mail_to = table.concat(display_emails, ', ')
+    meta.our_message_id = rspamd_util.random_hex(12) .. '@rspamd'
+    meta.date = rspamd_util.time_to_string(rspamd_util.get_time())
+    return lua_util.template(rule.email_template or settings.email_template, meta), { mail_targets = mail_targets}
+  end,
+  json = function(task)
+    return ucl.to_format(get_general_metadata(task), 'json-compact')
+  end
+}
+
+local function is_spam(action)
+ return (action == 'reject' or action == 'add header' or action == 'rewrite subject')
+end
+
+local selectors = {
+ default = function(task)
+ return true
+ end,
+ is_spam = function(task)
+ local action = task:get_metric_action('default')
+ return is_spam(action)
+ end,
+ is_spam_authed = function(task)
+ if not task:get_user() then
+ return false
+ end
+ local action = task:get_metric_action('default')
+ return is_spam(action)
+ end,
+ is_reject = function(task)
+ local action = task:get_metric_action('default')
+ return (action == 'reject')
+ end,
+ is_reject_authed = function(task)
+ if not task:get_user() then
+ return false
+ end
+ local action = task:get_metric_action('default')
+ return (action == 'reject')
+ end,
+}
+
+local function maybe_defer(task, rule)
+ if rule.defer then
+ rspamd_logger.warnx(task, 'deferring message')
+ task:set_pre_result('soft reject', 'deferred', N)
+ end
+end
+
+local pushers = {
+ redis_pubsub = function(task, formatted, rule)
+ local _,ret,upstream
+ local function redis_pub_cb(err)
+ if err then
+ rspamd_logger.errx(task, 'got error %s when publishing on server %s',
+ err, upstream:get_addr())
+ return maybe_defer(task, rule)
+ end
+ return true
+ end
+ ret,_,upstream = rspamd_redis_make_request(task,
+ redis_params, -- connect params
+ nil, -- hash key
+ true, -- is write
+ redis_pub_cb, --callback
+ 'PUBLISH', -- command
+ {rule.channel, formatted} -- arguments
+ )
+ if not ret then
+ rspamd_logger.errx(task, 'error connecting to redis')
+ maybe_defer(task, rule)
+ end
+ end,
+ http = function(task, formatted, rule)
+ local function http_callback(err, code)
+ if err then
+ rspamd_logger.errx(task, 'got error %s in http callback', err)
+ return maybe_defer(task, rule)
+ end
+ if code ~= 200 then
+ rspamd_logger.errx(task, 'got unexpected http status: %s', code)
+ return maybe_defer(task, rule)
+ end
+ return true
+ end
+ local hdrs = {}
+ if rule.meta_headers then
+ local gm = get_general_metadata(task, false, true)
+ local pfx = rule.meta_header_prefix or 'X-Rspamd-'
+ for k, v in pairs(gm) do
+ if type(v) == 'table' then
+ hdrs[pfx .. k] = ucl.to_format(v, 'json-compact')
+ else
+ hdrs[pfx .. k] = v
+ end
+ end
+ end
+ rspamd_http.request({
+ task=task,
+ url=rule.url,
+ body=formatted,
+ callback=http_callback,
+ mime_type=rule.mime_type or settings.mime_type,
+ headers=hdrs,
+ })
+ end,
+ send_mail = function(task, formatted, rule, extra)
+ local lua_smtp = require "lua_smtp"
+ local function sendmail_cb(ret, err)
+ if not ret then
+ rspamd_logger.errx(task, 'SMTP export error: %s', err)
+ maybe_defer(task, rule)
+ end
+ end
+
+ lua_smtp.sendmail({
+ task = task,
+ host = rule.smtp,
+ port = rule.smtp_port or settings.smtp_port or 25,
+ from = rule.mail_from or settings.mail_from,
+ recipients = extra.mail_targets or rule.mail_to or settings.mail_to,
+ helo = rule.helo or settings.helo,
+ timeout = rule.timeout or settings.timeout,
+ }, formatted, sendmail_cb)
+ end,
+}
+
+local opts = rspamd_config:get_all_opt(N)
+if not opts then return end
+local process_settings = {
+ select = function(val)
+ selectors.custom = assert(load(val))()
+ end,
+ format = function(val)
+ formatters.custom = assert(load(val))()
+ end,
+ push = function(val)
+ pushers.custom = assert(load(val))()
+ end,
+ custom_push = function(val)
+ if type(val) == 'table' then
+ for k, v in pairs(val) do
+ pushers[k] = assert(load(v))()
+ end
+ end
+ end,
+ custom_select = function(val)
+ if type(val) == 'table' then
+ for k, v in pairs(val) do
+ selectors[k] = assert(load(v))()
+ end
+ end
+ end,
+ custom_format = function(val)
+ if type(val) == 'table' then
+ for k, v in pairs(val) do
+ formatters[k] = assert(load(v))()
+ end
+ end
+ end,
+ pusher_enabled = function(val)
+ if type(val) == 'string' then
+ if pushers[val] then
+ settings.pusher_enabled[val] = true
+ else
+ rspamd_logger.errx(rspamd_config, 'Pusher type: %s is invalid', val)
+ end
+ elseif type(val) == 'table' then
+ for _, v in ipairs(val) do
+ if pushers[v] then
+ settings.pusher_enabled[v] = true
+ else
+ rspamd_logger.errx(rspamd_config, 'Pusher type: %s is invalid', val)
+ end
+ end
+ end
+ end,
+}
+for k, v in pairs(opts) do
+ local f = process_settings[k]
+ if f then
+ f(opts[k])
+ else
+ settings[k] = v
+ end
+end
+if type(settings.rules) ~= 'table' then
+ -- Legacy config
+ settings.rules = {}
+ if not next(settings.pusher_enabled) then
+ if pushers.custom then
+ rspamd_logger.infox(rspamd_config, 'Custom pusher implicitly enabled')
+ settings.pusher_enabled.custom = true
+ else
+ -- Check legacy options
+ if settings.url then
+ rspamd_logger.warnx(rspamd_config, 'HTTP pusher implicitly enabled')
+ settings.pusher_enabled.http = true
+ end
+ if settings.channel then
+ rspamd_logger.warnx(rspamd_config, 'Redis Pubsub pusher implicitly enabled')
+ settings.pusher_enabled.redis_pubsub = true
+ end
+ if settings.smtp and settings.mail_to then
+ rspamd_logger.warnx(rspamd_config, 'SMTP pusher implicitly enabled')
+ settings.pusher_enabled.send_mail = true
+ end
+ end
+ end
+ if not next(settings.pusher_enabled) then
+ rspamd_logger.errx(rspamd_config, 'No push backend enabled')
+ return
+ end
+ if settings.formatter then
+ settings.format = formatters[settings.formatter]
+ if not settings.format then
+ rspamd_logger.errx(rspamd_config, 'No such formatter: %s', settings.formatter)
+ return
+ end
+ end
+ if settings.selector then
+ settings.select = selectors[settings.selector]
+ if not settings.select then
+ rspamd_logger.errx(rspamd_config, 'No such selector: %s', settings.selector)
+ return
+ end
+ end
+  for k in pairs(settings.pusher_enabled) do
+    local formatter = settings.pusher_format[k]
+    local selector = settings.pusher_select[k]
+    if not formatter then
+      settings.pusher_format[k] = settings.formatter or 'default'
+      rspamd_logger.infox(rspamd_config, 'Using default formatter for %s pusher', k)
+    else
+      if not formatters[formatter] then
+        rspamd_logger.errx(rspamd_config, 'No such formatter: %s - disabling %s', formatter, k)
+        settings.pusher_enabled[k] = nil -- was `.k`, which set the literal key "k" and never disabled the pusher
+      end
+    end
+    if not selector then
+      settings.pusher_select[k] = settings.selector or 'default'
+      rspamd_logger.infox(rspamd_config, 'Using default selector for %s pusher', k)
+    else
+      if not selectors[selector] then
+        rspamd_logger.errx(rspamd_config, 'No such selector: %s - disabling %s', selector, k)
+        settings.pusher_enabled[k] = nil -- was `.k`; same literal-key bug
+      end
+    end
+  end
+ if settings.pusher_enabled.redis_pubsub then
+ redis_params = rspamd_parse_redis_server(N)
+ if not redis_params then
+ rspamd_logger.errx(rspamd_config, 'No redis servers are specified')
+ settings.pusher_enabled.redis_pubsub = nil
+ else
+ local r = {}
+ r.backend = 'redis_pubsub'
+ r.channel = settings.channel
+ r.defer = settings.defer
+ r.selector = settings.pusher_select.redis_pubsub
+ r.formatter = settings.pusher_format.redis_pubsub
+ settings.rules[r.backend:upper()] = r
+ end
+ end
+ if settings.pusher_enabled.http then
+ if not settings.url then
+ rspamd_logger.errx(rspamd_config, 'No URL is specified')
+ settings.pusher_enabled.http = nil
+ else
+ local r = {}
+ r.backend = 'http'
+ r.url = settings.url
+ r.mime_type = settings.mime_type
+ r.defer = settings.defer
+ r.selector = settings.pusher_select.http
+ r.formatter = settings.pusher_format.http
+ settings.rules[r.backend:upper()] = r
+ end
+ end
+  if settings.pusher_enabled.send_mail then
+    if not (settings.mail_to and settings.smtp) then
+      rspamd_logger.errx(rspamd_config, 'No mail_to and/or smtp setting is specified')
+      settings.pusher_enabled.send_mail = nil
+    else
+      local r = {}
+      r.backend = 'send_mail'
+      r.mail_to = settings.mail_to
+      r.mail_from = settings.mail_from
+      r.helo = settings.helo -- was settings.hello (typo); default is settings.helo = 'rspamd'
+      r.smtp = settings.smtp
+      r.smtp_port = settings.smtp_port
+      r.email_template = settings.email_template
+      r.defer = settings.defer
+      r.selector = settings.pusher_select.send_mail
+      r.formatter = settings.pusher_format.send_mail
+      settings.rules[r.backend:upper()] = r
+    end
+  end
+ if not next(settings.pusher_enabled) then
+ rspamd_logger.errx(rspamd_config, 'No push backend enabled')
+ return
+ end
+elseif not next(settings.rules) then
+ lua_util.debugm(N, rspamd_config, 'No rules enabled')
+ return
+end
+if not settings.rules or not next(settings.rules) then
+ rspamd_logger.errx(rspamd_config, 'No rules enabled')
+ return
+end
+local backend_required_elements = {
+ http = {
+ 'url',
+ },
+ smtp = {
+ 'mail_to',
+ 'smtp',
+ },
+ redis_pubsub = {
+ 'channel',
+ },
+}
+local check_element = {
+ selector = function(k, v)
+ if not selectors[v] then
+ rspamd_logger.errx(rspamd_config, 'Rule %s has invalid selector %s', k, v)
+ return false
+ else
+ return true
+ end
+ end,
+ formatter = function(k, v)
+ if not formatters[v] then
+ rspamd_logger.errx(rspamd_config, 'Rule %s has invalid formatter %s', k, v)
+ return false
+ else
+ return true
+ end
+ end,
+}
+local backend_check = {
+ default = function(k, rule)
+ local reqset = backend_required_elements[rule.backend]
+ if reqset then
+ for _, e in ipairs(reqset) do
+ if not rule[e] then
+ rspamd_logger.errx(rspamd_config, 'Rule %s misses required setting %s', k, e)
+ settings.rules[k] = nil
+ end
+ end
+ end
+ for sett, v in pairs(rule) do
+ local f = check_element[sett]
+ if f then
+ if not f(sett, v) then
+ settings.rules[k] = nil
+ end
+ end
+ end
+ end,
+}
+backend_check.redis_pubsub = function(k, rule)
+ if not redis_params then
+ redis_params = rspamd_parse_redis_server(N)
+ end
+ if not redis_params then
+ rspamd_logger.errx(rspamd_config, 'No redis servers are specified')
+ settings.rules[k] = nil
+ else
+ backend_check.default(k, rule)
+ end
+end
+setmetatable(backend_check, {
+ __index = function()
+ return backend_check.default
+ end,
+})
+for k, v in pairs(settings.rules) do
+ if type(v) == 'table' then
+ local backend = v.backend
+ if not backend then
+ rspamd_logger.errx(rspamd_config, 'Rule %s has no backend', k)
+ settings.rules[k] = nil
+ elseif not pushers[backend] then
+ rspamd_logger.errx(rspamd_config, 'Rule %s has invalid backend %s', k, backend)
+ settings.rules[k] = nil
+ else
+ local f = backend_check[backend]
+ f(k, v)
+ end
+ else
+ rspamd_logger.errx(rspamd_config, 'Rule %s has bad type: %s', k, type(v))
+ settings.rules[k] = nil
+ end
+end
+
+local function gen_exporter(rule)
+ return function (task)
+ if task:has_flag('skip') then return end
+ local selector = rule.selector or 'default'
+ local selected = selectors[selector](task)
+ if selected then
+ lua_util.debugm(N, task, 'Message selected for processing')
+ local formatter = rule.formatter or 'default'
+ local formatted, extra = formatters[formatter](task, rule)
+ if formatted then
+ pushers[rule.backend](task, formatted, rule, extra)
+ else
+ lua_util.debugm(N, task, 'Formatter [%s] returned non-truthy value [%s]', formatter, formatted)
+ end
+ else
+ lua_util.debugm(N, task, 'Selector [%s] returned non-truthy value [%s]', selector, selected)
+ end
+ end
+end
+
+if not next(settings.rules) then
+ rspamd_logger.errx(rspamd_config, 'No rules enabled')
+ lua_util.disable_module(N, "config")
+end
+for k, r in pairs(settings.rules) do
+ rspamd_config:register_symbol({
+ name = 'EXPORT_METADATA_' .. k,
+ type = 'idempotent',
+ callback = gen_exporter(r),
+ priority = 10,
+ flags = 'empty,explicit_disable,ignore_passthrough',
+ })
+end
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/sa_trivial_convert.lua b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/sa_trivial_convert.lua
new file mode 100644
index 0000000..4725dab
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/sa_trivial_convert.lua
@@ -0,0 +1,443 @@
+local fun = require "fun"
+local rspamd_logger = require "rspamd_logger"
+local util = require "rspamd_util"
+local lua_util = require "lua_util"
+local rspamd_regexp = require "rspamd_regexp"
+local ucl = require "ucl"
+
+local complicated = {}
+local rules = {}
+local scores = {}
+
+-- Re-join the tokens after position `start` into one space-separated
+-- string (used to rebuild a regexp or description from a split line).
+local function words_to_re(words, start)
+  -- table.concat with an explicit start index is equivalent to dropping
+  -- the first `start` tokens before joining; out-of-range yields "".
+  return table.concat(words, " ", start + 1)
+end
+
+-- Tokenise `str` with gmatch pattern `delim` (default: runs of
+-- non-whitespace) and return the matches as an array.
+local function split(str, delim)
+  local tokens = {}
+  for token in string.gmatch(str, delim or '[^%s]+') do
+    tokens[#tokens + 1] = token
+  end
+  return tokens
+end
+
+-- Parse a SpamAssassin header specification (third token of a `header`
+-- rule, e.g. "From:addr" or "Subject:raw") into per-header parameter
+-- tables stored on cur_rule.header.  cur_rule.ordinary becomes false
+-- whenever the spec uses features the simple multimap conversion cannot
+-- express (ALL, modifier functions, pseudo-headers, multiple headers).
+local function handle_header_def(hline, cur_rule)
+  --Now check for modifiers inside header's name
+  local hdrs = split(hline, '[^|]+')
+  local hdr_params = {}
+  -- NOTE(review): cur_param is declared once outside the loop, so when
+  -- several headers are given every hdr_params entry aliases the same
+  -- table; harmless here only because multi-header rules are marked
+  -- non-ordinary below — confirm before reusing this helper.
+  local cur_param = {}
+  -- Check if an re is an ordinary re
+  local ordinary = true
+
+  for _,h in ipairs(hdrs) do
+    if h == 'ALL' or h == 'ALL:raw' then
+      ordinary = false
+    else
+      local args = split(h, '[^:]+')
+      cur_param['strong'] = false
+      cur_param['raw'] = false
+      cur_param['header'] = args[1]
+
+      if args[2] then
+        -- We have some ops that are required for the header, so it's not ordinary
+        ordinary = false
+      end
+
+      -- Apply each ':'-separated modifier after the header name.
+      fun.each(function(func)
+        if func == 'addr' then
+          cur_param['function'] = function(str)
+            local addr_parsed = util.parse_addr(str)
+            local ret = {}
+            if addr_parsed then
+              for _,elt in ipairs(addr_parsed) do
+                if elt['addr'] then
+                  table.insert(ret, elt['addr'])
+                end
+              end
+            end
+
+            return ret
+          end
+        elseif func == 'name' then
+          cur_param['function'] = function(str)
+            local addr_parsed = util.parse_addr(str)
+            local ret = {}
+            if addr_parsed then
+              for _,elt in ipairs(addr_parsed) do
+                if elt['name'] then
+                  table.insert(ret, elt['name'])
+                end
+              end
+            end
+
+            return ret
+          end
+        elseif func == 'raw' then
+          cur_param['raw'] = true
+        elseif func == 'case' then
+          cur_param['strong'] = true
+        else
+          rspamd_logger.warnx(rspamd_config, 'Function %1 is not supported in %2',
+            func, cur_rule['symbol'])
+        end
+      end, fun.tail(args))
+
+      -- Some header rules require splitting to check multiple headers
+      if cur_param['header'] == 'MESSAGEID' then
+        -- Special case for spamassassin
+        ordinary = false
+      elseif cur_param['header'] == 'ToCc' then
+        ordinary = false
+      else
+        table.insert(hdr_params, cur_param)
+      end
+    end
+
+  -- NOTE(review): by the end-pairing above these two assignments are
+  -- still inside the header loop (re-evaluated every iteration, final
+  -- iteration wins) — confirm this matches the intended post-loop
+  -- placement.
+  cur_rule['ordinary'] = ordinary and (not (#hdr_params > 1))
+  cur_rule['header'] = hdr_params
+  end
+end
+
+-- Parse one SpamAssassin configuration file (open handle `f`).
+-- "Trivial" rules (plain header/body/rawbody/full/uri regexps) are
+-- collected into the module-level `rules` table and their scores into
+-- `scores`; every line that cannot be converted (meta rules, plugin
+-- conditionals, negated matches, functions, ...) is appended verbatim
+-- to `complicated` for pass-through to the native SA plugin.
+local function process_sa_conf(f)
+  local cur_rule = {}
+  local valid_rule = false
+
+  -- Commit the rule accumulated in cur_rule into `rules`, keyed by rule
+  -- type (and additionally by header name for header rules), then reset
+  -- the accumulator.
+  local function insert_cur_rule()
+    if not rules[cur_rule.type] then
+      rules[cur_rule.type] = {}
+    end
+
+    local target = rules[cur_rule.type]
+
+    if cur_rule.type == 'header' then
+      if not cur_rule.header[1].header then
+        rspamd_logger.errx(rspamd_config, 'bad rule definition: %1', cur_rule)
+        return
+      end
+      if not target[cur_rule.header[1].header] then
+        target[cur_rule.header[1].header] = {}
+      end
+      target = target[cur_rule.header[1].header]
+    end
+
+    if not cur_rule['symbol'] then
+      rspamd_logger.errx(rspamd_config, 'bad rule definition: %1', cur_rule)
+      return
+    end
+    target[cur_rule['symbol']] = cur_rule
+    cur_rule = {}
+    valid_rule = false
+  end
+
+  -- Extract the numeric score from a `score SYMBOL ...` line.
+  local function parse_score(words)
+    if #words == 3 then
+      -- score rule <x>
+      return tonumber(words[3])
+    elseif #words == 6 then
+      -- score rule <x1> <x2> <x3> <x4>
+      -- we assume here that bayes and network are enabled and select <x4>
+      return tonumber(words[6])
+    else
+      rspamd_logger.errx(rspamd_config, 'invalid score for %1', words[2])
+    end
+
+    return 0
+  end
+
+  -- State for skipping if/ifplugin ... endif sections: they are never
+  -- evaluated here, only copied verbatim into `complicated`.
+  local skip_to_endif = false
+  local if_nested = 0
+  for l in f:lines() do
+    -- Each line is handled in an immediately-invoked closure so that
+    -- `return` behaves like `continue`.
+    (function ()
+      l = lua_util.rspamd_str_trim(l)
+      -- Replace bla=~/re/ with bla =~ /re/ (#2372)
+      l = l:gsub('([^%s])%s*([=!]~)%s*([^%s])', '%1 %2 %3')
+
+      if string.len(l) == 0 or string.sub(l, 1, 1) == '#' then
+        return
+      end
+
+      -- Unbalanced if/endif
+      if if_nested < 0 then if_nested = 0 end
+      if skip_to_endif then
+        if string.match(l, '^endif') then
+          if_nested = if_nested - 1
+
+          if if_nested == 0 then
+            skip_to_endif = false
+          end
+        elseif string.match(l, '^if') then
+          if_nested = if_nested + 1
+        elseif string.match(l, '^else') then
+          -- Else counterpart for if
+          skip_to_endif = false
+        end
+        table.insert(complicated, l)
+        return
+      else
+        if string.match(l, '^ifplugin') then
+          skip_to_endif = true
+          if_nested = if_nested + 1
+          table.insert(complicated, l)
+        elseif string.match(l, '^if !plugin%(') then
+          skip_to_endif = true
+          if_nested = if_nested + 1
+          table.insert(complicated, l)
+        elseif string.match(l, '^if') then
+          -- Unknown if
+          skip_to_endif = true
+          if_nested = if_nested + 1
+          table.insert(complicated, l)
+        elseif string.match(l, '^else') then
+          -- Else counterpart for if
+          skip_to_endif = true
+          table.insert(complicated, l)
+        elseif string.match(l, '^endif') then
+          if_nested = if_nested - 1
+          table.insert(complicated, l)
+        end
+      end
+
+      -- Skip comments
+      local words = fun.totable(fun.take_while(
+        function(w) return string.sub(w, 1, 1) ~= '#' end,
+        fun.filter(function(w)
+          return w ~= "" end,
+          fun.iter(split(l)))))
+
+      if words[1] == "header" then
+        -- header SYMBOL Header ~= /regexp/
+        if valid_rule then
+          insert_cur_rule()
+        end
+        if words[4] and (words[4] == '=~' or words[4] == '!~') then
+          cur_rule['type'] = 'header'
+          cur_rule['symbol'] = words[2]
+
+          -- Negated header matches cannot be expressed as a multimap.
+          if words[4] == '!~' then
+            table.insert(complicated, l)
+            return
+          end
+
+          cur_rule['re_expr'] = words_to_re(words, 4)
+          local unset_comp = string.find(cur_rule['re_expr'], '%s+%[if%-unset:')
+          if unset_comp then
+            table.insert(complicated, l)
+            return
+          end
+
+          cur_rule['re'] = rspamd_regexp.create(cur_rule['re_expr'])
+
+          if not cur_rule['re'] then
+            rspamd_logger.warnx(rspamd_config, "Cannot parse regexp '%1' for %2",
+              cur_rule['re_expr'], cur_rule['symbol'])
+            table.insert(complicated, l)
+            return
+          else
+            handle_header_def(words[3], cur_rule)
+            if not cur_rule['ordinary'] then
+              table.insert(complicated, l)
+              return
+            end
+          end
+
+          valid_rule = true
+        else
+          table.insert(complicated, l)
+          return
+        end
+      elseif words[1] == "body" then
+        -- body SYMBOL /regexp/
+        if valid_rule then
+          insert_cur_rule()
+        end
+
+        cur_rule['symbol'] = words[2]
+        if words[3] and (string.sub(words[3], 1, 1) == '/'
+            or string.sub(words[3], 1, 1) == 'm') then
+          cur_rule['type'] = 'sabody'
+          cur_rule['re_expr'] = words_to_re(words, 2)
+          cur_rule['re'] = rspamd_regexp.create(cur_rule['re_expr'])
+          if cur_rule['re'] then
+
+            valid_rule = true
+          end
+        else
+          -- might be function
+          table.insert(complicated, l)
+          return
+        end
+      elseif words[1] == "rawbody" then
+        -- rawbody SYMBOL /regexp/
+        if valid_rule then
+          insert_cur_rule()
+        end
+
+        cur_rule['symbol'] = words[2]
+        if words[3] and (string.sub(words[3], 1, 1) == '/'
+            or string.sub(words[3], 1, 1) == 'm') then
+          cur_rule['type'] = 'sarawbody'
+          cur_rule['re_expr'] = words_to_re(words, 2)
+          cur_rule['re'] = rspamd_regexp.create(cur_rule['re_expr'])
+          if cur_rule['re'] then
+            valid_rule = true
+          end
+        else
+          table.insert(complicated, l)
+          return
+        end
+      elseif words[1] == "full" then
+        -- full SYMBOL /regexp/
+        if valid_rule then
+          insert_cur_rule()
+        end
+
+        cur_rule['symbol'] = words[2]
+
+        if words[3] and (string.sub(words[3], 1, 1) == '/'
+            or string.sub(words[3], 1, 1) == 'm') then
+          -- NOTE(review): 'full' rules are stored under type 'message';
+          -- downstream consumers must accept that key.
+          cur_rule['type'] = 'message'
+          cur_rule['re_expr'] = words_to_re(words, 2)
+          cur_rule['re'] = rspamd_regexp.create(cur_rule['re_expr'])
+          cur_rule['raw'] = true
+          if cur_rule['re'] then
+            valid_rule = true
+          end
+        else
+          table.insert(complicated, l)
+          return
+        end
+      elseif words[1] == "uri" then
+        -- uri SYMBOL /regexp/
+        if valid_rule then
+          insert_cur_rule()
+        end
+        cur_rule['type'] = 'uri'
+        cur_rule['symbol'] = words[2]
+        cur_rule['re_expr'] = words_to_re(words, 2)
+        cur_rule['re'] = rspamd_regexp.create(cur_rule['re_expr'])
+        if cur_rule['re'] and cur_rule['symbol'] then
+          valid_rule = true
+        else
+          table.insert(complicated, l)
+          return
+        end
+      elseif words[1] == "meta" then
+        -- meta SYMBOL expression
+        if valid_rule then
+          insert_cur_rule()
+        end
+        table.insert(complicated, l)
+        return
+      elseif words[1] == "describe" and valid_rule then
+        cur_rule['description'] = words_to_re(words, 2)
+      elseif words[1] == "score" then
+        scores[words[2]] = parse_score(words)
+      else
+        table.insert(complicated, l)
+        return
+      end
+    end)()
+  end
+  -- Flush the last accumulated rule at EOF.
+  if valid_rule then
+    insert_cur_rule()
+  end
+end
+
+-- Entry point: every CLI argument is a SpamAssassin .cf file to convert.
+for _,matched in ipairs(arg) do
+  local f = io.open(matched, "r")
+  if f then
+    rspamd_logger.messagex(rspamd_config, 'loading SA rules from %s', matched)
+    process_sa_conf(f)
+    -- Close the handle explicitly; process_sa_conf only reads from it
+    -- and previously the descriptor leaked until GC.
+    f:close()
+  else
+    rspamd_logger.errx(rspamd_config, "cannot open %1", matched)
+  end
+end
+
+local multimap_conf = {}
+
+-- Convert one collected rule class into a multimap: write a regexp map
+-- file (one '/re/ SYMBOL:score' line per rule, into the current working
+-- directory) and record the multimap definition in multimap_conf.
+-- `what` is the type key from `rules`, `syms` maps symbol -> rule, and
+-- `hdr` is the header name (header rules only).
+local function handle_rule(what, syms, hdr)
+  local mtype
+  local filter
+  local fname
+  local header
+  local sym = what:upper()
+  if what == 'sabody' then
+    mtype = 'content'
+    fname = 'body_re.map'
+    filter = 'oneline'
+  elseif what == 'sarawbody' then
+    fname = 'raw_body_re.map'
+    mtype = 'content'
+    filter = 'rawtext'
+  elseif what == 'full' or what == 'message' then
+    -- process_sa_conf stores 'full' rules under type 'message'; accept
+    -- both spellings so those rules are not rejected as unknown.
+    fname = 'full_re.map'
+    mtype = 'content'
+    filter = 'full'
+  elseif what == 'uri' then
+    fname = 'uri_re.map'
+    mtype = 'url'
+    filter = 'full'
+  elseif what == 'header' then
+    fname = ('hdr_' .. hdr .. '_re.map'):lower()
+    mtype = 'header'
+    header = hdr
+    sym = sym .. '_' .. hdr:upper()
+  else
+    rspamd_logger.errx('unknown type: %s', what)
+    return
+  end
+  local conf = {
+    type = mtype,
+    filter = filter,
+    symbol = 'SA_MAP_AUTO_' .. sym,
+    regexp = true,
+    map = fname,
+    header = header,
+    symbols = {}
+  }
+  local re_file = io.open(fname, 'w')
+
+  for k,r in pairs(syms) do
+    -- Rules without an explicit 'score' line default to 0.
+    local score = 0.0
+    if scores[k] then
+      score = scores[k]
+    end
+    -- NOTE(review): assumes tostring(r.re) yields the bare pattern so
+    -- wrapping it in '/.../' produces a valid map entry — confirm
+    -- against rspamd_regexp's tostring format.
+    re_file:write(string.format('/%s/ %s:%f\n', tostring(r.re), k, score))
+    table.insert(conf.symbols, k)
+  end
+
+  re_file:close()
+
+  multimap_conf[sym:lower()] = conf
+  rspamd_logger.messagex('stored %s regexp in %s', sym:lower(), fname)
+end
+
+-- Emit results: one multimap per collected rule class (header rules are
+-- expanded per header name), then the multimap configuration in UCL and
+-- the unconvertible remainder for the regular SA plugin.  All output
+-- files are written to the current working directory.
+for k,v in pairs(rules) do
+  if k == 'header' then
+    for h,r in pairs(v) do
+      handle_rule(k, r, h)
+    end
+  else
+    handle_rule(k, v)
+  end
+end
+
+local out = ucl.to_format(multimap_conf, 'ucl')
+local mmap_conf = io.open('auto_multimap.conf', 'w')
+mmap_conf:write(out)
+mmap_conf:close()
+rspamd_logger.messagex('stored multimap conf in %s', 'auto_multimap.conf')
+
+-- Pass-through lines (blank-only lines dropped) for the native SA module.
+local sa_remain = io.open('auto_sa.conf', 'w')
+fun.each(function(l)
+  sa_remain:write(l)
+  sa_remain:write('\n')
+end, fun.filter(function(l) return not string.match(l, '^%s+$') end, complicated))
+sa_remain:close()
+rspamd_logger.messagex('stored sa remains conf in %s', 'auto_sa.conf')
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/settings.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/settings.conf
new file mode 100644
index 0000000..4449f09
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/settings.conf
@@ -0,0 +1 @@
+settings = "http://nginx:8081/settings.php";
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/Dockerfile
new file mode 100644
index 0000000..1e49965
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/Dockerfile
@@ -0,0 +1,56 @@
+FROM debian:buster-slim
+LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
+
+ARG DEBIAN_FRONTEND=noninteractive
+# NOTE(review): nightly repo plus unpinned apt packages make this image
+# non-reproducible between builds.
+ARG SOGO_DEBIAN_REPOSITORY=http://packages.inverse.ca/SOGo/nightly/5/debian/
+# NOTE(review): legacy space-separated ENV syntax (deprecated in favour
+# of ENV key=value).
+ENV LC_ALL C
+ENV GOSU_VERSION 1.11
+
+# Prerequisites
+# NOTE(review): the gosu binary is fetched over HTTPS without GPG
+# signature verification; 'gosu nobody true' is only a smoke test.
+RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
+ && apt-get update && apt-get install -y --no-install-recommends \
+ apt-transport-https \
+ ca-certificates \
+ cron \
+ gettext \
+ gnupg \
+ mariadb-client \
+ rsync \
+ supervisor \
+ syslog-ng \
+ syslog-ng-core \
+ syslog-ng-mod-redis \
+ dirmngr \
+ netcat \
+ psmisc \
+ wget \
+ patch \
+ && dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
+ && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
+ && chmod +x /usr/local/bin/gosu \
+ && gosu nobody true \
+ && mkdir /usr/share/doc/sogo \
+ && touch /usr/share/doc/sogo/empty.sh \
+ && apt-key adv --keyserver keyserver.ubuntu.com --recv-key 0x810273C4 \
+ && echo "deb ${SOGO_DEBIAN_REPOSITORY} buster buster" > /etc/apt/sources.list.d/sogo.list \
+ && apt-get update && apt-get install -y --no-install-recommends \
+ sogo \
+ sogo-activesync \
+ && apt-get autoclean \
+ && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/sogo.list \
+ && touch /etc/default/locale
+
+COPY ./bootstrap-sogo.sh /bootstrap-sogo.sh
+COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
+COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
+COPY supervisord.conf /etc/supervisor/supervisord.conf
+COPY acl.diff /acl.diff
+COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
+COPY docker-entrypoint.sh /
+
+RUN chmod +x /bootstrap-sogo.sh \
+ /usr/local/sbin/stop-supervisor.sh
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+# Shell-form CMD: the entrypoint execs "sh -c 'exec supervisord ...'" and
+# the inner exec replaces the shell, so supervisord still receives
+# container signals.
+CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/acl.diff b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/acl.diff
new file mode 100644
index 0000000..5137003
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/acl.diff
@@ -0,0 +1,11 @@
+--- /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox 2018-08-17 18:29:57.987504204 +0200
++++ /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox 2018-08-17 18:29:35.918291298 +0200
+@@ -46,7 +46,7 @@
+ </md-item-template>
+ </md-autocomplete>
+ </div>
+- <md-card ng-repeat="user in acl.users | orderBy:['userClass', 'cn']"
++ <md-card ng-repeat="user in acl.users | filter:{ userClass: 'normal' } | orderBy:['cn']"
+ class="sg-collapsed"
+ ng-class="{ 'sg-expanded': user.uid == acl.selectedUid }">
+ <a class="md-flex md-button" ng-click="acl.selectUser(user, $event)">
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/bootstrap-sogo.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/bootstrap-sogo.sh
new file mode 100755
index 0000000..fef7958
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/bootstrap-sogo.sh
@@ -0,0 +1,262 @@
+#!/bin/bash
+# Bootstrap SOGo inside the mailcow stack: wait for MariaDB and the
+# mailcow schema, (re)create sogo_view / _sogo_static_view and the
+# password-sync trigger (master node only), render sogod.plist from the
+# domain table, install cron jobs, then exec sogod.
+# NOTE(review): -p${DBPASS} puts the DB password on mysql/mysqladmin
+# command lines, visible in the container's process list.
+
+# Wait for MySQL to warm-up
+while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+  echo "Waiting for database to come up..."
+  sleep 2
+done
+
+# Free TCP port 20000: keep TERM-ing leftover sogod processes until
+# nothing listens there any more.
+until ! nc -z sogo-mailcow 20000;
+do
+  killall -TERM sogod
+  sleep 3
+done
+
+# Wait for updated schema
+# NOTE(review): the target version is read from init_db.inc.php relative
+# to the current working directory — confirm the CWD at container start.
+DBV_NOW=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
+DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
+while [[ "${DBV_NOW}" != "${DBV_NEW}" ]]; do
+  echo "Waiting for schema update..."
+  DBV_NOW=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
+  DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
+  sleep 5
+done
+echo "DB schema is ${DBV_NOW}"
+
+# Recreate view
+if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  echo "We are master, preparing sogo_view..."
+  mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP VIEW IF EXISTS sogo_view"
+  while [[ ${VIEW_OK} != 'OK' ]]; do
+    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+CREATE VIEW sogo_view (c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings) AS
+SELECT
+  mailbox.username,
+  mailbox.domain,
+  mailbox.username,
+  IF(JSON_UNQUOTE(JSON_VALUE(attributes, '$.force_pw_update')) = '0', IF(JSON_UNQUOTE(JSON_VALUE(attributes, '$.sogo_access')) = 1, password, '{SSHA256}A123A123A321A321A321B321B321B123B123B321B432F123E321123123321321'), '{SSHA256}A123A123A321A321A321B321B321B123B123B321B432F123E321123123321321'),
+  mailbox.name,
+  mailbox.username,
+  IFNULL(GROUP_CONCAT(ga.aliases ORDER BY ga.aliases SEPARATOR ' '), ''),
+  IFNULL(gda.ad_alias, ''),
+  IFNULL(external_acl.send_as_acl, ''),
+  mailbox.kind,
+  mailbox.multiple_bookings
+FROM
+  mailbox
+    LEFT OUTER JOIN
+  grouped_mail_aliases ga
+    ON ga.username REGEXP CONCAT('(^|,)', mailbox.username, '($|,)')
+    LEFT OUTER JOIN
+  grouped_domain_alias_address gda
+    ON gda.username = mailbox.username
+    LEFT OUTER JOIN
+  grouped_sender_acl_external external_acl
+    ON external_acl.username = mailbox.username
+WHERE
+  mailbox.active = '1'
+GROUP BY
+  mailbox.username;
+EOF
+    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'sogo_view'") ]]; then
+      VIEW_OK=OK
+    else
+      echo "Will retry to setup SOGo view in 3s..."
+      sleep 3
+    fi
+  done
+else
+  # Non-master nodes only wait until the master has created the view.
+  while [[ ${VIEW_OK} != 'OK' ]]; do
+    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'sogo_view'") ]]; then
+      VIEW_OK=OK
+    else
+      echo "Waiting for SOGo view to be created by master..."
+      sleep 3
+    fi
+  done
+fi
+
+# Wait for static view table if missing after update and update content
+if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  echo "We are master, preparing _sogo_static_view..."
+  while [[ ${STATIC_VIEW_OK} != 'OK' ]]; do
+    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '_sogo_static_view'") ]]; then
+      STATIC_VIEW_OK=OK
+      echo "Updating _sogo_static_view content..."
+      # If changed, also update init_db.inc.php
+      mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "REPLACE INTO _sogo_static_view (c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings) SELECT c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings from sogo_view;"
+      mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "DELETE FROM _sogo_static_view WHERE c_uid NOT IN (SELECT username FROM mailbox WHERE active = '1')"
+    else
+      echo "Waiting for database initialization..."
+      sleep 3
+    fi
+  done
+else
+  while [[ ${STATIC_VIEW_OK} != 'OK' ]]; do
+    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '_sogo_static_view'") ]]; then
+      STATIC_VIEW_OK=OK
+    else
+      echo "Waiting for database initialization by master..."
+      sleep 3
+    fi
+  done
+fi
+
+
+# Recreate password update trigger
+# The trigger copies password changes made through SOGo (writes to
+# _sogo_static_view) back into mailbox.password.
+if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  echo "We are master, preparing update trigger..."
+  mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP TRIGGER IF EXISTS sogo_update_password"
+  while [[ ${TRIGGER_OK} != 'OK' ]]; do
+    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
+DELIMITER -
+CREATE TRIGGER sogo_update_password AFTER UPDATE ON _sogo_static_view
+FOR EACH ROW
+BEGIN
+UPDATE mailbox SET password = NEW.c_password WHERE NEW.c_uid = username;
+END;
+-
+DELIMITER ;
+EOF
+    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME = 'sogo_update_password'") ]]; then
+      TRIGGER_OK=OK
+    else
+      echo "Will retry to setup SOGo password update trigger in 3s"
+      sleep 3
+    fi
+  done
+fi
+
+if [[ "${ALLOW_ADMIN_EMAIL_LOGIN}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  TRUST_PROXY="YES"
+else
+  TRUST_PROXY="NO"
+fi
+# cat /dev/urandom seems to hang here occasionally and is not recommended anyway, better use openssl
+RAND_PASS=$(openssl rand -base64 16 | tr -dc _A-Z-a-z-0-9)
+
+# Generate plist header with timezone data
+mkdir -p /var/lib/sogo/GNUstep/Defaults/
+cat <<EOF > /var/lib/sogo/GNUstep/Defaults/sogod.plist
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//GNUstep//DTD plist 0.9//EN" "http://www.gnustep.org/plist-0_9.xml">
+<plist version="0.9">
+<dict>
+    <key>OCSAclURL</key>
+    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_acl</string>
+    <key>SOGoIMAPServer</key>
+    <string>imap://${IPV4_NETWORK}.250:143/?TLS=YES&tlsVerifyMode=none</string>
+    <key>SOGoTrustProxyAuthentication</key>
+    <string>${TRUST_PROXY}</string>
+    <key>SOGoEncryptionKey</key>
+    <string>${RAND_PASS}</string>
+    <key>OCSCacheFolderURL</key>
+    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_cache_folder</string>
+    <key>OCSEMailAlarmsFolderURL</key>
+    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_alarms_folder</string>
+    <key>OCSFolderInfoURL</key>
+    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_folder_info</string>
+    <key>OCSSessionsFolderURL</key>
+    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_sessions_folder</string>
+    <key>OCSStoreURL</key>
+    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_store</string>
+    <key>SOGoProfileURL</key>
+    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_user_profile</string>
+    <key>SOGoTimeZone</key>
+    <string>${TZ}</string>
+    <key>domains</key>
+    <dict>
+EOF
+
+# Generate multi-domain setup
+# One <dict> per row of the domain table; `gal` toggles whether the SQL
+# source is exposed as an address book.  After each SQL source, an LDAP
+# fallback source is appended from the plist_ldap template via envsubst.
+while read -r line gal
+  do
+  echo "        <key>${line}</key>
+        <dict>
+            <key>SOGoMailDomain</key>
+            <string>${line}</string>
+            <key>SOGoUserSources</key>
+            <array>
+                <dict>
+                    <key>MailFieldNames</key>
+                    <array>
+                        <string>aliases</string>
+                        <string>ad_aliases</string>
+                        <string>ext_acl</string>
+                    </array>
+                    <key>KindFieldName</key>
+                    <string>kind</string>
+                    <key>DomainFieldName</key>
+                    <string>domain</string>
+                    <key>MultipleBookingsFieldName</key>
+                    <string>multiple_bookings</string>
+                    <key>listRequiresDot</key>
+                    <string>NO</string>
+                    <key>canAuthenticate</key>
+                    <string>YES</string>
+                    <key>displayName</key>
+                    <string>GAL ${line}</string>
+                    <key>id</key>
+                    <string>${line}</string>
+                    <key>isAddressBook</key>
+                    <string>${gal}</string>
+                    <key>type</key>
+                    <string>sql</string>
+                    <key>userPasswordAlgorithm</key>
+                    <string>${MAILCOW_PASS_SCHEME}</string>
+                    <key>prependPasswordScheme</key>
+                    <string>YES</string>
+                    <key>viewURL</key>
+                    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/_sogo_static_view</string>
+                </dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
+                # Generate alternative LDAP authentication dict, when SQL authentication fails
+                # This will nevertheless read attributes from LDAP
+                line=${line} envsubst < /etc/sogo/plist_ldap >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
+                echo "            </array>
+        </dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
+done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain, CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal FROM domain;" -B -N)
+
+# Generate footer
+echo '    </dict>
+</dict>
+</plist>' >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
+
+# Fix permissions
+chown sogo:sogo -R /var/lib/sogo/
+chmod 600 /var/lib/sogo/GNUstep/Defaults/sogod.plist
+
+# Patch ACLs
+#if [[ ${ACL_ANYONE} == 'allow' ]]; then
+#  #enable any or authenticated targets for ACL
+#  if patch -R -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff > /dev/null; then
+#    patch -R /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff;
+#  fi
+#else
+#  #disable any or authenticated targets for ACL
+#  if patch -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff > /dev/null; then
+#    patch /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff;
+#  fi
+#fi
+
+# Copy logo, if any
+[[ -f /etc/sogo/sogo-full.svg ]] && cp /etc/sogo/sogo-full.svg /usr/lib/GNUstep/SOGo/WebServerResources/img/sogo-full.svg
+
+# Rsync web content
+echo "Syncing web content with named volume"
+rsync -a /usr/lib/GNUstep/SOGo/. /sogo_web/
+
+# Chown backup path
+chown -R sogo:sogo /sogo_backup
+
+# Creating cronjobs (master only: alarms, session expiry, autoreply, backups)
+if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  echo "* * * * *   sogo   /usr/sbin/sogo-ealarms-notify -p /etc/sogo/sieve.creds 2>/dev/null" > /etc/cron.d/sogo
+  echo "* * * * *   sogo   /usr/sbin/sogo-tool expire-sessions ${SOGO_EXPIRE_SESSION}" >> /etc/cron.d/sogo
+  echo "0 0 * * *   sogo   /usr/sbin/sogo-tool update-autoreply -p /etc/sogo/sieve.creds" >> /etc/cron.d/sogo
+  echo "0 2 * * *   sogo   /usr/sbin/sogo-tool backup /sogo_backup ALL" >> /etc/cron.d/sogo
+else
+  # NOTE(review): errors (non-fatally) when the file does not exist;
+  # 'rm -f' would be quieter.
+  rm /etc/cron.d/sogo
+fi
+
+# Replace this bootstrap script with sogod running as the sogo user.
+exec gosu sogo /usr/sbin/sogod
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/docker-entrypoint.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/docker-entrypoint.sh
new file mode 100755
index 0000000..ce28c34
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/docker-entrypoint.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Container entrypoint: optionally park the container (SKIP_SOGO),
+# switch syslog-ng to the redis-slave configuration when this node
+# replicates from another Redis, then hand off to the real command
+# (supervisord, via CMD).
+
+if [[ "${SKIP_SOGO}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  echo "SKIP_SOGO=y, skipping SOGo..."
+  # Keep the container "running" without starting SOGo at all.
+  sleep 365d
+  exit 0
+fi
+
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
+fi
+
+# exec so the CMD process replaces this shell and receives signals.
+exec "$@"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/stop-supervisor.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/stop-supervisor.sh
new file mode 100755
index 0000000..5394490
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/stop-supervisor.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# supervisord event listener (see [eventlistener:processes] in
+# supervisord.conf): when any watched program STOPS/EXITS/FATALs, bring
+# the whole container down by signalling supervisord itself.
+
+# Handshake: tell supervisord this listener is ready to receive events.
+printf "READY\n";
+
+while read line; do
+  echo "Processing Event: $line" >&2;
+  # SIGQUIT (-3) supervisord so it shuts everything down and exits.
+  # NOTE(review): supervisord.conf does not set a pidfile path — confirm
+  # /var/run/supervisord.pid is actually written.  The listener also
+  # never sends the RESULT/READY reply the eventlistener protocol
+  # expects; acceptable only because supervisord is being terminated.
+  kill -3 $(cat "/var/run/supervisord.pid")
+done < /dev/stdin
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/supervisord.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/supervisord.conf
new file mode 100644
index 0000000..551a8e1
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/supervisord.conf
@@ -0,0 +1,32 @@
+[supervisord]
+nodaemon=true
+user=root
+
+[program:syslog-ng]
+command=/usr/sbin/syslog-ng --foreground --no-caps
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autostart=true
+priority=1
+
+[program:cron]
+command=/usr/sbin/cron -f
+autorestart=true
+priority=2
+
+[program:bootstrap-sogo]
+command=/bootstrap-sogo.sh
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+priority=3
+startretries=10
+autorestart=true
+stopwaitsecs=120
+
+[eventlistener:processes]
+command=/usr/local/sbin/stop-supervisor.sh
+events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng-redis_slave.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng-redis_slave.conf
new file mode 100644
index 0000000..9b04c78
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng-redis_slave.conf
@@ -0,0 +1,45 @@
+@version: 3.19
+@include "scl.conf"
+options {
+ chain_hostnames(off);
+ flush_lines(0);
+ use_dns(no);
+ use_fqdn(no);
+ owner("root"); group("adm"); perm(0640);
+ stats_freq(0);
+ bad_hostname("^gconfd$");
+};
+source s_src {
+ unix-stream("/dev/log");
+ internal();
+};
+source s_sogo {
+ pipe("/dev/sogo_log" owner(sogo) group(sogo));
+};
+destination d_stdout { pipe("/dev/stdout"); };
+destination d_redis_ui_log {
+ redis(
+ host("`REDIS_SLAVEOF_IP`")
+ persist-name("redis1")
+ port(`REDIS_SLAVEOF_PORT`)
+ command("LPUSH" "SOGO_LOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
+ );
+};
+destination d_redis_f2b_channel {
+ redis(
+ host("`REDIS_SLAVEOF_IP`")
+ persist-name("redis2")
+ port(`REDIS_SLAVEOF_PORT`)
+ command("PUBLISH" "F2B_CHANNEL" "$MESSAGE")
+ );
+};
+log {
+ source(s_sogo);
+ destination(d_redis_ui_log);
+ destination(d_redis_f2b_channel);
+};
+log {
+ source(s_sogo);
+ source(s_src);
+ destination(d_stdout);
+};
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng.conf
new file mode 100644
index 0000000..0c257d6
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng.conf
@@ -0,0 +1,45 @@
+@version: 3.19
+@include "scl.conf"
+options {
+ chain_hostnames(off);
+ flush_lines(0);
+ use_dns(no);
+ use_fqdn(no);
+ owner("root"); group("adm"); perm(0640);
+ stats_freq(0);
+ bad_hostname("^gconfd$");
+};
+source s_src {
+ unix-stream("/dev/log");
+ internal();
+};
+source s_sogo {
+ pipe("/dev/sogo_log" owner(sogo) group(sogo));
+};
+destination d_stdout { pipe("/dev/stdout"); };
+destination d_redis_ui_log {
+ redis(
+ host("redis-mailcow")
+ persist-name("redis1")
+ port(6379)
+ command("LPUSH" "SOGO_LOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
+ );
+};
+destination d_redis_f2b_channel {
+ redis(
+ host("redis-mailcow")
+ persist-name("redis2")
+ port(6379)
+ command("PUBLISH" "F2B_CHANNEL" "$MESSAGE")
+ );
+};
+log {
+ source(s_sogo);
+ destination(d_redis_ui_log);
+ destination(d_redis_f2b_channel);
+};
+log {
+ source(s_sogo);
+ source(s_src);
+ destination(d_stdout);
+};
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/Dockerfile
new file mode 100644
index 0000000..6dfec41
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/Dockerfile
@@ -0,0 +1,25 @@
+FROM solr:7.7-slim
+
+# Root is required for package installation and the bootstrap run below.
+# NOTE(review): no USER directive follows, so the image stays root;
+# presumably solr.sh drops privileges via gosu at runtime — confirm.
+USER root
+
+ENV GOSU_VERSION 1.11
+
+COPY solr.sh /
+COPY solr-config-7.7.0.xml /
+COPY solr-schema-7.7.0.xml /
+
+# NOTE(review): the gosu download is not signature-verified; 'gosu
+# nobody true' is only a smoke test.  The final 'solr.sh --bootstrap'
+# pre-creates the core at image build time.
+RUN dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
+  && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
+  && chmod +x /usr/local/bin/gosu \
+  && gosu nobody true \
+  && apt-get update && apt-get install -y --no-install-recommends \
+  tzdata \
+  curl \
+  bash \
+  && apt-get autoclean \
+  && rm -rf /var/lib/apt/lists/* \
+  && chmod +x /solr.sh \
+  && sync \
+  && bash /solr.sh --bootstrap
+
+CMD ["/solr.sh"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr-config-7.7.0.xml b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr-config-7.7.0.xml
new file mode 100644
index 0000000..3661874
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr-config-7.7.0.xml
@@ -0,0 +1,289 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+
+<!-- This is the default config with stuff non-essential to Dovecot removed. -->
+
+<config>
+ <!-- Controls what version of Lucene various components of Solr
+ adhere to. Generally, you want to use the latest version to
+ get all bug fixes and improvements. It is highly recommended
+ that you fully re-index after changing this setting as it can
+ affect both how text is indexed and queried.
+ -->
+ <luceneMatchVersion>7.7.0</luceneMatchVersion>
+
+ <!-- A 'dir' option by itself adds any files found in the directory
+ to the classpath, this is useful for including all jars in a
+ directory.
+
+ When a 'regex' is specified in addition to a 'dir', only the
+ files in that directory which completely match the regex
+ (anchored on both ends) will be included.
+
+ If a 'dir' option (with or without a regex) is used and nothing
+ is found that matches, a warning will be logged.
+
+ The examples below can be used to load some solr-contribs along
+ with their external dependencies.
+ -->
+ <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
+ <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
+
+ <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
+ <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
+
+ <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
+ <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
+
+ <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+ <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
+
+ <!-- Data Directory
+
+ Used to specify an alternate directory to hold all index data
+ other than the default ./data under the Solr home. If
+ replication is in use, this should match the replication
+ configuration.
+ -->
+ <dataDir>${solr.data.dir:}</dataDir>
+
+ <!-- The default high-performance update handler -->
+ <updateHandler class="solr.DirectUpdateHandler2">
+
+ <!-- Enables a transaction log, used for real-time get, durability, and
+ and solr cloud replica recovery. The log can grow as big as
+ uncommitted changes to the index, so use of a hard autoCommit
+ is recommended (see below).
+ "dir" - the target directory for transaction logs, defaults to the
+ solr data directory.
+ "numVersionBuckets" - sets the number of buckets used to keep
+ track of max version values when checking for re-ordered
+ updates; increase this value to reduce the cost of
+ synchronizing access to version buckets during high-volume
+ indexing, this requires 8 bytes (long) * numVersionBuckets
+ of heap space per Solr core.
+ -->
+ <updateLog>
+ <str name="dir">${solr.ulog.dir:}</str>
+ <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+ </updateLog>
+
+ <!-- AutoCommit
+
+ Perform a hard commit automatically under certain conditions.
+ Instead of enabling autoCommit, consider using "commitWithin"
+ when adding documents.
+
+ http://wiki.apache.org/solr/UpdateXmlMessages
+
+ maxDocs - Maximum number of documents to add since the last
+ commit before automatically triggering a new commit.
+
+ maxTime - Maximum amount of time in ms that is allowed to pass
+ since a document was added before automatically
+ triggering a new commit.
+ openSearcher - if false, the commit causes recent index changes
+ to be flushed to stable storage, but does not cause a new
+ searcher to be opened to make those changes visible.
+
+ If the updateLog is enabled, then it's highly recommended to
+ have some sort of hard autoCommit to limit the log size.
+ -->
+ <autoCommit>
+ <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+ <openSearcher>false</openSearcher>
+ </autoCommit>
+
+ <!-- softAutoCommit is like autoCommit except it causes a
+ 'soft' commit which only ensures that changes are visible
+ but does not ensure that data is synced to disk. This is
+ faster and more near-realtime friendly than a hard commit.
+ -->
+ <autoSoftCommit>
+ <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
+ </autoSoftCommit>
+
+ <!-- Update Related Event Listeners
+
+ Various IndexWriter related events can trigger Listeners to
+ take actions.
+
+ postCommit - fired after every commit or optimize command
+ postOptimize - fired after every optimize command
+ -->
+
+ </updateHandler>
+
+ <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Query section - these settings control query time things like caches
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+ <query>
+ <!-- Solr Internal Query Caches
+
+ There are two implementations of cache available for Solr,
+ LRUCache, based on a synchronized LinkedHashMap, and
+ FastLRUCache, based on a ConcurrentHashMap.
+
+ FastLRUCache has faster gets and slower puts in single
+ threaded operation and thus is generally faster than LRUCache
+ when the hit ratio of the cache is high (> 75%), and may be
+ faster under other scenarios on multi-cpu systems.
+ -->
+
+ <!-- Filter Cache
+
+ Cache used by SolrIndexSearcher for filters (DocSets),
+ unordered sets of *all* documents that match a query. When a
+ new searcher is opened, its caches may be prepopulated or
+ "autowarmed" using data from caches in the old searcher.
+ autowarmCount is the number of items to prepopulate. For
+ LRUCache, the autowarmed items will be the most recently
+ accessed items.
+
+ Parameters:
+ class - the SolrCache implementation LRUCache or
+ (LRUCache or FastLRUCache)
+ size - the maximum number of entries in the cache
+ initialSize - the initial capacity (number of entries) of
+ the cache. (see java.util.HashMap)
+ autowarmCount - the number of entries to prepopulate from
+ an old cache.
+ maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+ to occupy. Note that when this option is specified, the size
+ and initialSize parameters are ignored.
+ -->
+ <filterCache class="solr.FastLRUCache"
+ size="512"
+ initialSize="512"
+ autowarmCount="0"/>
+
+ <!-- Query Result Cache
+
+ Caches results of searches - ordered lists of document ids
+ (DocList) based on a query, a sort, and the range of documents requested.
+ Additional supported parameter by LRUCache:
+ maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+ to occupy
+ -->
+ <queryResultCache class="solr.LRUCache"
+ size="512"
+ initialSize="512"
+ autowarmCount="0"/>
+
+ <!-- Document Cache
+
+ Caches Lucene Document objects (the stored fields for each
+ document). Since Lucene internal document ids are transient,
+ this cache will not be autowarmed.
+ -->
+ <documentCache class="solr.LRUCache"
+ size="512"
+ initialSize="512"
+ autowarmCount="0"/>
+
+ <!-- custom cache currently used by block join -->
+ <cache name="perSegFilter"
+ class="solr.search.LRUCache"
+ size="10"
+ initialSize="0"
+ autowarmCount="10"
+ regenerator="solr.NoOpRegenerator" />
+
+ <!-- Lazy Field Loading
+
+ If true, stored fields that are not requested will be loaded
+ lazily. This can result in a significant speed improvement
+ if the usual case is to not load all stored fields,
+ especially if the skipped fields are large compressed text
+ fields.
+ -->
+ <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+ <!-- Result Window Size
+
+ An optimization for use with the queryResultCache. When a search
+ is requested, a superset of the requested number of document ids
+ are collected. For example, if a search for a particular query
+ requests matching documents 10 through 19, and queryWindowSize is 50,
+ then documents 0 through 49 will be collected and cached. Any further
+ requests in that range can be satisfied via the cache.
+ -->
+ <queryResultWindowSize>20</queryResultWindowSize>
+
+ <!-- Maximum number of documents to cache for any entry in the
+ queryResultCache.
+ -->
+ <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+ <!-- Use Cold Searcher
+
+ If a search request comes in and there is no current
+ registered searcher, then immediately register the still
+ warming searcher and use it. If "false" then all requests
+ will block until the first searcher is done warming.
+ -->
+ <useColdSearcher>false</useColdSearcher>
+
+ </query>
+
+
+ <!-- Request Dispatcher
+
+ This section contains instructions for how the SolrDispatchFilter
+ should behave when processing requests for this SolrCore.
+
+ -->
+ <requestDispatcher>
+ <httpCaching never304="true" />
+ </requestDispatcher>
+
+ <!-- Request Handlers
+
+ http://wiki.apache.org/solr/SolrRequestHandler
+
+ Incoming queries will be dispatched to a specific handler by name
+ based on the path specified in the request.
+
+ If a Request Handler is declared with startup="lazy", then it will
+ not be initialized until the first request that uses it.
+
+ -->
+ <!-- SearchHandler
+
+ http://wiki.apache.org/solr/SearchHandler
+
+ For processing Search Queries, the primary Request Handler
+ provided with Solr is "SearchHandler". It delegates to a sequence
+ of SearchComponents (see below) and supports distributed
+ queries across multiple shards
+ -->
+ <requestHandler name="/select" class="solr.SearchHandler">
+ <!-- default values for query parameters can be specified, these
+ will be overridden by parameters in the request
+ -->
+ <lst name="defaults">
+ <str name="echoParams">explicit</str>
+ <int name="rows">10</int>
+ </lst>
+ </requestHandler>
+
+ <initParams path="/update/**,/select">
+ <lst name="defaults">
+ <str name="df">_text_</str>
+ </lst>
+ </initParams>
+
+ <!-- Response Writers
+
+ http://wiki.apache.org/solr/QueryResponseWriter
+
+ Request responses will be written using the writer specified by
+ the 'wt' request parameter matching the name of a registered
+ writer.
+
+ The "default" writer is the default and will be used if 'wt' is
+ not specified in the request.
+ -->
+ <queryResponseWriter name="xml"
+ default="true"
+ class="solr.XMLResponseWriter" />
+</config>
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr-schema-7.7.0.xml b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr-schema-7.7.0.xml
new file mode 100644
index 0000000..2c2e634
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr-schema-7.7.0.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<schema name="dovecot-fts" version="2.0">
+ <fieldType name="string" class="solr.StrField" omitNorms="true" sortMissingLast="true"/>
+ <fieldType name="long" class="solr.LongPointField" positionIncrementGap="0"/>
+ <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
+
+ <fieldType name="text" class="solr.TextField" autoGeneratePhraseQueries="true" positionIncrementGap="100">
+ <analyzer type="index">
+ <tokenizer class="solr.StandardTokenizerFactory"/>
+ <filter class="solr.EdgeNGramFilterFactory" minGramSize="3" maxGramSize="20"/>
+ <filter class="solr.StopFilterFactory" words="stopwords.txt" ignoreCase="true"/>
+ <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" splitOnNumerics="1" catenateAll="1" catenateWords="1"/>
+ <filter class="solr.FlattenGraphFilterFactory"/>
+ <filter class="solr.LowerCaseFilterFactory"/>
+ <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.PorterStemFilterFactory"/>
+ </analyzer>
+ <analyzer type="query">
+ <tokenizer class="solr.StandardTokenizerFactory"/>
+ <filter class="solr.SynonymGraphFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
+ <filter class="solr.FlattenGraphFilterFactory"/>
+ <filter class="solr.StopFilterFactory" words="stopwords.txt" ignoreCase="true"/>
+ <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" splitOnNumerics="1" catenateAll="1" catenateWords="1"/>
+ <filter class="solr.LowerCaseFilterFactory"/>
+ <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+ <filter class="solr.PorterStemFilterFactory"/>
+ </analyzer>
+ </fieldType>
+
+ <field name="id" type="string" indexed="true" required="true" stored="true"/>
+ <field name="uid" type="long" indexed="true" required="true" stored="true"/>
+ <field name="box" type="string" indexed="true" required="true" stored="true"/>
+ <field name="user" type="string" indexed="true" required="true" stored="true"/>
+
+ <field name="hdr" type="text" indexed="true" stored="false"/>
+ <field name="body" type="text" indexed="true" stored="false"/>
+
+ <field name="from" type="text" indexed="true" stored="false"/>
+ <field name="to" type="text" indexed="true" stored="false"/>
+ <field name="cc" type="text" indexed="true" stored="false"/>
+ <field name="bcc" type="text" indexed="true" stored="false"/>
+ <field name="subject" type="text" indexed="true" stored="false"/>
+
+ <!-- Used by Solr internally: -->
+ <field name="_version_" type="long" indexed="true" stored="true"/>
+
+ <uniqueKey>id</uniqueKey>
+</schema>
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr.sh
new file mode 100755
index 0000000..1c5c6f5
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/solr.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ echo "SKIP_SOLR=y, skipping Solr..."
+ sleep 365d
+ exit 0
+fi
+
+MEM_TOTAL=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
+
+if [[ "${1}" != "--bootstrap" ]]; then
+ if [ "${MEM_TOTAL}" -lt 2097152 ]; then
+ echo "System memory less than 2 GB, skipping Solr..."
+ sleep 365d
+ exit 0
+ fi
+fi
+
+set -e
+
+# run the optional initdb
+. /opt/docker-solr/scripts/run-initdb
+
+# fixing volume permission
+[[ -d /opt/solr/server/solr/dovecot-fts/data ]] && chown -R solr:solr /opt/solr/server/solr/dovecot-fts/data
+if [[ "${1}" != "--bootstrap" ]]; then
+ sed -i '/SOLR_HEAP=/c\SOLR_HEAP="'"${SOLR_HEAP:-1024}"'m"' /opt/solr/bin/solr.in.sh
+else
+ sed -i '/SOLR_HEAP=/c\SOLR_HEAP="256m"' /opt/solr/bin/solr.in.sh
+fi
+
+if [[ "${1}" == "--bootstrap" ]]; then
+ echo "Creating initial configuration"
+ echo "Modifying default config set"
+ cp /solr-config-7.7.0.xml /opt/solr/server/solr/configsets/_default/conf/solrconfig.xml
+ cp /solr-schema-7.7.0.xml /opt/solr/server/solr/configsets/_default/conf/schema.xml
+ rm /opt/solr/server/solr/configsets/_default/conf/managed-schema
+
+ echo "Starting local Solr instance to setup configuration"
+ gosu solr start-local-solr
+
+ echo "Creating core \"dovecot-fts\""
+ gosu solr /opt/solr/bin/solr create -c "dovecot-fts"
+
+ # See https://github.com/docker-solr/docker-solr/issues/27
+ echo "Checking core"
+ while ! wget -O - 'http://localhost:8983/solr/admin/cores?action=STATUS' | grep -q instanceDir; do
+ echo "Could not find any cores, waiting..."
+ sleep 3
+ done
+
+ echo "Created core \"dovecot-fts\""
+
+ echo "Stopping local Solr"
+ gosu solr stop-local-solr
+
+ exit 0
+fi
+
+exec gosu solr solr-foreground
+
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/Dockerfile
new file mode 100644
index 0000000..cb34e45
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/Dockerfile
@@ -0,0 +1,23 @@
+FROM alpine:3.11
+
+LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
+
+RUN apk add --update --no-cache \
+ curl \
+ unbound \
+ bash \
+ openssl \
+ drill \
+ tzdata \
+ && curl -f -o /etc/unbound/root.hints https://www.internic.net/domain/named.cache \
+ && chown root:unbound /etc/unbound \
+ && adduser unbound tty \
+ && chmod 775 /etc/unbound
+
+EXPOSE 53/udp 53/tcp
+
+COPY docker-entrypoint.sh /docker-entrypoint.sh
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+
+CMD ["/usr/sbin/unbound"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/docker-entrypoint.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/docker-entrypoint.sh
new file mode 100755
index 0000000..bb9c115
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/docker-entrypoint.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+echo "Setting console permissions..."
+chown root:tty /dev/console
+chmod g+rw /dev/console
+echo "Receiving anchor key..."
+/usr/sbin/unbound-anchor -a /etc/unbound/trusted-key.key
+echo "Receiving root hints..."
+curl -#f -o /etc/unbound/root.hints https://www.internic.net/domain/named.cache
+/usr/sbin/unbound-control-setup
+
+# Run hooks
+for file in /hooks/*; do
+ if [ -x "${file}" ]; then
+ echo "Running hook ${file}"
+ "${file}"
+ fi
+done
+
+exec "$@"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/Dockerfile
new file mode 100644
index 0000000..e82bc5d
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/Dockerfile
@@ -0,0 +1,39 @@
+FROM alpine:3.11
+LABEL maintainer "André Peters <andre.peters@servercow.de>"
+
+# Installation
+RUN apk add --update --no-cache \
+ nagios-plugins-smtp \
+ nagios-plugins-tcp \
+ nagios-plugins-http \
+ nagios-plugins-ping \
+ mariadb-client \
+ curl \
+ bash \
+ coreutils \
+ jq \
+ fcgi \
+ openssl \
+ nagios-plugins-mysql \
+ nagios-plugins-dns \
+ nagios-plugins-disk \
+ bind-tools \
+ redis \
+ perl \
+ perl-net-dns \
+ perl-io-socket-ssl \
+ perl-io-socket-inet6 \
+ perl-socket \
+ perl-socket6 \
+ perl-mime-lite \
+ perl-term-readkey \
+ tini \
+ tzdata \
+ whois \
+ && curl -f https://raw.githubusercontent.com/mludvig/smtp-cli/v3.10/smtp-cli -o /smtp-cli \
+ && chmod +x /smtp-cli
+
+COPY watchdog.sh /watchdog.sh
+COPY check_mysql_slavestatus.sh /usr/lib/nagios/plugins/check_mysql_slavestatus.sh
+
+CMD /watchdog.sh 2> /dev/null
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/check_mysql_slavestatus.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/check_mysql_slavestatus.sh
new file mode 100755
index 0000000..ed4f0db
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/check_mysql_slavestatus.sh
@@ -0,0 +1,223 @@
+#!/bin/bash
+#########################################################################
+# Script: check_mysql_slavestatus.sh #
+# Author: Claudio Kuenzler www.claudiokuenzler.com #
+# Purpose: Monitor MySQL Replication status with Nagios #
+# Description: Connects to given MySQL hosts and checks for running #
+# SLAVE state and delivers additional info #
+# Original: This script is a modified version of #
+# check mysql slave sql running written by dhirajt #
+# Thanks to: Victor Balada Diaz for his ideas added on 20080930 #
+# Soren Klintrup for stuff added on 20081015 #
+# Marc Feret for Slave_IO_Running check 20111227 #
+# Peter Lecki for his mods added on 20120803 #
+# Serge Victor for his mods added on 20131223 #
+# Omri Bahumi for his fix added on 20131230 #
+# Marc Falzon for his option mods added on 20190822 #
+# Andreas Pfeiffer for adding socket option on 20190822 #
+# History: #
+# 2008041700 Original Script modified #
+# 2008041701 Added additional info if status OK #
+# 2008041702 Added usage of script with params -H -u -p #
+# 2008041703 Added bindir variable for multiple platforms #
+# 2008041704 Added help because mankind needs help #
+# 2008093000 Using /bin/sh instead of /bin/bash #
+# 2008093001 Added port for MySQL server #
+# 2008093002 Added mysqldir if mysql binary is elsewhere #
+# 2008101501 Changed bindir/mysqldir to use PATH #
+# 2008101501 Use $() instead of `` to avoid forks #
+# 2008101501 Use ${} for variables to prevent problems #
+# 2008101501 Check if required commands exist #
+# 2008101501 Check if mysql connection works #
+# 2008101501 Exit with unknown status at script end #
+# 2008101501 Also display help if no option is given #
+# 2008101501 Add warning/critical check to delay #
+# 2011062200 Add perfdata #
+# 2011122700 Checking Slave_IO_Running #
+# 2012080300 Changed to use only one mysql query #
+# 2012080301 Added warn and crit delay as optional args #
+# 2012080302 Added standard -h option for syntax help #
+# 2012080303 Added check for mandatory options passed in #
+# 2012080304 Added error output from mysql #
+# 2012080305 Changed from 'cut' to 'awk' (eliminate ws) #
+# 2012111600 Do not show password in error output #
+# 2013042800 Changed PATH to use existing PATH, too #
+# 2013050800 Bugfix in PATH export #
+# 2013092700 Bugfix in PATH export #
+# 2013092701 Bugfix in getopts #
+# 2013101600 Rewrite of threshold logic and handling #
+# 2013101601 Optical clean up #
+# 2013101602 Rewrite help output #
+# 2013101700 Handle Slave IO in 'Connecting' state #
+# 2013101701 Minor changes in output, handling UNKNOWN situations now #
+# 2013101702 Exit CRITICAL when Slave IO in Connecting state #
+# 2013123000 Slave_SQL_Running also matched Slave_SQL_Running_State #
+# 2015011600 Added 'moving' check to catch possible connection issues #
+# 2015011900 Use its own threshold for replication moving check #
+# 2019082200 Add support for mysql option file #
+# 2019082201 Improve password security (remove from mysql cli) #
+# 2019082202 Added socket parameter (-S) #
+# 2019082203 Use default port 3306, makes -P optional #
+# 2019082204 Fix moving subcheck, improve documentation #
+#########################################################################
+# Usage: ./check_mysql_slavestatus.sh (-o file|(-H dbhost [-P port]|-S socket) -u dbuser -p dbpass) [-s connection] [-w integer] [-c integer] [-m integer]
+#########################################################################
+help="\ncheck_mysql_slavestatus.sh (c) 2008-2019 GNU GPLv2 licence
+Usage: $0 (-o file|(-H dbhost [-P port]|-S socket) -u username -p password) [-s connection] [-w integer] [-c integer] [-m]\n
+Options:\n-o Path to option file containing connection settings (e.g. /home/nagios/.my.cnf). Note: If this option is used, -H, -u, -p parameters will become optional\n-H Hostname or IP of slave server\n-P MySQL Port of slave server (optional, defaults to 3306)\n-u Username of DB-user\n-p Password of DB-user\n-S database socket\n-s Connection name (optional, with multi-source replication)\n-w Replication delay in seconds for Warning status (optional)\n-c Replication delay in seconds for Critical status (optional)\n-m Threshold in seconds since when replication did not move (compares the slaves log position)\n
+Attention: The DB-user you type in must have CLIENT REPLICATION rights on the DB-server. Example:\n\tGRANT REPLICATION CLIENT on *.* TO 'nagios'@'%' IDENTIFIED BY 'secret';"
+
+STATE_OK=0 # define the exit code if status is OK
+STATE_WARNING=1 # define the exit code if status is Warning (not really used)
+STATE_CRITICAL=2 # define the exit code if status is Critical
+STATE_UNKNOWN=3 # define the exit code if status is Unknown
+export PATH=$PATH:/usr/local/bin:/usr/bin:/bin # Set path
+crit="No" # what is the answer of MySQL Slave_SQL_Running for a Critical status?
+ok="Yes" # what is the answer of MySQL Slave_SQL_Running for an OK status?
+port="-P 3306" # on which tcp port is the target MySQL slave listening?
+
+for cmd in mysql awk grep expr [
+do
+ if ! command -v "${cmd}" &>/dev/null
+ then
+ echo "UNKNOWN: This script requires the command '${cmd}' but it does not exist; please check if command exists and PATH is correct"
+ exit ${STATE_UNKNOWN}
+ fi
+done
+
+# Check for people who need help
+#########################################################################
+if [ "${1}" = "--help" ] || [ "${#}" = "0" ];
+	then
+	echo -e "${help}";
+	exit 1;
+fi
+
+# Important given variables for the DB-Connect
+#########################################################################
+while getopts "H:P:u:p:S:s:w:c:o:m:h" Input;
+do
+	case ${Input} in
+	H) host="-h ${OPTARG}";slavetarget=${OPTARG};;
+	P) port="-P ${OPTARG}";;
+	u) user="-u ${OPTARG}";;
+	p) password="${OPTARG}"; export MYSQL_PWD="${OPTARG}";;
+	S) socket="-S ${OPTARG}";;
+	s) connection=\"${OPTARG}\";;
+	w) warn_delay=${OPTARG};;
+	c) crit_delay=${OPTARG};;
+	o) optfile="--defaults-extra-file=${OPTARG}";;
+	m) moving=${OPTARG};;
+	h) echo -e "${help}"; exit 1;;
+	\?) echo "Wrong option given. Check help (-h, --help) for usage."
+	exit 1
+	;;
+	esac
+done
+
+# Check if we can write to tmp
+#########################################################################
+test -w /tmp && tmpfile="/tmp/mysql_slave_${slavetarget}_pos.txt"
+
+# Connect to the DB server and check for informations
+#########################################################################
+# Check whether all required arguments were passed in (either option file or full connection settings)
+if [[ -z "${optfile}" && -z "${host}" && -z "${socket}" ]]; then
+	echo -e "Missing required parameter(s)"; exit ${STATE_UNKNOWN}
+elif [[ -n "${host}" && (-z "${user}" || -z "${password}") ]]; then
+	echo -e "Missing required parameter(s)"; exit ${STATE_UNKNOWN}
+elif [[ -n "${socket}" && (-z "${user}" || -z "${password}") ]]; then
+	echo -e "Missing required parameter(s)"; exit ${STATE_UNKNOWN}
+fi
+
+# Connect to the DB server and store output in vars
+if [[ -n $socket ]]; then
+	ConnectionResult=$(mysql ${optfile} ${socket} ${user} -e "show slave ${connection} status\G" 2>&1)
+else
+	ConnectionResult=$(mysql ${optfile} ${host} ${port} ${user} -e "show slave ${connection} status\G" 2>&1)
+fi
+
+if [ -z "`echo "${ConnectionResult}" |grep Slave_IO_State`" ]; then
+	echo -e "CRITICAL: Unable to connect to server"
+	exit ${STATE_CRITICAL}
+fi
+check=`echo "${ConnectionResult}" |grep Slave_SQL_Running: | awk '{print $2}'`
+checkio=`echo "${ConnectionResult}" |grep Slave_IO_Running: | awk '{print $2}'`
+masterinfo=`echo "${ConnectionResult}" |grep Master_Host: | awk '{print $2}'`
+delayinfo=`echo "${ConnectionResult}" |grep Seconds_Behind_Master: | awk '{print $2}'`
+readpos=`echo "${ConnectionResult}" |grep Read_Master_Log_Pos: | awk '{print $2}'`
+execpos=`echo "${ConnectionResult}" |grep Exec_Master_Log_Pos: | awk '{print $2}'`
+
+# Output of different exit states
+#########################################################################
+if [ "${check}" = "NULL" ]; then
+echo "CRITICAL: Slave_SQL_Running is answering NULL"; exit ${STATE_CRITICAL};
+fi
+
+if [ "${check}" = "${crit}" ]; then
+echo "CRITICAL: ${host}:${port} Slave_SQL_Running: ${check}"; exit ${STATE_CRITICAL};
+fi
+
+if [ "${checkio}" = "${crit}" ]; then
+echo "CRITICAL: ${host} Slave_IO_Running: ${checkio}"; exit ${STATE_CRITICAL};
+fi
+
+if [ "${checkio}" = "Connecting" ]; then
+echo "CRITICAL: ${host} Slave_IO_Running: ${checkio}"; exit ${STATE_CRITICAL};
+fi
+
+if [ "${check}" = "${ok}" ] && [ "${checkio}" = "${ok}" ]; then
+  # Delay thresholds are set
+  if [[ -n ${warn_delay} ]] && [[ -n ${crit_delay} ]]; then
+    if ! [[ ${warn_delay} -gt 0 ]]; then echo "Warning threshold must be a valid integer greater than 0"; exit $STATE_UNKNOWN; fi
+    if ! [[ ${crit_delay} -gt 0 ]]; then echo "Critical threshold must be a valid integer greater than 0"; exit $STATE_UNKNOWN; fi
+    if [[ -z ${warn_delay} ]] || [[ -z ${crit_delay} ]]; then echo "Both warning and critical thresholds must be set"; exit $STATE_UNKNOWN; fi
+    if [[ ${warn_delay} -gt ${crit_delay} ]]; then echo "Warning threshold cannot be greater than critical"; exit $STATE_UNKNOWN; fi
+
+    if [[ ${delayinfo} -ge ${crit_delay} ]]
+      then echo "CRITICAL: Slave is ${delayinfo} seconds behind Master | delay=${delayinfo}s"; exit ${STATE_CRITICAL}
+    elif [[ ${delayinfo} -ge ${warn_delay} ]]
+      then echo "WARNING: Slave is ${delayinfo} seconds behind Master | delay=${delayinfo}s"; exit ${STATE_WARNING}
+    else
+      # Everything looks OK here but now let us check if the replication is moving
+      if [[ -n ${moving} ]] && [[ -n ${tmpfile} ]] && [[ $readpos -eq $execpos ]]
+        then
+        #echo "Debug: Read pos is $readpos - Exec pos is $execpos"
+        # Check if tmp file exists
+        curtime=`date +%s`
+        if [[ -w $tmpfile ]]
+          then
+          tmpfiletime=`date +%s -r $tmpfile`
+          if [[ `expr $curtime - $tmpfiletime` -gt ${moving} ]]
+            then
+            exectmp=`cat $tmpfile`
+            #echo "Debug: Exec pos in tmpfile is $exectmp"
+            if [[ $exectmp -eq $execpos ]]
+              then
+              # The value read from the tmp file and from db are the same. Replication hasn't moved!
+              echo "WARNING: Slave replication has not moved in ${moving} seconds. Manual check required."; exit ${STATE_WARNING}
+            else
+              # Replication has moved since the tmp file was written. Delete tmp file and output OK.
+              rm $tmpfile
+              echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
+            fi
+          else
+            echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
+          fi
+        else
+          echo "$execpos" > $tmpfile
+          echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
+        fi
+      else # Everything OK (no additional moving check)
+        echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
+      fi
+    fi
+  else
+    # Without delay thresholds
+    echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"
+    exit ${STATE_OK};
+  fi
+fi
+
+echo "UNKNOWN: should never reach this part (Slave_SQL_Running is ${check}, Slave_IO_Running is ${checkio})"
+exit ${STATE_UNKNOWN}
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/watchdog.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/watchdog.sh
new file mode 100755
index 0000000..1e7c2f4
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/watchdog.sh
@@ -0,0 +1,1126 @@
+#!/bin/bash
+# mailcow watchdog: spawns one background check loop per service; when a
+# check hits its error threshold, the service name is written to a named
+# pipe (/tmp/com_pipe) for the restart/notification dispatcher.
+
+# Exit on termination signals; on exit, kill the whole process group so no
+# background check loops are left behind.
+trap "exit" INT TERM
+trap "kill 0" EXIT
+
+# Prepare
+BACKGROUND_TASKS=()
+echo "Waiting for containers to settle..."
+sleep 30
+
+# Watchdog disabled: idle for a year, then re-exec this script.
+if [[ "${USE_WATCHDOG}" =~ ^([nN][oO]|[nN])+$ ]]; then
+ echo -e "$(date) - USE_WATCHDOG=n, skipping watchdog..."
+ sleep 365d
+ exec $(readlink -f "$0")
+fi
+
+# Checks pipe their corresponding container name in this pipe
+if [[ ! -p /tmp/com_pipe ]]; then
+ mkfifo /tmp/com_pipe
+fi
+
+# Wait for containers
+while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
+ echo "Waiting for SQL..."
+ sleep 2
+done
+
+# Do not attempt to write to slave
+if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
+ REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+else
+ REDIS_CMDLINE="redis-cli -h redis -p 6379"
+fi
+
+until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
+ echo "Waiting for Redis..."
+ sleep 2
+done
+
+# Clear any stale fail2ban diff result from a previous run
+${REDIS_CMDLINE} DEL F2B_RES > /dev/null
+
+# Common functions
+get_ipv6(){
+ local IPV6=
+ local IPV6_SRCS=
+ local TRY=
+ IPV6_SRCS[0]="ip6.korves.net"
+ IPV6_SRCS[1]="ip6.mailcow.email"
+ until [[ ! -z ${IPV6} ]] || [[ ${TRY} -ge 10 ]]; do
+ IPV6=$(curl --connect-timeout 3 -m 10 -L6s ${IPV6_SRCS[$RANDOM % ${#IPV6_SRCS[@]} ]} | grep "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$")
+ [[ ! -z ${TRY} ]] && sleep 1
+ TRY=$((TRY+1))
+ done
+ echo ${IPV6}
+}
+
+array_diff() {
+ # https://stackoverflow.com/questions/2312762, Alex Offshore
+ eval local ARR1=\(\"\${$2[@]}\"\)
+ eval local ARR2=\(\"\${$3[@]}\"\)
+ local IFS=$'\n'
+ mapfile -t $1 < <(comm -23 <(echo "${ARR1[*]}" | sort) <(echo "${ARR2[*]}" | sort))
+}
+
+# progress <service> <total> <current> [<diff>]
+# Pushes a health-level JSON entry onto the WATCHDOG_LOG Redis list and logs
+# it locally. Returns 10 when <current> has dropped to 0 (dead service);
+# callers inspect $? immediately after calling this function.
+# NOTE: parameters are stored in global (non-local) variables.
+progress() {
+ SERVICE=${1}
+ TOTAL=${2}
+ CURRENT=${3}
+ DIFF=${4}
+ [[ -z ${DIFF} ]] && DIFF=0
+ [[ -z ${TOTAL} || -z ${CURRENT} ]] && return
+ [[ ${CURRENT} -gt ${TOTAL} ]] && return
+ [[ ${CURRENT} -lt 0 ]] && CURRENT=0
+ # Integer percentage; the "200*C/T % 2" term rounds to nearest instead of
+ # truncating
+ PERCENT=$(( 200 * ${CURRENT} / ${TOTAL} % 2 + 100 * ${CURRENT} / ${TOTAL} ))
+ ${REDIS_CMDLINE} LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"service\":\"${SERVICE}\",\"lvl\":\"${PERCENT}\",\"hpnow\":\"${CURRENT}\",\"hptotal\":\"${TOTAL}\",\"hpdiff\":\"${DIFF}\"}" > /dev/null
+ log_msg "${SERVICE} health level: ${PERCENT}% (${CURRENT}/${TOTAL}), health trend: ${DIFF}" no_redis
+ # Return 10 to indicate a dead service
+ [ ${CURRENT} -le 0 ] && return 10
+}
+
+# log_msg <message> [no_redis]
+# Echoes <message> prefixed with the current date. Unless "no_redis" is
+# passed, also pushes the message as JSON onto the WATCHDOG_LOG Redis list,
+# with JSON-breaking characters replaced by spaces.
+log_msg() {
+ if [[ ${2} != "no_redis" ]]; then
+ ${REDIS_CMDLINE} LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"message\":\"$(printf '%s' "${1}" | \
+ tr '\r\n%&;$"_[]{}-' ' ')\"}" > /dev/null
+ fi
+ # Deliberately unquoted: word-splitting collapses whitespace/newlines in
+ # the console output
+ echo $(date) $(printf '%s\n' "${1}")
+}
+
+# mail_error <service> [<body>] [<throttle_seconds>]
+# Sends a notification mail for <service> to every address listed (comma-
+# separated) in WATCHDOG_NOTIFY_EMAIL via smtp-cli. With a throttle, at most
+# one mail per <throttle_seconds> is sent (tracked via a Redis TTL key).
+function mail_error() {
+ THROTTLE=
+ [[ -z ${1} ]] && return 1
+ # If exists, body will be the content of "/tmp/${1}", even if ${2} is set
+ [[ -z ${2} ]] && BODY="Service was restarted on $(date), please check your mailcow installation." || BODY="$(date) - ${2}"
+ # If exists, mail will be throttled by argument in seconds
+ [[ ! -z ${3} ]] && THROTTLE=${3}
+ if [[ ! -z ${THROTTLE} ]]; then
+ TTL_LEFT="$(${REDIS_CMDLINE} TTL THROTTLE_${1} 2> /dev/null)"
+ if [[ "${TTL_LEFT}" == "-2" ]]; then
+ # Delay key not found, setting a delay key now
+ ${REDIS_CMDLINE} SET THROTTLE_${1} 1 EX ${THROTTLE}
+ else
+ log_msg "Not sending notification email now, blocked for ${TTL_LEFT} seconds..."
+ return 1
+ fi
+ fi
+ # NOTE(review): first sed expression strips only the first double quote
+ # found anywhere, not specifically a leading one — verify intent
+ WATCHDOG_NOTIFY_EMAIL=$(echo "${WATCHDOG_NOTIFY_EMAIL}" | sed 's/"//;s|"$||')
+ # Some exceptions for subject and body formats
+ if [[ ${1} == "fail2ban" ]]; then
+ SUBJECT="${BODY}"
+ BODY="Please see netfilter-mailcow for more details and triggered rules."
+ else
+ SUBJECT="Watchdog ALERT: ${1}"
+ fi
+ IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}"
+ for rcpt in "${MAIL_RCPTS[@]}"; do
+ RCPT_DOMAIN=
+ #RCPT_MX=
+ RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'})
+ # Latest smtp-cli looks up mx via dns
+ #RCPT_MX=$(dig +short ${RCPT_DOMAIN} mx | sort -n | awk '{print $2; exit}')
+ #if [[ -z ${RCPT_MX} ]]; then
+ # log_msg "Cannot determine MX for ${rcpt}, skipping email notification..."
+ # return 1
+ #fi
+ # If a per-service log file exists, pass its path — smtp-cli reads the
+ # file's content as the plain-text body (presumably; confirm smtp-cli
+ # treats an existing path as a file argument)
+ [ -f "/tmp/${1}" ] && BODY="/tmp/${1}"
+ timeout 10s ./smtp-cli --missing-modules-ok \
+ --charset=UTF-8 \
+ --subject="${SUBJECT}" \
+ --body-plain="${BODY}" \
+ --add-header="X-Priority: 1" \
+ --to=${rcpt} \
+ --from="watchdog@${MAILCOW_HOSTNAME}" \
+ --hello-host=${MAILCOW_HOSTNAME} \
+ --ipv4
+ #--server="${RCPT_MX}"
+ log_msg "Sent notification email to ${rcpt}"
+ done
+}
+
+# get_container_ip <container_name>
+# Resolves the IPv4 address of a compose service, either via DNS (when
+# IP_BY_DOCKER_API=0) or via the dockerapi container. Retries up to 5 times;
+# prints the sentinel 240.0.0.0 when no address could be determined.
+get_container_ip() {
+ # ${1} is container
+ CONTAINER_ID=()
+ CONTAINER_IPS=()
+ CONTAINER_IP=
+ LOOP_C=1
+ until [[ ${CONTAINER_IP} =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]] || [[ ${LOOP_C} -gt 5 ]]; do
+ if [ ${IP_BY_DOCKER_API} -eq 0 ]; then
+ CONTAINER_IP=$(dig a "${1}" +short)
+ else
+ sleep 0.5
+ # get long container id for exact match
+ CONTAINER_ID=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id"))
+ # returned id can have multiple elements (if scaled), shuffle for random test
+ CONTAINER_ID=($(printf "%s\n" "${CONTAINER_ID[@]}" | shuf))
+ if [[ ! -z ${CONTAINER_ID} ]]; then
+ for matched_container in "${CONTAINER_ID[@]}"; do
+ CONTAINER_IPS=($(curl --silent --insecure https://dockerapi/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
+ for ip_match in "${CONTAINER_IPS[@]}"; do
+ # grep will do nothing if one of these vars is empty
+ [[ -z ${ip_match} ]] && continue
+ [[ -z ${IPV4_NETWORK} ]] && continue
+ # only return ips that are part of our network
+ if ! grep -q ${IPV4_NETWORK} <(echo ${ip_match}); then
+ continue
+ else
+ CONTAINER_IP=${ip_match}
+ break
+ fi
+ done
+ [[ ! -z ${CONTAINER_IP} ]] && break
+ done
+ fi
+ fi
+ LOOP_C=$((LOOP_C + 1))
+ done
+ # 240.0.0.0 is reserved/unroutable — downstream checks will simply fail
+ [[ ${LOOP_C} -gt 5 ]] && echo 240.0.0.0 || echo ${CONTAINER_IP}
+}
+
+# One-time check: if the host has an address in IPV6_NETWORK but no public
+# IPv6 connectivity can be established, notify the admin once at startup.
+if grep -qi "$(echo ${IPV6_NETWORK} | cut -d: -f1-3)" <<< "$(ip a s)"; then
+ if [[ -z "$(get_ipv6)" ]]; then
+ mail_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection."
+ fi
+fi
+
+external_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${EXTERNAL_CHECKS_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ GUID=$(mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'GUID'" -BN)
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ err_c_cur=${err_count}
+ CHECK_REPONSE="$(curl --connect-timeout 3 -m 10 -4 -s https://checks.mailcow.email -X POST -dguid=${GUID} 2> /dev/null)"
+ if [[ ! -z "${CHECK_REPONSE}" ]] && [[ "$(echo ${CHECK_REPONSE} | jq -r .response)" == "critical" ]]; then
+ echo ${CHECK_REPONSE} | jq -r .out > /tmp/external_checks
+ err_count=$(( ${err_count} + 1 ))
+ fi
+ CHECK_REPONSE6="$(curl --connect-timeout 3 -m 10 -6 -s https://checks.mailcow.email -X POST -dguid=${GUID} 2> /dev/null)"
+ if [[ ! -z "${CHECK_REPONSE6}" ]] && [[ "$(echo ${CHECK_REPONSE6} | jq -r .response)" == "critical" ]]; then
+ echo ${CHECK_REPONSE} | jq -r .out > /tmp/external_checks
+ err_count=$(( ${err_count} + 1 ))
+ fi
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "External checks" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 60
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 20 ) + 120 ))
+ fi
+ done
+ return 1
+}
+
+# Probes nginx-mailcow via HTTP on port 8081 (check_http) in a loop; returns
+# 1 when NGINX_THRESHOLD errors accumulate. SIGUSR1 lowers the count by 2.
+nginx_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${NGINX_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/nginx-mailcow; echo "$(tail -50 /tmp/nginx-mailcow)" > /tmp/nginx-mailcow
+ host_ip=$(get_container_ip nginx-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u / -p 8081 2>> /tmp/nginx-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Nginx" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Probes unbound-mailcow with a DNS lookup (check_dns) plus a DNSSEC "ad"
+# flag check; returns 1 when UNBOUND_THRESHOLD errors accumulate.
+unbound_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${UNBOUND_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/unbound-mailcow; echo "$(tail -50 /tmp/unbound-mailcow)" > /tmp/unbound-mailcow
+ host_ip=$(get_container_ip unbound-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_dns -s ${host_ip} -H stackoverflow.com 2>> /tmp/unbound-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # NOTE(review): this dig uses the system resolver, not ${host_ip} —
+ # confirm the container's resolver points at unbound
+ DNSSEC=$(dig com +dnssec | egrep 'flags:.+ad')
+ if [[ -z ${DNSSEC} ]]; then
+ echo "DNSSEC failure" 2>> /tmp/unbound-mailcow 1>&2
+ err_count=$(( ${err_count} + 1))
+ else
+ echo "DNSSEC check succeeded" 2>> /tmp/unbound-mailcow 1>&2
+ fi
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Unbound" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Probes the local redis-mailcow container with a TCP PING/PONG exchange;
+# returns 1 when REDIS_THRESHOLD errors accumulate.
+redis_checks() {
+ # A check for the local redis container
+ err_count=0
+ diff_c=0
+ THRESHOLD=${REDIS_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/redis-mailcow; echo "$(tail -50 /tmp/redis-mailcow)" > /tmp/redis-mailcow
+ # NOTE(review): host_ip is fetched but the check below connects via the
+ # "redis-mailcow" hostname instead — host_ip appears unused here
+ host_ip=$(get_container_ip redis-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_tcp -4 -H redis-mailcow -p 6379 -E -s "PING\n" -q "QUIT" -e "PONG" 2>> /tmp/redis-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Redis" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Probes MySQL/MariaDB over the local socket (connection check plus a simple
+# query); returns 1 when MYSQL_THRESHOLD errors accumulate.
+mysql_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${MYSQL_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/mysql-mailcow; echo "$(tail -50 /tmp/mysql-mailcow)" > /tmp/mysql-mailcow
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_mysql -s /var/run/mysqld/mysqld.sock -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} 2>> /tmp/mysql-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ /usr/lib/nagios/plugins/check_mysql_query -s /var/run/mysqld/mysqld.sock -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} -q "SELECT COUNT(*) FROM information_schema.tables" 2>> /tmp/mysql-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "MySQL/MariaDB" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Checks MariaDB slave replication status via check_mysql_slavestatus.sh;
+# returns 1 when MYSQL_REPLICATION_THRESHOLD errors accumulate.
+mysql_repl_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${MYSQL_REPLICATION_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/mysql_repl_checks; echo "$(tail -50 /tmp/mysql_repl_checks)" > /tmp/mysql_repl_checks
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_mysql_slavestatus.sh -S /var/run/mysqld/mysqld.sock -u root -p ${DBROOT} 2>> /tmp/mysql_repl_checks 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "MySQL/MariaDB replication" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 60
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Probes SOGo's web UI on port 20000 expecting the "SOGo.MainUI" marker;
+# returns 1 when SOGO_THRESHOLD errors accumulate.
+sogo_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${SOGO_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/sogo-mailcow; echo "$(tail -50 /tmp/sogo-mailcow)" > /tmp/sogo-mailcow
+ host_ip=$(get_container_ip sogo-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /SOGo.index/ -p 20000 -R "SOGo\.MainUI" 2>> /tmp/sogo-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "SOGo" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Probes Postfix on port 589 (internal submission port): one full SMTP
+# transaction to a local watchdog recipient and one STARTTLS handshake;
+# returns 1 when POSTFIX_THRESHOLD errors accumulate.
+postfix_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${POSTFIX_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/postfix-mailcow; echo "$(tail -50 /tmp/postfix-mailcow)" > /tmp/postfix-mailcow
+ host_ip=$(get_container_ip postfix-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -f "watchdog@invalid" -C "RCPT TO:watchdog@localhost" -C DATA -C . -R 250 2>> /tmp/postfix-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -S 2>> /tmp/postfix-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Postfix" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Probes clamd-mailcow with check_clamd; returns 1 when CLAMD_THRESHOLD
+# errors accumulate. Sleeps longer than the other checks (clamd is slow to
+# start/reload signatures).
+clamd_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${CLAMD_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/clamd-mailcow; echo "$(tail -50 /tmp/clamd-mailcow)" > /tmp/clamd-mailcow
+ host_ip=$(get_container_ip clamd-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_clamd -4 -H ${host_ip} 2>> /tmp/clamd-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Clamd" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 120 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Probes Dovecot services: LMTP (port 24), IMAPS (993), IMAP (143), the
+# quota-status service (10001) and ManageSieve (4190); returns 1 when
+# DOVECOT_THRESHOLD errors accumulate.
+dovecot_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${DOVECOT_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/dovecot-mailcow; echo "$(tail -50 /tmp/dovecot-mailcow)" > /tmp/dovecot-mailcow
+ host_ip=$(get_container_ip dovecot-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 24 -f "watchdog@invalid" -C "RCPT TO:<watchdog@invalid>" -L -R "User doesn't exist" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 993 -S -e "OK " 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 143 -e "OK " 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 10001 -e "VERSION" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 4190 -e "Dovecot ready" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Dovecot" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Watches the DOVECOT_REPL_HEALTH flag in Redis; any value other than "1"
+# counts one error. Returns 1 when DOVECOT_REPL_THRESHOLD errors accumulate.
+dovecot_repl_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${DOVECOT_REPL_THRESHOLD}
+ # Fix: use --raw (as in the loop below); "-r" is redis-cli's repeat-count
+ # option and expects an integer, so "-r GET" made this initial read fail.
+ D_REPL_STATUS=$(redis-cli --raw -h redis GET DOVECOT_REPL_HEALTH)
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ err_c_cur=${err_count}
+ D_REPL_STATUS=$(redis-cli --raw -h redis GET DOVECOT_REPL_HEALTH)
+ if [[ "${D_REPL_STATUS}" != "1" ]]; then
+ err_count=$(( ${err_count} + 1 ))
+ fi
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Dovecot replication" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 60
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Verifies the TLS certificates presented by Postfix (589) and Dovecot (993)
+# are valid for at least 7 more days (check_smtp/check_imap -D 7).
+# Threshold is hard-coded to 7 and the sleep is a fixed 5 minutes since
+# notifications are throttled elsewhere.
+cert_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=7
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/certcheck; echo "$(tail -50 /tmp/certcheck)" > /tmp/certcheck
+ # NOTE(review): other checks resolve "postfix-mailcow"/"dovecot-mailcow";
+ # confirm "postfix"/"dovecot" resolve here, otherwise the 240.0.0.0
+ # sentinel is returned and these checks always fail
+ host_ip_postfix=$(get_container_ip postfix)
+ host_ip_dovecot=$(get_container_ip dovecot)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_smtp -H ${host_ip_postfix} -p 589 -4 -S -D 7 2>> /tmp/certcheck 1>&2; err_count=$(( ${err_count} + $? ))
+ /usr/lib/nagios/plugins/check_imap -H ${host_ip_dovecot} -p 993 -4 -S -D 7 2>> /tmp/certcheck 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Primary certificate expiry check" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ # Always sleep 5 minutes, mail notifications are limited
+ sleep 300
+ done
+ return 1
+}
+
+# Probes the two PHP-FPM pools (TCP ports 9001 and 9002); returns 1 when
+# PHPFPM_THRESHOLD errors accumulate.
+phpfpm_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${PHPFPM_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/php-fpm-mailcow; echo "$(tail -50 /tmp/php-fpm-mailcow)" > /tmp/php-fpm-mailcow
+ host_ip=$(get_container_ip php-fpm-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_tcp -H ${host_ip} -p 9001 2>> /tmp/php-fpm-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ /usr/lib/nagios/plugins/check_tcp -H ${host_ip} -p 9002 2>> /tmp/php-fpm-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "PHP-FPM" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Watches the RL_LOG Redis list for newly applied ratelimits: a changed head
+# qid counts one "error" and dumps the last 10 entries to /tmp/ratelimit.
+# This check notifies rather than restarts anything.
+ratelimit_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${RATELIMIT_THRESHOLD}
+ RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ err_c_cur=${err_count}
+ RL_LOG_STATUS_PREV=${RL_LOG_STATUS}
+ RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
+ if [[ ${RL_LOG_STATUS_PREV} != ${RL_LOG_STATUS} ]]; then
+ err_count=$(( ${err_count} + 1 ))
+ echo 'Last 10 applied ratelimits (may overlap with previous reports).' > /tmp/ratelimit
+ echo 'Full ratelimit buckets can be emptied by deleting the ratelimit hash from within mailcow UI (see /debug -> Protocols -> Ratelimit):' >> /tmp/ratelimit
+ echo >> /tmp/ratelimit
+ redis-cli --raw -h redis LRANGE RL_LOG 0 10 | jq . >> /tmp/ratelimit
+ fi
+ # Quiet pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Ratelimit" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Counts files in Postfix's deferred queue; exceeding MAILQ_CRIT counts one
+# error. Returns 1 when MAILQ_THRESHOLD errors accumulate.
+mailq_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${MAILQ_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/mail_queue_status; echo "$(tail -50 /tmp/mail_queue_status)" > /tmp/mail_queue_status
+ MAILQ_LOG_STATUS=$(find /var/spool/postfix/deferred -type f | wc -l)
+ echo "Mail queue contains ${MAILQ_LOG_STATUS} items (critical limit is ${MAILQ_CRIT}) at $(date)" >> /tmp/mail_queue_status
+ err_c_cur=${err_count}
+ if [ ${MAILQ_LOG_STATUS} -ge ${MAILQ_CRIT} ]; then
+ err_count=$(( ${err_count} + 1 ))
+ echo "Mail queue contains ${MAILQ_LOG_STATUS} items (critical limit is ${MAILQ_CRIT}) at $(date)" >> /tmp/mail_queue_status
+ fi
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Mail queue" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 60
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Watches F2B_ACTIVE_BANS in Redis: any newly banned key since the previous
+# iteration counts one "error" and the new bans are stored in the F2B_RES
+# Redis key (consumed by the notifier). This check notifies, not restarts.
+fail2ban_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${FAIL2BAN_THRESHOLD}
+ F2B_LOG_STATUS=($(${REDIS_CMDLINE} --raw HKEYS F2B_ACTIVE_BANS))
+ F2B_RES=
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ err_c_cur=${err_count}
+ F2B_LOG_STATUS_PREV=(${F2B_LOG_STATUS[@]})
+ F2B_LOG_STATUS=($(${REDIS_CMDLINE} --raw HKEYS F2B_ACTIVE_BANS))
+ # F2B_RES = bans present now that were not present before
+ array_diff F2B_RES F2B_LOG_STATUS F2B_LOG_STATUS_PREV
+ if [[ ! -z "${F2B_RES}" ]]; then
+ err_count=$(( ${err_count} + 1 ))
+ # Keep only safe IP/hex characters before storing
+ echo -n "${F2B_RES[@]}" | tr -cd "[a-fA-F0-9.:/] " | timeout 3s ${REDIS_CMDLINE} -x SET F2B_RES > /dev/null
+ if [ $? -ne 0 ]; then
+ ${REDIS_CMDLINE} -x DEL F2B_RES
+ fi
+ fi
+ # Quiet pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Fail2ban" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Watches the ACME_FAIL_TIME timestamp in Redis; a changed value (set by the
+# acme container on failure) counts one error. Returns 1 at ACME_THRESHOLD.
+acme_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${ACME_THRESHOLD}
+ ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME)
+ if [[ -z "${ACME_LOG_STATUS}" ]]; then
+ ${REDIS_CMDLINE} SET ACME_FAIL_TIME 0
+ ACME_LOG_STATUS=0
+ fi
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ err_c_cur=${err_count}
+ ACME_LOG_STATUS_PREV=${ACME_LOG_STATUS}
+ ACME_LC=0
+ # Retry the read up to 3 times in case Redis is briefly unavailable.
+ # NOTE(review): ACME_LOG_STATUS is usually non-empty from the previous
+ # iteration, so this loop typically runs zero times — verify intent.
+ until [[ ! -z ${ACME_LOG_STATUS} ]] || [ ${ACME_LC} -ge 3 ]; do
+ ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME 2> /dev/null)
+ sleep 3
+ ACME_LC=$((ACME_LC+1))
+ done
+ if [[ ${ACME_LOG_STATUS_PREV} != ${ACME_LOG_STATUS} ]]; then
+ err_count=$(( ${err_count} + 1 ))
+ fi
+ # Quiet pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "ACME" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Verifies the ipv6nat-mailcow container was started AFTER all other project
+# containers (it must attach to their networks last). If it started less
+# than 30s after the most recently started other container, count one error.
+ipv6nat_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${IPV6NAT_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ err_c_cur=${err_count}
+ CONTAINERS=$(curl --silent --insecure https://dockerapi/containers/json)
+ IPV6NAT_CONTAINER_ID=$(echo ${CONTAINERS} | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"ipv6nat-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
+ if [[ ! -z ${IPV6NAT_CONTAINER_ID} ]]; then
+ # Newest StartedAt (epoch) among all project containers except ipv6nat
+ LATEST_STARTED="$(echo ${CONTAINERS} | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], StartedAt: .State.StartedAt}" | jq -rc "select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | select( .name | tostring | contains(\"ipv6nat-mailcow\") | not)" | jq -rc .StartedAt | xargs -n1 date +%s -d | sort | tail -n1)"
+ LATEST_IPV6NAT="$(echo ${CONTAINERS} | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], StartedAt: .State.StartedAt}" | jq -rc "select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | select( .name | tostring | contains(\"ipv6nat-mailcow\"))" | jq -rc .StartedAt | xargs -n1 date +%s -d | sort | tail -n1)"
+ DIFFERENCE_START_TIME=$(expr ${LATEST_IPV6NAT} - ${LATEST_STARTED} 2>/dev/null)
+ if [[ "${DIFFERENCE_START_TIME}" -lt 30 ]]; then
+ err_count=$(( ${err_count} + 1 ))
+ fi
+ fi
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "IPv6 NAT" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 30
+ else
+ diff_c=0
+ sleep 300
+ fi
+ done
+ return 1
+}
+
+
+rspamd_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${RSPAMD_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ touch /tmp/rspamd-mailcow; echo "$(tail -50 /tmp/rspamd-mailcow)" > /tmp/rspamd-mailcow
+ host_ip=$(get_container_ip rspamd-mailcow)
+ err_c_cur=${err_count}
+ SCORE=$(echo 'To: null@localhost
+From: watchdog@localhost
+
+Empty
+' | usr/bin/curl --max-time 10 -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/scan | jq -rc .default.required_score)
+ if [[ ${SCORE} != "9999" ]]; then
+ echo "Rspamd settings check failed, score returned: ${SCORE}" 2>> /tmp/rspamd-mailcow 1>&2
+ err_count=$(( ${err_count} + 1))
+ else
+ echo "Rspamd settings check succeeded, score returned: ${SCORE}" 2>> /tmp/rspamd-mailcow 1>&2
+ fi
+ # A dirty hack until a PING PONG event is implemented to worker proxy
+ # We expect an empty response, not a timeout
+ if [ "$(curl -s --max-time 10 ${host_ip}:9900 2> /dev/null ; echo $?)" == "28" ]; then
+ echo "Milter check failed" 2>> /tmp/rspamd-mailcow 1>&2; err_count=$(( ${err_count} + 1 ));
+ else
+ echo "Milter check succeeded" 2>> /tmp/rspamd-mailcow 1>&2
+ fi
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Rspamd" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Probes the olefy macro-scanner on TCP port 10055 with a PING; returns 1
+# when OLEFY_THRESHOLD errors accumulate.
+olefy_checks() {
+ err_count=0
+ diff_c=0
+ THRESHOLD=${OLEFY_THRESHOLD}
+ # Reduce error count by 2 after restarting an unhealthy container
+ trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
+ while [ ${err_count} -lt ${THRESHOLD} ]; do
+ # Keep only the last 50 log lines
+ touch /tmp/olefy-mailcow; echo "$(tail -50 /tmp/olefy-mailcow)" > /tmp/olefy-mailcow
+ host_ip=$(get_container_ip olefy-mailcow)
+ err_c_cur=${err_count}
+ /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 10055 -s "PING\n" 2>> /tmp/olefy-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
+ # Healthy pass: decay err_count by one (never below 0), report +1 trend
+ [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
+ [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
+ progress "Olefy" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
+ if [[ $? == 10 ]]; then
+ diff_c=0
+ sleep 1
+ else
+ diff_c=0
+ sleep $(( ( RANDOM % 60 ) + 20 ))
+ fi
+ done
+ return 1
+}
+
+# Notify about start
+if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then
+ mail_error "watchdog-mailcow" "Watchdog started monitoring mailcow."
+fi
+
+# Create watchdog agents
+# Pattern for every agent: run the check function in a background subshell
+# loop; when it hits its threshold (returns 1), write the service name into
+# /tmp/com_pipe for the dispatcher and start the check over. The subshell
+# PID is collected in BACKGROUND_TASKS for supervision.
+
+(
+while true; do
+ if ! nginx_checks; then
+ log_msg "Nginx hit error limit"
+ echo nginx-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned nginx_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+if [[ ${WATCHDOG_EXTERNAL_CHECKS} =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+(
+while true; do
+ if ! external_checks; then
+ log_msg "External checks hit error limit"
+ echo external_checks > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned external_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+fi
+
+if [[ ${WATCHDOG_MYSQL_REPLICATION_CHECKS} =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+(
+while true; do
+ if ! mysql_repl_checks; then
+ log_msg "MySQL replication check hit error limit"
+ echo mysql_repl_checks > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned mysql_repl_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+fi
+
+(
+while true; do
+ if ! mysql_checks; then
+ log_msg "MySQL hit error limit"
+ echo mysql-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned mysql_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! redis_checks; then
+ log_msg "Local Redis hit error limit"
+ echo redis-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned redis_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! phpfpm_checks; then
+ log_msg "PHP-FPM hit error limit"
+ echo php-fpm-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned phpfpm_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+if [[ "${SKIP_SOGO}" =~ ^([nN][oO]|[nN])+$ ]]; then
+(
+while true; do
+ if ! sogo_checks; then
+ log_msg "SOGo hit error limit"
+ echo sogo-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned sogo_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+fi
+
+if [ ${CHECK_UNBOUND} -eq 1 ]; then
+(
+while true; do
+ if ! unbound_checks; then
+ log_msg "Unbound hit error limit"
+ echo unbound-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned unbound_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+fi
+
+if [[ "${SKIP_CLAMD}" =~ ^([nN][oO]|[nN])+$ ]]; then
+(
+while true; do
+ if ! clamd_checks; then
+ log_msg "Clamd hit error limit"
+ echo clamd-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned clamd_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+fi
+
+(
+while true; do
+ if ! postfix_checks; then
+ log_msg "Postfix hit error limit"
+ echo postfix-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned postfix_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! mailq_checks; then
+ log_msg "Mail queue hit error limit"
+ echo mail_queue_status > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned mailq_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! dovecot_checks; then
+ log_msg "Dovecot hit error limit"
+ echo dovecot-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned dovecot_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! dovecot_repl_checks; then
+ log_msg "Dovecot hit error limit"
+ echo dovecot_repl_checks > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned dovecot_repl_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! rspamd_checks; then
+ log_msg "Rspamd hit error limit"
+ echo rspamd-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned rspamd_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! ratelimit_checks; then
+ log_msg "Ratelimit hit error limit"
+ echo ratelimit > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned ratelimit_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! fail2ban_checks; then
+ log_msg "Fail2ban hit error limit"
+ echo fail2ban > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned fail2ban_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! cert_checks; then
+ log_msg "Cert check hit error limit"
+ echo certcheck > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned cert_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! olefy_checks; then
+ log_msg "Olefy hit error limit"
+ echo olefy-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned olefy_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! acme_checks; then
+ log_msg "ACME client hit error limit"
+ echo acme-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned acme_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+(
+while true; do
+ if ! ipv6nat_checks; then
+ log_msg "IPv6 NAT warning: ipv6nat-mailcow container was not started at least 30s after siblings (not an error)"
+ echo ipv6nat-mailcow > /tmp/com_pipe
+ fi
+done
+) &
+PID=$!
+echo "Spawned ipv6nat_checks with PID ${PID}"
+BACKGROUND_TASKS+=(${PID})
+
+# Supervisor: if any agent subshell has died, terminate PID 1 so Docker
+# restarts the whole watchdog container (restart: always) and respawns
+# a clean set of agents. Checks one worker every 10 seconds, round-robin.
+(
+while true; do
+  for worker_pid in ${BACKGROUND_TASKS[*]}; do
+    kill -0 ${worker_pid} 1>&2 || {
+      log_msg "Worker ${worker_pid} died, stopping watchdog and waiting for respawn..."
+      kill -TERM 1
+    }
+    sleep 10
+  done
+done
+) &
+
+# Monitor dockerapi
+(
+while true; do
+ while nc -z dockerapi 443; do
+ sleep 3
+ done
+ log_msg "Cannot find dockerapi-mailcow, waiting to recover..."
+ kill -STOP ${BACKGROUND_TASKS[*]}
+ until nc -z dockerapi 443; do
+ sleep 3
+ done
+ kill -CONT ${BACKGROUND_TASKS[*]}
+ kill -USR1 ${BACKGROUND_TASKS[*]}
+done
+) &
+
+# Actions when threshold limit is reached
+# Main dispatcher: blocks on /tmp/com_pipe until an agent writes a service
+# token, then either sends a notification mail (informational tokens) or
+# restarts the affected container via dockerapi (tokens matching *-mailcow).
+while true; do
+  CONTAINER_ID=
+  HAS_INITDB=
+  # Blocks here until one of the agents writes a token into the pipe
+  read com_pipe_answer </tmp/com_pipe
+  # Print the per-service diagnostic file left behind by the check, if any
+  if [ -s "/tmp/${com_pipe_answer}" ]; then
+    cat "/tmp/${com_pipe_answer}"
+  fi
+  # Informational tokens: log + optional notification mail, never a restart
+  if [[ ${com_pipe_answer} == "ratelimit" ]]; then
+    log_msg "At least one ratelimit was applied"
+    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
+  elif [[ ${com_pipe_answer} == "mail_queue_status" ]]; then
+    log_msg "Mail queue status is critical"
+    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
+  elif [[ ${com_pipe_answer} == "external_checks" ]]; then
+    log_msg "Your mailcow is an open relay!"
+    # Define $2 to override message text, else print service was restarted at ...
+    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!"
+  elif [[ ${com_pipe_answer} == "mysql_repl_checks" ]]; then
+    log_msg "MySQL replication is not working properly"
+    # Define $2 to override message text, else print service was restarted at ...
+    # Once mail per 10 minutes
+    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the SQL replication status" 600
+  elif [[ ${com_pipe_answer} == "dovecot_repl_checks" ]]; then
+    log_msg "Dovecot replication is not working properly"
+    # Define $2 to override message text, else print service was restarted at ...
+    # Once mail per 10 minutes
+    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600
+  elif [[ ${com_pipe_answer} == "certcheck" ]]; then
+    log_msg "Certificates are about to expire"
+    # Define $2 to override message text, else print service was restarted at ...
+    # Only mail once a day
+    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please renew your certificate" 86400
+  elif [[ ${com_pipe_answer} == "acme-mailcow" ]]; then
+    log_msg "acme-mailcow did not complete successfully"
+    # Define $2 to override message text, else print service was restarted at ...
+    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check acme-mailcow for further information."
+  elif [[ ${com_pipe_answer} == "fail2ban" ]]; then
+    # Read (then delete) the list of freshly banned hosts from Redis;
+    # word-splitting into an array is intentional here.
+    F2B_RES=($(timeout 4s ${REDIS_CMDLINE} --raw GET F2B_RES 2> /dev/null))
+    if [[ ! -z "${F2B_RES}" ]]; then
+      ${REDIS_CMDLINE} DEL F2B_RES > /dev/null
+      host=
+      for host in "${F2B_RES[@]}"; do
+        log_msg "Banned ${host}"
+        # whois output is written to /tmp/fail2ban; dumped by the block at
+        # the top of this loop on the next "fail2ban" token
+        rm /tmp/fail2ban 2> /dev/null
+        timeout 2s whois "${host}" > /tmp/fail2ban
+        [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && mail_error "${com_pipe_answer}" "IP ban: ${host}"
+      done
+    fi
+  # Any remaining token ending in -mailcow names a restartable container
+  elif [[ ${com_pipe_answer} =~ .+-mailcow ]]; then
+    # Pause all agents while the restart is in progress
+    kill -STOP ${BACKGROUND_TASKS[*]}
+    sleep 10
+    # Resolve the container ID via dockerapi, matching both the compose
+    # service name and the compose project (lowercased) labels
+    CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
+    if [[ ! -z ${CONTAINER_ID} ]]; then
+      if [[ "${com_pipe_answer}" == "php-fpm-mailcow" ]]; then
+        # Non-empty when php-fpm is currently running the DB init script
+        HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
+      fi
+      # Seconds since the container was (re)started
+      S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
+      if [ ${S_RUNNING} -lt 360 ]; then
+        log_msg "Container is running for less than 360 seconds, skipping action..."
+      elif [[ ! -z ${HAS_INITDB} ]]; then
+        log_msg "Database is being initialized by php-fpm-mailcow, not restarting but delaying checks for a minute..."
+        sleep 60
+      else
+        log_msg "Sending restart command to ${CONTAINER_ID}..."
+        curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/restart
+        # ipv6nat restarts are expected operational noise — no mail for them
+        if [[ ${com_pipe_answer} != "ipv6nat-mailcow" ]]; then
+          [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
+        fi
+        log_msg "Wait for restarted container to settle and continue watching..."
+        sleep 35
+      fi
+    fi
+    # Resume agents; USR1 presumably resets their error counters
+    # (handler defined in the agents' check functions — not visible here)
+    kill -CONT ${BACKGROUND_TASKS[*]}
+    sleep 1
+    kill -USR1 ${BACKGROUND_TASKS[*]}
+  fi
+done