git subrepo commit mailcow/src/mailcow-dockerized
subrepo: subdir: "mailcow/src/mailcow-dockerized"
merged: "308860af"
upstream: origin: "https://github.com/mailcow/mailcow-dockerized.git"
branch: "master"
commit: "3f1a5af8"
git-subrepo: version: "0.4.5"
origin: "???"
commit: "???"
Change-Id: I5d51c14b45db54fe706be40a591ddbfcea50d4b0
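For reference, the header above is what git-subrepo 0.4.5 writes when the subdir is synced against upstream master (3f1a5af8); a sketch of the workflow that produces it (exact flags and order assumed):

  git subrepo pull mailcow/src/mailcow-dockerized     # merge upstream master into the subdir
  git subrepo commit mailcow/src/mailcow-dockerized   # record the merged state (308860af)
  git subrepo status mailcow/src/mailcow-dockerized   # inspect the recorded header fields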
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/Dockerfile
index 82369a8..571c3d0 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:3.14
+FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/obtain-certificate.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/obtain-certificate.sh
index a151dff..7434411 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/obtain-certificate.sh
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/acme/obtain-certificate.sh
@@ -80,7 +80,7 @@
printf "[SAN]\nsubjectAltName=" > /tmp/_SAN
printf "DNS:%s," "${CERT_DOMAINS[@]}" >> /tmp/_SAN
sed -i '$s/,$//' /tmp/_SAN
-openssl req -new -sha256 -key ${KEY} -subj "/" -reqexts SAN -config <(cat /etc/ssl/openssl.cnf /tmp/_SAN) > ${CSR}
+openssl req -new -sha256 -key ${KEY} -subj "/" -reqexts SAN -config <(cat "$(openssl version -d | sed 's/.*"\(.*\)"/\1/g')/openssl.cnf" /tmp/_SAN) > ${CSR}
# acme-tiny writes info to stderr and certificate to stdout
# The redirects will do the following:
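The rewritten openssl invocation derives the config directory at runtime instead of hard-coding /etc/ssl, since the path differs across distributions and Alpine releases. A sketch of what the embedded substitution evaluates to (output illustrative):

  $ openssl version -d
  OPENSSLDIR: "/etc/ssl"
  $ openssl version -d | sed 's/.*"\(.*\)"/\1/g'
  /etc/ssl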
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/backup/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/backup/Dockerfile
new file mode 100644
index 0000000..f9d849b
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/backup/Dockerfile
@@ -0,0 +1,3 @@
+FROM debian:bullseye-slim
+
+RUN apt update && apt install -y pigz
\ No newline at end of file
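pigz is presumably installed for multi-core compression in the backup job; a typical pattern (paths are placeholders):

  tar -cf - /backup_source | pigz -p "$(nproc)" > backup.tar.gz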
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/Dockerfile
index e14858f..91716b8 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/Dockerfile
@@ -1,60 +1,21 @@
-FROM debian:buster-slim
+FROM clamav/clamav:1.0_base
LABEL maintainer "André Peters <andre.peters@servercow.de>"
-ARG CLAMAV=0.103.4
-RUN apt-get update && apt-get install -y --no-install-recommends \
- ca-certificates \
- zlib1g-dev \
- libcurl4-openssl-dev \
- libncurses5-dev \
- libzip-dev \
- libpcre2-dev \
- libxml2-dev \
- libssl-dev \
- build-essential \
- libjson-c-dev \
- curl \
- bash \
- wget \
- tzdata \
- dnsutils \
+RUN apk upgrade --no-cache \
+ && apk add --update --no-cache \
rsync \
- dos2unix \
- netcat \
- && rm -rf /var/lib/apt/lists/* \
- && wget -O - https://fossies.org/linux/misc/clamav-${CLAMAV}.tar.gz | tar xfvz - \
- && cd clamav-${CLAMAV} \
- && ./configure \
- --prefix=/usr \
- --libdir=/usr/lib \
- --sysconfdir=/etc/clamav \
- --mandir=/usr/share/man \
- --infodir=/usr/share/info \
- --disable-llvm \
- --with-user=clamav \
- --with-group=clamav \
- --with-dbdir=/var/lib/clamav \
- --enable-clamdtop \
- --enable-bigstack \
- --with-pcre \
- && make -j4 \
- && make install \
- && make clean \
- && cd .. && rm -rf clamav-${CLAMAV} \
- && apt-get -y --auto-remove purge build-essential \
- && apt-get -y purge zlib1g-dev \
- libncurses5-dev \
- libzip-dev \
- libpcre2-dev \
- libxml2-dev \
- libssl-dev \
- libjson-c-dev \
- && addgroup --system --gid 700 clamav \
- && adduser --system --no-create-home --home /var/lib/clamav --uid 700 --gid 700 --disabled-login clamav \
- && rm -rf /tmp/* /var/tmp/*
+ bind-tools \
+ bash
-COPY clamd.sh ./
-COPY tini /sbin/tini
+# init
+COPY clamd.sh /clamd.sh
+RUN chmod +x /sbin/tini
-CMD ["/sbin/tini", "-g", "--", "/clamd.sh"]
+# healthcheck
+COPY healthcheck.sh /healthcheck.sh
+RUN chmod +x /healthcheck.sh
+HEALTHCHECK --start-period=6m CMD "/healthcheck.sh"
+
+ENTRYPOINT []
+CMD ["/sbin/tini", "-g", "--", "/clamd.sh"]
\ No newline at end of file
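With the HEALTHCHECK above, Docker probes /healthcheck.sh and gives the container a 6-minute grace period before failed probes count; the clamdcheck.sh it delegates to ships with the clamav/clamav base image. The resulting state can be observed with (container name is a placeholder):

  docker inspect --format '{{.State.Health.Status}}' <clamd-container>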
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/healthcheck.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/healthcheck.sh
new file mode 100755
index 0000000..6c18ac0
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/healthcheck.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+if [[ "${SKIP_CLAMD}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+ echo "SKIP_CLAMD=y, skipping ClamAV..."
+ exit 0
+fi
+
+# run clamd healthcheck
+/usr/local/bin/clamdcheck.sh
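The SKIP_CLAMD pattern accepts the usual y/yes spellings in any case. A quick sanity check of the regex (values illustrative):

  for v in y Y yes YES yEs n no 1; do
    [[ "$v" =~ ^([yY][eE][sS]|[yY])+$ ]] && echo "$v -> skip" || echo "$v -> check"
  done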
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/tini b/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/tini
deleted file mode 100755
index 03af82f..0000000
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/clamd/tini
+++ /dev/null
Binary files differ
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/Dockerfile
index b6eef46..97c3808 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:3.14
+FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
@@ -8,11 +8,14 @@
py3-pip \
openssl \
tzdata \
+ py3-psutil \
&& pip3 install --upgrade pip \
- docker \
- flask \
- flask-restful
+ fastapi \
+ uvicorn \
+ aiodocker \
+ redis
+COPY docker-entrypoint.sh /app/
COPY dockerapi.py /app/
-CMD ["python3", "-u", "/app/dockerapi.py"]
+ENTRYPOINT ["/bin/sh", "/app/docker-entrypoint.sh"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/docker-entrypoint.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/docker-entrypoint.sh
new file mode 100755
index 0000000..aab6cd5
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/docker-entrypoint.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes \
+  -keyout /app/dockerapi_key.pem \
+  -out /app/dockerapi_cert.pem \
+  -subj /CN=dockerapi/O=mailcow \
+  -addext subjectAltName=DNS:dockerapi
+
+uvicorn --host 0.0.0.0 --port 443 --ssl-certfile=/app/dockerapi_cert.pem --ssl-keyfile=/app/dockerapi_key.pem dockerapi:app
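With the entrypoint above, the key/cert pair is regenerated on every container start, so clients must expect a changing self-signed certificate. A quick way to inspect what was issued (a sketch; the -ext flag needs OpenSSL 1.1.1 or newer):

  openssl x509 -in /app/dockerapi_cert.pem -noout -subject -dates -ext subjectAltName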
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py
index 32824a2..34f9b06 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dockerapi/dockerapi.py
@@ -1,228 +1,377 @@
-#!/usr/bin/env python3
-
-from flask import Flask
-from flask_restful import Resource, Api
-from flask import jsonify
-from flask import Response
-from flask import request
-from threading import Thread
-import docker
-import uuid
-import signal
+from fastapi import FastAPI, Response, Request
+import aiodocker
+import psutil
+import sys
+import re
import time
import os
-import re
-import sys
-import ssl
-import socket
-import subprocess
-import traceback
+import json
+import asyncio
+import redis
+from datetime import datetime
-docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
-app = Flask(__name__)
-api = Api(app)
-class containers_get(Resource):
- def get(self):
- containers = {}
+containerIds_to_update = []
+host_stats_isUpdating = False
+app = FastAPI()
+
+
+@app.get("/host/stats")
+async def get_host_update_stats():
+ global host_stats_isUpdating
+
+  if not host_stats_isUpdating:
+ print("start host stats task")
+ asyncio.create_task(get_host_stats())
+ host_stats_isUpdating = True
+
+ while True:
+ if redis_client.exists('host_stats'):
+ break
+ print("wait for host_stats results")
+ await asyncio.sleep(1.5)
+
+
+ print("host stats pulled")
+ stats = json.loads(redis_client.get('host_stats'))
+ return Response(content=json.dumps(stats, indent=4), media_type="application/json")
+
+@app.get("/containers/{container_id}/json")
+async def get_container(container_id : str):
+ if container_id and container_id.isalnum():
try:
- for container in docker_client.containers.list(all=True):
- containers.update({container.attrs['Id']: container.attrs})
- return containers
+ for container in (await async_docker_client.containers.list()):
+ if container._id == container_id:
+ container_info = await container.show()
+ return Response(content=json.dumps(container_info, indent=4), media_type="application/json")
+
+ res = {
+ "type": "danger",
+ "msg": "no container found"
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
except Exception as e:
- return jsonify(type='danger', msg=str(e))
+ res = {
+ "type": "danger",
+ "msg": str(e)
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
+ else:
+ res = {
+ "type": "danger",
+ "msg": "no or invalid id defined"
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
-class container_get(Resource):
- def get(self, container_id):
- if container_id and container_id.isalnum():
- try:
- for container in docker_client.containers.list(all=True, filters={"id": container_id}):
- return container.attrs
- except Exception as e:
- return jsonify(type='danger', msg=str(e))
- else:
- return jsonify(type='danger', msg='no or invalid id defined')
+@app.get("/containers/json")
+async def get_containers():
+ containers = {}
+ try:
+ for container in (await async_docker_client.containers.list()):
+ container_info = await container.show()
+ containers.update({container_info['Id']: container_info})
+ return Response(content=json.dumps(containers, indent=4), media_type="application/json")
+ except Exception as e:
+ res = {
+ "type": "danger",
+ "msg": str(e)
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
-class container_post(Resource):
- def post(self, container_id, post_action):
- if container_id and container_id.isalnum() and post_action:
- try:
- """Dispatch container_post api call"""
- if post_action == 'exec':
- if not request.json or not 'cmd' in request.json:
- return jsonify(type='danger', msg='cmd is missing')
- if not request.json or not 'task' in request.json:
- return jsonify(type='danger', msg='task is missing')
+@app.post("/containers/{container_id}/{post_action}")
+async def post_containers(container_id : str, post_action : str, request: Request):
+  try:
+ request_json = await request.json()
+ except Exception as err:
+ request_json = {}
- api_call_method_name = '__'.join(['container_post', str(post_action), str(request.json['cmd']), str(request.json['task']) ])
- else:
- api_call_method_name = '__'.join(['container_post', str(post_action) ])
+ if container_id and container_id.isalnum() and post_action:
+ try:
+ """Dispatch container_post api call"""
+ if post_action == 'exec':
+ if not request_json or not 'cmd' in request_json:
+ res = {
+ "type": "danger",
+ "msg": "cmd is missing"
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
+ if not request_json or not 'task' in request_json:
+ res = {
+ "type": "danger",
+ "msg": "task is missing"
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
- api_call_method = getattr(self, api_call_method_name, lambda container_id: jsonify(type='danger', msg='container_post - unknown api call'))
+ api_call_method_name = '__'.join(['container_post', str(post_action), str(request_json['cmd']), str(request_json['task']) ])
+ else:
+ api_call_method_name = '__'.join(['container_post', str(post_action) ])
+
+ docker_utils = DockerUtils(async_docker_client)
+ api_call_method = getattr(docker_utils, api_call_method_name, lambda container_id: Response(content=json.dumps({'type': 'danger', 'msg':'container_post - unknown api call' }, indent=4), media_type="application/json"))
- print("api call: %s, container_id: %s" % (api_call_method_name, container_id))
- return api_call_method(container_id)
- except Exception as e:
- print("error - container_post: %s" % str(e))
- return jsonify(type='danger', msg=str(e))
+ print("api call: %s, container_id: %s" % (api_call_method_name, container_id))
+ return await api_call_method(container_id, request_json)
+ except Exception as e:
+ print("error - container_post: %s" % str(e))
+ res = {
+ "type": "danger",
+ "msg": str(e)
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
- else:
- return jsonify(type='danger', msg='invalid container id or missing action')
+ else:
+ res = {
+ "type": "danger",
+ "msg": "invalid container id or missing action"
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
+@app.post("/container/{container_id}/stats/update")
+async def post_container_update_stats(container_id : str):
+ global containerIds_to_update
+
+ # start update task for container if no task is running
+ if container_id not in containerIds_to_update:
+ asyncio.create_task(get_container_stats(container_id))
+ containerIds_to_update.append(container_id)
+
+ while True:
+ if redis_client.exists(container_id + '_stats'):
+ break
+ await asyncio.sleep(1.5)
+
+ stats = json.loads(redis_client.get(container_id + '_stats'))
+ return Response(content=json.dumps(stats, indent=4), media_type="application/json")
+
+
+
+
+class DockerUtils:
+ def __init__(self, docker_client):
+ self.docker_client = docker_client
# api call: container_post - post_action: stop
- def container_post__stop(self, container_id):
- for container in docker_client.containers.list(all=True, filters={"id": container_id}):
- container.stop()
- return jsonify(type='success', msg='command completed successfully')
-
+ async def container_post__stop(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ await container.stop()
+ res = {
+ 'type': 'success',
+ 'msg': 'command completed successfully'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: start
- def container_post__start(self, container_id):
- for container in docker_client.containers.list(all=True, filters={"id": container_id}):
- container.start()
- return jsonify(type='success', msg='command completed successfully')
+ async def container_post__start(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ await container.start()
+ res = {
+ 'type': 'success',
+ 'msg': 'command completed successfully'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: restart
- def container_post__restart(self, container_id):
- for container in docker_client.containers.list(all=True, filters={"id": container_id}):
- container.restart()
- return jsonify(type='success', msg='command completed successfully')
+ async def container_post__restart(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ await container.restart()
+ res = {
+ 'type': 'success',
+ 'msg': 'command completed successfully'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: top
- def container_post__top(self, container_id):
- for container in docker_client.containers.list(all=True, filters={"id": container_id}):
- return jsonify(type='success', msg=container.top())
+ async def container_post__top(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ ps_exec = await container.exec("ps")
+ async with ps_exec.start(detach=False) as stream:
+ ps_return = await stream.read_out()
-
- # api call: container_post - post_action: stats
- def container_post__stats(self, container_id):
- for container in docker_client.containers.list(all=True, filters={"id": container_id}):
- for stat in container.stats(decode=True, stream=True):
- return jsonify(type='success', msg=stat )
+ exec_details = await ps_exec.inspect()
+ if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
+ res = {
+ 'type': 'success',
+ 'msg': ps_return.data.decode('utf-8')
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
+ else:
+ res = {
+ 'type': 'danger',
+ 'msg': ''
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: mailq - task: delete
- def container_post__exec__mailq__delete(self, container_id):
- if 'items' in request.json:
+ async def container_post__exec__mailq__delete(self, container_id, request_json):
+ if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
- filtered_qids = filter(r.match, request.json['items'])
+ filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-d %s' % i for i in filtered_qids]
- sanitized_string = str(' '.join(flagged_qids));
+ sanitized_string = str(' '.join(flagged_qids))
- for container in docker_client.containers.list(filters={"id": container_id}):
- postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
- return exec_run_handler('generic', postsuper_r)
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ postsuper_r_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
+ return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: hold
- def container_post__exec__mailq__hold(self, container_id):
- if 'items' in request.json:
+ async def container_post__exec__mailq__hold(self, container_id, request_json):
+ if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
- filtered_qids = filter(r.match, request.json['items'])
+ filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-h %s' % i for i in filtered_qids]
- sanitized_string = str(' '.join(flagged_qids));
+ sanitized_string = str(' '.join(flagged_qids))
- for container in docker_client.containers.list(filters={"id": container_id}):
- postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
- return exec_run_handler('generic', postsuper_r)
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ postsuper_r_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
+ return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: cat
- def container_post__exec__mailq__cat(self, container_id):
- if 'items' in request.json:
+ async def container_post__exec__mailq__cat(self, container_id, request_json):
+ if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
- filtered_qids = filter(r.match, request.json['items'])
+ filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
- sanitized_string = str(' '.join(filtered_qids));
+ sanitized_string = str(' '.join(filtered_qids))
- for container in docker_client.containers.list(filters={"id": container_id}):
- postcat_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postcat -q " + sanitized_string], user='postfix')
- if not postcat_return:
- postcat_return = 'err: invalid'
- return exec_run_handler('utf8_text_only', postcat_return)
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ postcat_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postcat -q " + sanitized_string], user='postfix')
+ return await exec_run_handler('utf8_text_only', postcat_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: unhold
- def container_post__exec__mailq__unhold(self, container_id):
- if 'items' in request.json:
+ async def container_post__exec__mailq__unhold(self, container_id, request_json):
+ if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
- filtered_qids = filter(r.match, request.json['items'])
+ filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-H %s' % i for i in filtered_qids]
- sanitized_string = str(' '.join(flagged_qids));
+ sanitized_string = str(' '.join(flagged_qids))
- for container in docker_client.containers.list(filters={"id": container_id}):
- postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
- return exec_run_handler('generic', postsuper_r)
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ postsuper_r_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
+ return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: deliver
- def container_post__exec__mailq__deliver(self, container_id):
- if 'items' in request.json:
+ async def container_post__exec__mailq__deliver(self, container_id, request_json):
+ if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
- filtered_qids = filter(r.match, request.json['items'])
+ filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-i %s' % i for i in filtered_qids]
- for container in docker_client.containers.list(filters={"id": container_id}):
- for i in flagged_qids:
- postqueue_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postqueue " + i], user='postfix')
- # todo: check each exit code
- return jsonify(type='success', msg=str("Scheduled immediate delivery"))
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ for i in flagged_qids:
+ postsuper_r_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postqueue " + i], user='postfix')
+ async with postsuper_r_exec.start(detach=False) as stream:
+ postsuper_r_return = await stream.read_out()
+ # todo: check each exit code
+ res = {
+ 'type': 'success',
+ 'msg': 'Scheduled immediate delivery'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: mailq - task: list
- def container_post__exec__mailq__list(self, container_id):
- for container in docker_client.containers.list(filters={"id": container_id}):
- mailq_return = container.exec_run(["/usr/sbin/postqueue", "-j"], user='postfix')
- return exec_run_handler('utf8_text_only', mailq_return)
+ async def container_post__exec__mailq__list(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ mailq_exec = await container.exec(["/usr/sbin/postqueue", "-j"], user='postfix')
+ return await exec_run_handler('utf8_text_only', mailq_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: flush
- def container_post__exec__mailq__flush(self, container_id):
- for container in docker_client.containers.list(filters={"id": container_id}):
- postqueue_r = container.exec_run(["/usr/sbin/postqueue", "-f"], user='postfix')
- return exec_run_handler('generic', postqueue_r)
+ async def container_post__exec__mailq__flush(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ postsuper_r_exec = await container.exec(["/usr/sbin/postqueue", "-f"], user='postfix')
+ return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: super_delete
- def container_post__exec__mailq__super_delete(self, container_id):
- for container in docker_client.containers.list(filters={"id": container_id}):
- postsuper_r = container.exec_run(["/usr/sbin/postsuper", "-d", "ALL"])
- return exec_run_handler('generic', postsuper_r)
+ async def container_post__exec__mailq__super_delete(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ postsuper_r_exec = await container.exec(["/usr/sbin/postsuper", "-d", "ALL"])
+ return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: system - task: fts_rescan
- def container_post__exec__system__fts_rescan(self, container_id):
- if 'username' in request.json:
- for container in docker_client.containers.list(filters={"id": container_id}):
- rescan_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -u '" + request.json['username'].replace("'", "'\\''") + "'"], user='vmail')
- if rescan_return.exit_code == 0:
- return jsonify(type='success', msg='fts_rescan: rescan triggered')
- else:
- return jsonify(type='warning', msg='fts_rescan error')
+ async def container_post__exec__system__fts_rescan(self, container_id, request_json):
+ if 'username' in request_json:
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ rescan_exec = await container.exec(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -u '" + request_json['username'].replace("'", "'\\''") + "'"], user='vmail')
+ async with rescan_exec.start(detach=False) as stream:
+ rescan_return = await stream.read_out()
- if 'all' in request.json:
- for container in docker_client.containers.list(filters={"id": container_id}):
- rescan_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -A"], user='vmail')
- if rescan_return.exit_code == 0:
- return jsonify(type='success', msg='fts_rescan: rescan triggered')
- else:
- return jsonify(type='warning', msg='fts_rescan error')
+ exec_details = await rescan_exec.inspect()
+ if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
+ res = {
+ 'type': 'success',
+ 'msg': 'fts_rescan: rescan triggered'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
+ else:
+ res = {
+ 'type': 'warning',
+ 'msg': 'fts_rescan error'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
+
+ if 'all' in request_json:
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ rescan_exec = await container.exec(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -A"], user='vmail')
+ async with rescan_exec.start(detach=False) as stream:
+ rescan_return = await stream.read_out()
+
+ exec_details = await rescan_exec.inspect()
+ if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
+ res = {
+ 'type': 'success',
+ 'msg': 'fts_rescan: rescan triggered'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
+ else:
+ res = {
+ 'type': 'warning',
+ 'msg': 'fts_rescan error'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: system - task: df
- def container_post__exec__system__df(self, container_id):
- if 'dir' in request.json:
- for container in docker_client.containers.list(filters={"id": container_id}):
- df_return = container.exec_run(["/bin/bash", "-c", "/bin/df -H '" + request.json['dir'].replace("'", "'\\''") + "' | /usr/bin/tail -n1 | /usr/bin/tr -s [:blank:] | /usr/bin/tr ' ' ','"], user='nobody')
- if df_return.exit_code == 0:
- return df_return.output.decode('utf-8').rstrip()
- else:
- return "0,0,0,0,0,0"
+ async def container_post__exec__system__df(self, container_id, request_json):
+ if 'dir' in request_json:
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ df_exec = await container.exec(["/bin/bash", "-c", "/bin/df -H '" + request_json['dir'].replace("'", "'\\''") + "' | /usr/bin/tail -n1 | /usr/bin/tr -s [:blank:] | /usr/bin/tr ' ' ','"], user='nobody')
+ async with df_exec.start(detach=False) as stream:
+ df_return = await stream.read_out()
+
+ exec_details = await df_exec.inspect()
+ if exec_details["ExitCode"] is None or exec_details["ExitCode"] == 0:
+ return df_return.data.decode('utf-8').rstrip()
+ else:
+ return "0,0,0,0,0,0"
# api call: container_post - post_action: exec - cmd: system - task: mysql_upgrade
@@ -234,168 +383,188 @@
return jsonify(type='success', msg='mysql_tzinfo_to_sql: not touching fake MySQL', text='')
# api call: container_post - post_action: exec - cmd: reload - task: dovecot
- def container_post__exec__reload__dovecot(self, container_id):
- for container in docker_client.containers.list(filters={"id": container_id}):
- reload_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/dovecot reload"])
- return exec_run_handler('generic', reload_return)
+ async def container_post__exec__reload__dovecot(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ reload_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/dovecot reload"])
+ return await exec_run_handler('generic', reload_exec)
# api call: container_post - post_action: exec - cmd: reload - task: postfix
- def container_post__exec__reload__postfix(self, container_id):
- for container in docker_client.containers.list(filters={"id": container_id}):
- reload_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postfix reload"])
- return exec_run_handler('generic', reload_return)
+ async def container_post__exec__reload__postfix(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ reload_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postfix reload"])
+ return await exec_run_handler('generic', reload_exec)
# api call: container_post - post_action: exec - cmd: reload - task: nginx
- def container_post__exec__reload__nginx(self, container_id):
- for container in docker_client.containers.list(filters={"id": container_id}):
- reload_return = container.exec_run(["/bin/sh", "-c", "/usr/sbin/nginx -s reload"])
- return exec_run_handler('generic', reload_return)
+ async def container_post__exec__reload__nginx(self, container_id, request_json):
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ reload_exec = await container.exec(["/bin/sh", "-c", "/usr/sbin/nginx -s reload"])
+ return await exec_run_handler('generic', reload_exec)
# api call: container_post - post_action: exec - cmd: sieve - task: list
- def container_post__exec__sieve__list(self, container_id):
- if 'username' in request.json:
- for container in docker_client.containers.list(filters={"id": container_id}):
- sieve_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm sieve list -u '" + request.json['username'].replace("'", "'\\''") + "'"])
- return exec_run_handler('utf8_text_only', sieve_return)
+ async def container_post__exec__sieve__list(self, container_id, request_json):
+ if 'username' in request_json:
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ sieve_exec = await container.exec(["/bin/bash", "-c", "/usr/bin/doveadm sieve list -u '" + request_json['username'].replace("'", "'\\''") + "'"])
+ return await exec_run_handler('utf8_text_only', sieve_exec)
# api call: container_post - post_action: exec - cmd: sieve - task: print
- def container_post__exec__sieve__print(self, container_id):
- if 'username' in request.json and 'script_name' in request.json:
- for container in docker_client.containers.list(filters={"id": container_id}):
- cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request.json['username'].replace("'", "'\\''") + "' '" + request.json['script_name'].replace("'", "'\\''") + "'"]
- sieve_return = container.exec_run(cmd)
- return exec_run_handler('utf8_text_only', sieve_return)
+ async def container_post__exec__sieve__print(self, container_id, request_json):
+ if 'username' in request_json and 'script_name' in request_json:
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request_json['username'].replace("'", "'\\''") + "' '" + request_json['script_name'].replace("'", "'\\''") + "'"]
+ sieve_exec = await container.exec(cmd)
+ return await exec_run_handler('utf8_text_only', sieve_exec)
# api call: container_post - post_action: exec - cmd: maildir - task: cleanup
- def container_post__exec__maildir__cleanup(self, container_id):
- if 'maildir' in request.json:
- for container in docker_client.containers.list(filters={"id": container_id}):
- sane_name = re.sub(r'\W+', '', request.json['maildir'])
- cmd = ["/bin/bash", "-c", "if [[ -d '/var/vmail/" + request.json['maildir'].replace("'", "'\\''") + "' ]]; then /bin/mv '/var/vmail/" + request.json['maildir'].replace("'", "'\\''") + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "'; fi"]
- maildir_cleanup = container.exec_run(cmd, user='vmail')
- return exec_run_handler('generic', maildir_cleanup)
-
-
+ async def container_post__exec__maildir__cleanup(self, container_id, request_json):
+ if 'maildir' in request_json:
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+ sane_name = re.sub(r'\W+', '', request_json['maildir'])
+ cmd = ["/bin/bash", "-c", "if [[ -d '/var/vmail/" + request_json['maildir'].replace("'", "'\\''") + "' ]]; then /bin/mv '/var/vmail/" + request_json['maildir'].replace("'", "'\\''") + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "'; fi"]
+ maildir_cleanup_exec = await container.exec(cmd, user='vmail')
+ return await exec_run_handler('generic', maildir_cleanup_exec)
# api call: container_post - post_action: exec - cmd: rspamd - task: worker_password
- def container_post__exec__rspamd__worker_password(self, container_id):
- if 'raw' in request.json:
- for container in docker_client.containers.list(filters={"id": container_id}):
- cmd = "/usr/bin/rspamadm pw -e -p '" + request.json['raw'].replace("'", "'\\''") + "' 2> /dev/null"
- cmd_response = exec_cmd_container(container, cmd, user="_rspamd")
- matched = False
- for line in cmd_response.split("\n"):
- if '$2$' in line:
- hash = line.strip()
- hash_out = re.search('\$2\$.+$', hash).group(0)
- rspamd_passphrase_hash = re.sub('[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
+ async def container_post__exec__rspamd__worker_password(self, container_id, request_json):
+ if 'raw' in request_json:
+ for container in (await self.docker_client.containers.list()):
+ if container._id == container_id:
+
+ cmd = "./set_worker_password.sh '" + request_json['raw'].replace("'", "'\\''") + "' 2> /dev/null"
+ rspamd_password_exec = await container.exec(cmd, user='_rspamd')
+ async with rspamd_password_exec.start(detach=False) as stream:
+ rspamd_password_return = await stream.read_out()
- rspamd_password_filename = "/etc/rspamd/override.d/worker-controller-password.inc"
- cmd = '''/bin/echo 'enable_password = "%s";' > %s && cat %s''' % (rspamd_passphrase_hash, rspamd_password_filename, rspamd_password_filename)
- cmd_response = exec_cmd_container(container, cmd, user="_rspamd")
+ matched = False
+ if "OK" in rspamd_password_return.data.decode('utf-8'):
+ matched = True
+ await container.restart()
- if rspamd_passphrase_hash.startswith("$2$") and rspamd_passphrase_hash in cmd_response:
- container.restart()
- matched = True
+ if matched:
+ res = {
+ 'type': 'success',
+ 'msg': 'command completed successfully'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
+ else:
+ res = {
+ 'type': 'danger',
+ 'msg': 'command did not complete'
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
- if matched:
- return jsonify(type='success', msg='command completed successfully')
- else:
- return jsonify(type='danger', msg='command did not complete')
-def exec_cmd_container(container, cmd, user, timeout=2, shell_cmd="/bin/bash"):
- def recv_socket_data(c_socket, timeout):
- c_socket.setblocking(0)
- total_data=[];
- data='';
- begin=time.time()
- while True:
- if total_data and time.time()-begin > timeout:
- break
- elif time.time()-begin > timeout*2:
- break
- try:
- data = c_socket.recv(8192)
- if data:
- total_data.append(data.decode('utf-8'))
- #change the beginning time for measurement
- begin=time.time()
- else:
- #sleep for sometime to indicate a gap
- time.sleep(0.1)
- break
- except:
- pass
- return ''.join(total_data)
+async def exec_run_handler(type, exec_obj):
+ async with exec_obj.start(detach=False) as stream:
+ exec_return = await stream.read_out()
- try :
- socket = container.exec_run([shell_cmd], stdin=True, socket=True, user=user).output._sock
- if not cmd.endswith("\n"):
- cmd = cmd + "\n"
- socket.send(cmd.encode('utf-8'))
- data = recv_socket_data(socket, timeout)
- socket.close()
- return data
+ if exec_return is None:
+ exec_return = ""
+ else:
+ exec_return = exec_return.data.decode('utf-8')
- except Exception as e:
- print("error - exec_cmd_container: %s" % str(e))
- traceback.print_exc(file=sys.stdout)
-
-def exec_run_handler(type, output):
- if type == 'generic':
- if output.exit_code == 0:
- return jsonify(type='success', msg='command completed successfully')
+ if type == 'generic':
+ exec_details = await exec_obj.inspect()
+ if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
+ res = {
+ "type": "success",
+ "msg": "command completed successfully"
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
- return jsonify(type='danger', msg='command failed: ' + output.output.decode('utf-8'))
+ res = {
+ "type": "success",
+ "msg": "'command failed: " + exec_return
+ }
+ return Response(content=json.dumps(res, indent=4), media_type="application/json")
if type == 'utf8_text_only':
- r = Response(response=output.output.decode('utf-8'), status=200, mimetype="text/plain")
- r.headers["Content-Type"] = "text/plain; charset=utf-8"
- return r
+ return Response(content=exec_return, media_type="text/plain")
-class GracefulKiller:
- kill_now = False
- def __init__(self):
- signal.signal(signal.SIGINT, self.exit_gracefully)
- signal.signal(signal.SIGTERM, self.exit_gracefully)
+async def get_host_stats(wait=5):
+ global host_stats_isUpdating
- def exit_gracefully(self, signum, frame):
- self.kill_now = True
-
-def create_self_signed_cert():
- process = subprocess.Popen(
- "openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes -keyout /app/dockerapi_key.pem -out /app/dockerapi_cert.pem -subj /CN=dockerapi/O=mailcow -addext subjectAltName=DNS:dockerapi".split(),
- stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell=False
- )
- process.wait()
-
-def startFlaskAPI():
- create_self_signed_cert()
try:
- ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
- ctx.check_hostname = False
- ctx.load_cert_chain(certfile='/app/dockerapi_cert.pem', keyfile='/app/dockerapi_key.pem')
- except:
- print ("Cannot initialize TLS, retrying in 5s...")
- time.sleep(5)
- app.run(debug=False, host='0.0.0.0', port=443, threaded=True, ssl_context=ctx)
+ system_time = datetime.now()
+ host_stats = {
+ "cpu": {
+ "cores": psutil.cpu_count(),
+ "usage": psutil.cpu_percent()
+ },
+ "memory": {
+ "total": psutil.virtual_memory().total,
+ "usage": psutil.virtual_memory().percent,
+ "swap": psutil.swap_memory()
+ },
+ "uptime": time.time() - psutil.boot_time(),
+ "system_time": system_time.strftime("%d.%m.%Y %H:%M:%S")
+ }
-api.add_resource(containers_get, '/containers/json')
-api.add_resource(container_get, '/containers/<string:container_id>/json')
-api.add_resource(container_post, '/containers/<string:container_id>/<string:post_action>')
+ redis_client.set('host_stats', json.dumps(host_stats), ex=10)
+ except Exception as e:
+ res = {
+ "type": "danger",
+ "msg": str(e)
+ }
+ print(json.dumps(res, indent=4))
-if __name__ == '__main__':
- api_thread = Thread(target=startFlaskAPI)
- api_thread.daemon = True
- api_thread.start()
- killer = GracefulKiller()
- while True:
- time.sleep(1)
- if killer.kill_now:
- break
- print ("Stopping dockerapi-mailcow")
+ await asyncio.sleep(wait)
+ host_stats_isUpdating = False
+
+
+async def get_container_stats(container_id, wait=5, stop=False):
+ global containerIds_to_update
+
+ if container_id and container_id.isalnum():
+ try:
+ for container in (await async_docker_client.containers.list()):
+ if container._id == container_id:
+ res = await container.stats(stream=False)
+
+ if redis_client.exists(container_id + '_stats'):
+ stats = json.loads(redis_client.get(container_id + '_stats'))
+ else:
+ stats = []
+ stats.append(res[0])
+ if len(stats) > 3:
+ del stats[0]
+ redis_client.set(container_id + '_stats', json.dumps(stats), ex=60)
+ except Exception as e:
+ res = {
+ "type": "danger",
+ "msg": str(e)
+ }
+ print(json.dumps(res, indent=4))
+ else:
+ res = {
+ "type": "danger",
+ "msg": "no or invalid id defined"
+ }
+ print(json.dumps(res, indent=4))
+
+ await asyncio.sleep(wait)
+ if stop:
+ # update task was called second time, stop
+ containerIds_to_update.remove(container_id)
+ else:
+ # call update task a second time
+ await get_container_stats(container_id, wait=0, stop=True)
+
+
+if os.environ['REDIS_SLAVEOF_IP'] != "":
+ redis_client = redis.Redis(host=os.environ['REDIS_SLAVEOF_IP'], port=os.environ['REDIS_SLAVEOF_PORT'], db=0)
+else:
+ redis_client = redis.Redis(host='redis-mailcow', port=6379, db=0)
+
+async_docker_client = aiodocker.Docker(url='unix:///var/run/docker.sock')
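Taken together, the rewrite replaces the Flask resources with FastAPI routes served by uvicorn over the self-signed TLS pair from the entrypoint, with redis acting as a short-lived stats cache. A minimal sketch of exercising the endpoints from inside the compose network (service name and container id are placeholders; -k because the certificate is self-signed):

  curl -sk https://dockerapi/host/stats                        # waits until a host_stats snapshot lands in redis
  curl -sk https://dockerapi/containers/json                   # all containers, keyed by id
  curl -sk -X POST https://dockerapi/containers/<id>/restart   # dispatched to container_post__restart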
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/Dockerfile
index 1fc4000..4e90052 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/Dockerfile
@@ -1,10 +1,10 @@
-FROM debian:buster-slim
+FROM debian:bullseye-slim
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
ARG DEBIAN_FRONTEND=noninteractive
-ARG DOVECOT=2.3.17
+ARG DOVECOT=2.3.19.1
ENV LC_ALL C
-ENV GOSU_VERSION 1.12
+ENV GOSU_VERSION 1.14
# Add groups and users before installing Dovecot to not break compatibility
RUN groupadd -g 5000 vmail \
@@ -89,7 +89,7 @@
&& chmod +x /usr/local/bin/gosu \
&& gosu nobody true \
&& apt-key adv --fetch-keys https://repo.dovecot.org/DOVECOT-REPO-GPG \
- && echo "deb https://repo.dovecot.org/ce-${DOVECOT}/debian/buster buster main" > /etc/apt/sources.list.d/dovecot.list \
+ && echo "deb https://repo.dovecot.org/ce-${DOVECOT}/debian/bullseye bullseye main" > /etc/apt/sources.list.d/dovecot.list \
&& apt-get update \
&& apt-get -y --no-install-recommends install \
dovecot-lua \
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/docker-entrypoint.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/docker-entrypoint.sh
index 9ac2dc6..18746de 100755
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/docker-entrypoint.sh
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/docker-entrypoint.sh
@@ -307,6 +307,7 @@
}
EOF
+
cat <<EOF > /etc/dovecot/sogo_trusted_ip.conf
# Autogenerated by mailcow
remote ${IPV4_NETWORK}.248 {
@@ -349,6 +350,14 @@
sievec /usr/lib/dovecot/sieve/report-spam.sieve
sievec /usr/lib/dovecot/sieve/report-ham.sieve
+for file in /var/vmail/*/*/sieve/*.sieve ; do
+ if [[ "$file" == "/var/vmail/*/*/sieve/*.sieve" ]]; then
+ continue
+ fi
+ sievec "$file" "$(dirname "$file")/../.dovecot.svbin"
+ chown vmail:vmail "$(dirname "$file")/../.dovecot.svbin"
+done
+
# Fix permissions
chown root:root /etc/dovecot/sql/*.conf
chown root:dovecot /etc/dovecot/sql/dovecot-dict-sql-sieve* /etc/dovecot/sql/dovecot-dict-sql-quota* /etc/dovecot/lua/passwd-verify.lua
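The sieve-compile loop added above guards against an empty match: without nullglob, bash leaves a non-matching glob as the literal pattern, which is exactly what the string comparison detects and skips. An equivalent sketch using nullglob instead:

  shopt -s nullglob
  for file in /var/vmail/*/*/sieve/*.sieve ; do
    sievec "$file" "$(dirname "$file")/../.dovecot.svbin"
    chown vmail:vmail "$(dirname "$file")/../.dovecot.svbin"
  done
  shopt -u nullglob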
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync
index 07cf58e..0d34504 100755
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/imapsync
@@ -1,6 +1,6 @@
#!/usr/bin/env perl
-# $Id: imapsync,v 2.148 2021/07/22 14:21:09 gilles Exp gilles $
+# $Id: imapsync,v 2.178 2022/01/12 21:28:37 gilles Exp gilles $
# structure
# pod documentation
# use pragmas
@@ -25,7 +25,7 @@
=head1 VERSION
-This documentation refers to Imapsync $Revision: 2.148 $
+This documentation refers to Imapsync $Revision: 2.178 $
=head1 USAGE
@@ -212,16 +212,6 @@
--sslargs2 str : Pass any ssl parameter for host2 ssl or tls connection.
See --sslargs1
- --timeout1 flo : Connection timeout in seconds for host1.
- Default is 120 and 0 means no timeout at all.
- --timeout2 flo : Connection timeout in seconds for host2.
- Default is 120 and 0 means no timeout at all.
-
- Caveat, under CGI context, you may encounter a timeout
- from the webserver, killing imapsync and the imap connexions.
- See the document INSTALL.OnlineUI.txt and search
- for "Timeout" for how to deal with this issue.
-
=head2 OPTIONS/authentication
@@ -243,7 +233,7 @@
--domain2 str : Domain on host2 (NTLM authentication).
--oauthaccesstoken1 str : The access token to authenticate with OAUTH2.
- It will be combined with the --user1 value to form the
+ It will be combined with the --user1 value to form the
string to pass with XOAUTH2 authentication.
The password given by --password1 or --passfile1
is ignored.
@@ -252,18 +242,18 @@
If the value is a file, imapsync reads its first line
and take this line as the access token. The advantage
of the file is that if the access token changes then
- imapsync can read it again when it needs to reconnect
+ imapsync can read it again when it needs to reconnect
during a run.
- --oauthaccesstoken2 str : same thing as --oauthaccesstoken1
+ --oauthaccesstoken2 str : same thing as --oauthaccesstoken1
--oauthdirect1 str : The direct string to pass with XOAUTH2 authentication.
- The password given by --password1 or --passfile1 and
+ The password given by --password1 or --passfile1 and
the user given by --user1 are ignored.
--oauthdirect2 str : same thing as oauthdirect1
-
+
=head2 OPTIONS/folders
@@ -432,6 +422,12 @@
by spaces.
--regexmess reg : and this one, etc.
+ --truncmess int : truncates messages when their size exceeds the int
+ value, specified in bytes. Useful to sync messages
+ that are too big or to "suppress" attachments.
+ Keep in mind that truncated messages become
+ somewhat incoherent.
+
=head2 OPTIONS/labels
Gmail present labels as folders in imap. Imapsync can accelerate the sync
@@ -639,7 +635,27 @@
=head2 OPTIONS/behavior
- --maxmessagespersecond flo : limits the average number of messages
+ --timeout1 flo : Connection timeout in seconds for host1.
+ Default is 120 and 0 means no timeout at all.
+ --timeout2 flo : Connection timeout in seconds for host2.
+ Default is 120 and 0 means no timeout at all.
+
+ Caveat, under CGI context, you may encounter a timeout
+ from the webserver, killing imapsync and the imap connections.
+ See the document INSTALL.OnlineUI.txt and search
+ for "Timeout" for how to deal with this issue.
+
+ --keepalive1 : https://metacpan.org/pod/Mail::IMAPClient#Keepalive
+ Some firewalls and network gear like to time out connections
+ prematurely if the connection sits idle.
+ This option enables SO_KEEPALIVE on the host1 socket.
+ --keepalive1 is on by default since imapsync release 2.169.
+ Use --nokeepalive1 to disable it.
+
+ --keepalive2 : Same as --keepalive1 but for host2.
+ Use --nokeepalive2 to disable it.
+
+ --maxmessagespersecond flo : limits the average number of messages
transferred per second.
--maxbytespersecond int : limits the average transfer rate per second.
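Since the timeout and keepalive options now live under OPTIONS/behavior, a combined invocation looks roughly like this (hosts, users, and file paths are placeholders):

  imapsync --host1 imap.example.org --user1 alice --passfile1 /run/secrets/pass1 \
           --host2 mail.example.net --user2 alice --passfile2 /run/secrets/pass2 \
           --timeout1 120 --timeout2 120 --nokeepalive2 --truncmess 50000000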
@@ -946,6 +962,7 @@
use warnings ;
use Carp ;
use Cwd ;
+use Compress::Zlib ;
use Data::Dumper ;
use Digest::HMAC_SHA1 qw( hmac_sha1 hmac_sha1_hex ) ;
use Digest::MD5 qw( md5 md5_hex md5_base64 ) ;
@@ -1033,6 +1050,7 @@
Readonly my $EXIT_TRANSFER_EXCEEDED => 118 ;
Readonly my $EXIT_ERR_APPEND_VIRUS => 119 ;
+Readonly my $EXIT_ERR_FLAGS => 120 ;
Readonly my $EXIT_TESTS_FAILED => 254 ; # Like Test::More API
@@ -1063,6 +1081,7 @@
$EXIT_ERR_APPEND => 'EXIT_ERR_APPEND',
$EXIT_ERR_APPEND_VIRUS => 'EXIT_ERR_APPEND_VIRUS',
$EXIT_ERR_FETCH => 'EXIT_ERR_FETCH',
+ $EXIT_ERR_FLAGS => 'EXIT_ERR_FLAGS',
$EXIT_ERR_CREATE => 'EXIT_ERR_CREATE',
$EXIT_ERR_SELECT => 'EXIT_ERR_SELECT',
$EXIT_TESTS_FAILED => 'EXIT_TESTS_FAILED',
@@ -1082,6 +1101,7 @@
ERR_CREATE => $EXIT_ERR_CREATE,
ERR_SELECT => $EXIT_ERR_SELECT,
ERR_Host1_FETCH => $EXIT_ERR_FETCH,
+ ERR_FLAGS => $EXIT_ERR_FLAGS,
ERR_UNCLASSIFIED => $EXIT_WITH_ERRORS,
ERR_NOTHING_REPORTED => $EXIT_WITH_ERRORS,
ERR_TRANSFER_EXCEEDED => $EXIT_TRANSFER_EXCEEDED,
@@ -1093,6 +1113,103 @@
) ;
+
+Readonly my %COMMENT_OF_ERR_TYPE => (
+ ERR_APPEND_SIZE => \&comment_err_append_size,
+ ERR_OVERQUOTA => \&comment_err_overquota,
+ ERR_APPEND => \&comment_err_blank,
+ ERR_APPEND_VIRUS => \&comment_err_blank,
+ ERR_CREATE => \&comment_err_blank,
+ ERR_SELECT => \&comment_err_blank,
+ ERR_Host1_FETCH => \&comment_err_blank,
+ ERR_FLAGS => \&comment_err_flags,
+ ERR_UNCLASSIFIED => \&comment_err_blank,
+ ERR_NOTHING_REPORTED => \&comment_err_blank,
+ ERR_TRANSFER_EXCEEDED => \&comment_err_transfer_exceeded,
+ ERR_CONNECTION_FAILURE_HOST1 => \&comment_err_connection_failure_host1,
+ ERR_CONNECTION_FAILURE_HOST2 => \&comment_err_connection_failure_host2,
+ ERR_AUTHENTICATION_FAILURE_USER1 => \&comment_err_authentication_failure_host1,
+ ERR_AUTHENTICATION_FAILURE_USER2 => \&comment_err_authentication_failure_host2,
+ ERR_EXIT_TLS_FAILURE => \&comment_err_blank,
+) ;
+
+
+sub comment_err_blank
+{
+ return '' ;
+}
+
+
+sub comment_err_append_size
+{
+ my $mysync = shift @ARG ;
+
+ my $comment = "The destination server refuses too big messages. Use --truncmess option. Read https://imapsync.lamiral.info/FAQ.d/FAQ.Messages_Too_Big.txt" ;
+ return $comment ;
+}
+
+
+sub comment_err_authentication_failure_host1
+{
+ my $mysync = shift @ARG ;
+
+ my $comment = "Check the credentials for $mysync->{ user1 }." ;
+ return $comment ;
+}
+
+sub comment_err_authentication_failure_host2
+{
+ my $mysync = shift @ARG ;
+
+ my $comment = "Check the credentials for $mysync->{ user2 }." ;
+ return $comment ;
+}
+
+
+sub comment_err_connection_failure_host1
+{
+ my $mysync = shift @ARG ;
+
+ my $comment = "Check that host1 $mysync->{ host1 } on port $mysync->{ port1 } is the right IMAP server to be contacted for your mailbox." ;
+ return $comment ;
+}
+
+sub comment_err_connection_failure_host2
+{
+ my $mysync = shift @ARG ;
+
+ my $comment = "Check that host1 $mysync->{ host2 } on port $mysync->{ port2 } is the right IMAP server to be contacted for your mailbox." ;
+ return $comment ;
+}
+
+sub comment_err_overquota
+{
+ my $mysync = shift @ARG ;
+
+ my $comment = 'The destination mailbox is 100% full, get free space on it and then resume the sync.' ;
+ return $comment ;
+}
+
+
+sub comment_err_transfer_exceeded
+{
+ my $mysync = shift @ARG ;
+
+ my $size_limit_human = bytes_display_string_dec( $mysync->{ exitwhenover } ) ;
+ my $comment = "The maximum transfer size for a single sync is reached ( over $size_limit_human ). Relaunch the sync to sync more." ;
+ return $comment ;
+}
+
+sub comment_err_flags
+{
+ my $mysync = shift @ARG ;
+
+ my $comment = 'Many STORE errors with FLAGS. Retry with the option --noresyncflags' ;
+ return $comment ;
+}
+
+
+
Readonly my $DEFAULT_LOGDIR => 'LOG_imapsync' ;
Readonly my $ERRORS_MAX => 50 ; # exit after 50 errors.
@@ -1177,9 +1294,8 @@
my(
$sync, $acc1, $acc2,
- $debugcontent, $debugflags,
+ $debugflags,
$debuglist, $debugdev, $debugmaxlinelength, $debugcgi,
-
@include, @exclude, @folderrec,
@folderfirst, @folderlast,
@h1_folders_all, %h1_folders_all,
@@ -1191,41 +1307,26 @@
%h1_subscribed_folder, %h2_subscribed_folder,
%h2_folders_from_1_wanted,
%h2_folders_from_1_several,
-
$prefix1, $prefix2,
@regexmess, @skipmess, @pipemess, $pipemesscheck,
$syncflagsaftercopy,
$syncinternaldates,
$idatefromheader,
-
$minsize, $maxage, $minage,
$search,
@useheader, %useheader,
$skipsize, $allowsizemismatch, $buffersize,
-
-
$authmd5, $authmd51, $authmd52,
$subscribed, $subscribe, $subscribeall,
$help,
-
- $fast,
-
$nb_msg_skipped_dry_mode,
-
$h2_nb_msg_noheader,
-
$h1_bytes_processed,
-
$h1_nb_msg_end, $h1_bytes_end,
$h2_nb_msg_end, $h2_bytes_end,
-
$timestart_int,
-
$uid1, $uid2,
-
-
$split1, $split2,
-
$modulesversion,
$delete2folders, $delete2foldersonly, $delete2foldersbutnot,
$usecache, $debugcache, $cacheaftercopy,
@@ -1244,7 +1345,6 @@
$cgidir,
%month_abrev,
$SSL_VERIFY_POLICY,
- $warn_release,
) ;
single_sync( $sync, $acc1, $acc2 );
@@ -1263,7 +1363,7 @@
# or $acc1->{variable_name}
# or $acc1->{variable_name}
-#
+#
$acc1 = {} ;
$acc2 = {} ;
$sync->{ acc1 } = $acc1 ;
@@ -1271,10 +1371,12 @@
$acc1->{ Side } = 'Host1' ;
$acc2->{ Side } = 'Host2' ;
+$acc1->{ N } = '1' ;
+$acc2->{ N } = '2' ;
$sync->{timestart} = time ; # Is a float because of use Time::HiRres
-$sync->{rcs} = q{$Id: imapsync,v 2.148 2021/07/22 14:21:09 gilles Exp gilles $} ;
+$sync->{rcs} = q{$Id: imapsync,v 2.178 2022/01/12 21:28:37 gilles Exp gilles $} ;
$sync->{ memory_consumption_at_start } = memory_consumption( ) || 0 ;
@@ -1383,6 +1485,8 @@
# Under CGI environment, fix caveat emptor potential issues
cgisetcontext( $sync ) ;
+get_options_extra( $sync ) ;
+
# --gmail --gmail --exchange --office etc.
easyany( $sync ) ;
@@ -1397,7 +1501,7 @@
if ( condition_to_leave_after_tests( $sync ) )
{
- return $unittestssuite ;
+ return $unittestssuite ;
}
# init live varaiables
@@ -1458,7 +1562,7 @@
$modulesversion = defined $modulesversion ? $modulesversion : 1 ;
-$warn_release = ( $sync->{releasecheck} ) ? check_last_release( ) : $STR_use_releasecheck ;
+$sync->{ warn_release } = ( $sync->{ releasecheck } ) ? check_last_release( ) : $STR_use_releasecheck ;
$wholeheaderifneeded = defined $wholeheaderifneeded ? $wholeheaderifneeded : 1;
@@ -1557,7 +1661,7 @@
-if ( $sync->{ justbanner } )
+if ( $sync->{ justbanner } )
{
myprint( "Exiting because of --justbanner\n" ) ;
exit_clean( $sync, $EX_OK ) ;
@@ -1766,6 +1870,11 @@
$sync->{ acc2 }->{timeout} = defined $sync->{ acc2 }->{timeout} ? $sync->{ acc2 }->{timeout} : $sync->{ timeout } ;
myprint( "Host2: imap connection timeout is $sync->{ acc2 }->{timeout} seconds\n" ) ;
+
+keepalive1( $sync ) ;
+keepalive2( $sync ) ;
+
+
if ( under_cgi_context( $sync ) )
{
myprint( "Under CGI context, a timeout can occur from the webserver, see https://imapsync.lamiral.info/INSTALL.d/INSTALL.OnlineUI.txt\n" ) ;
@@ -1773,7 +1882,7 @@
$sync->{ syncacls } = defined $sync->{ syncacls } ? $sync->{ syncacls } : 0 ;
-# No folders sizes if --justfolders, unless really wanted.
+# No folders sizes at the beginning if --justfolders, unless really wanted.
if (
$sync->{ justfolders }
and not defined $sync->{ foldersizes }
@@ -1786,7 +1895,8 @@
$sync->{ foldersizes } = ( defined $sync->{ foldersizes } ) ? $sync->{ foldersizes } : 1 ;
$sync->{ foldersizesatend } = ( defined $sync->{ foldersizesatend } ) ? $sync->{ foldersizesatend } : $sync->{ foldersizes } ;
-$sync->{ checknoabletosearch } = ( defined $sync->{ checknoabletosearch } ) ? $sync->{ checknoabletosearch } : 1 ;
+#$sync->{ checknoabletosearch } = ( defined $sync->{ checknoabletosearch } ) ? $sync->{ checknoabletosearch } : 1 ;
+set_checknoabletosearch( $sync ) ;
$acc1->{ fastio } = defined $acc1->{ fastio } ? $acc1->{ fastio } : 0 ;
@@ -1796,6 +1906,11 @@
$acc1->{ reconnectretry } = defined $acc1->{ reconnectretry } ? $acc1->{ reconnectretry } : $DEFAULT_NB_RECONNECT_PER_IMAP_COMMAND ;
$acc2->{ reconnectretry } = defined $acc2->{ reconnectretry } ? $acc2->{ reconnectretry } : $DEFAULT_NB_RECONNECT_PER_IMAP_COMMAND ;
+# IMAP compression on by default
+#$acc1->{ compress } = defined $acc1->{ compress } ? $acc1->{ compress } : 0 ;
+#$acc2->{ compress } = defined $acc2->{ compress } ? $acc2->{ compress } : 0 ;
+
+
if ( ! @useheader ) { @useheader = qw( Message-Id Received ) ; }
@@ -1936,6 +2051,9 @@
maxsize_setting( $sync ) ;
+acc_compress_imap( $acc1 ) ;
+acc_compress_imap( $acc2 ) ;
+
if ( $sync->{ justlogin } ) {
$sync->{imap1}->logout( ) ;
$sync->{imap2}->logout( ) ;
@@ -2330,7 +2448,7 @@
my $h2_fold_nb_messages = count_from_select( @select_results ) ;
myprint( "Host2: folder [$h2_fold] has $h2_fold_nb_messages messages in total (mentioned by SELECT)\n" ) ;
- my $permanentflags2 = permanentflags( @select_results ) ;
+ my $permanentflags2 = permanentflags( $sync, @select_results ) ;
myprint( "Host2: folder [$h2_fold] permanentflags: $permanentflags2\n" ) ;
if ( $sync->{ expunge1 } )
@@ -2874,22 +2992,21 @@
do_and_print_stats( $sync ) ;
-if ( $sync->{errorsdump} and $sync->{nb_errors} )
+if ( $sync->{ nb_errors } )
{
myprint( errors_listing( $sync ) ) ;
}
-if ( $sync->{testslive} or $sync->{testslive6} )
+if ( $sync->{ testslive } or $sync->{ testslive6 } )
{
- tests_live_result( $sync->{nb_errors} ) ;
+ tests_live_result( $sync->{ nb_errors } ) ;
}
-
-if ( $sync->{nb_errors} )
+if ( $sync->{ nb_errors } )
{
- my $exit_value = $EXIT_VALUE_OF_ERR_TYPE{ $sync->{most_common_error} } || $EXIT_CATCH_ALL ;
+ my $exit_value = exit_value( $sync, $sync->{ most_common_error } ) ;
exit_clean( $sync, $exit_value ) ;
}
else
@@ -3020,16 +3137,16 @@
output( $mysync, "Hello\n" ) ;
is( "Hello\n", print_output_if_needed( $mysync ), 'print_output_if_needed: Hello => Hello' ) ;
-
+
$mysync->{ dockercontext } = 1 ;
is( "Hello\n", print_output_if_needed( $mysync ), 'print_output_if_needed: dockercontext + Hello => Hello' ) ;
-
+
$mysync->{ version } = 1 ;
is( q{}, print_output_if_needed( $mysync ), 'print_output_if_needed: dockercontext + Hello + --version => ""' ) ;
-
+
$mysync->{ dockercontext } = 0 ;
is( "Hello\n", print_output_if_needed( $mysync ), 'print_output_if_needed: Hello + --version => Hello' ) ;
-
+
note( 'Leaving tests_print_output_if_needed()' ) ;
return ;
}
@@ -3037,11 +3154,11 @@
sub print_output_if_needed
{
-
+
my $mysync = shift @ARG ;
if ( ! defined $mysync ) { return ; }
my $output = output( $mysync ) ;
-
+
if ( $mysync->{ version } && under_docker_context( $mysync ) )
{
return q{} ;
@@ -3051,7 +3168,7 @@
myprint( $output ) ;
return $output ;
}
-
+
}
@@ -3415,7 +3532,7 @@
sub under_docker_context
{
my $mysync = shift ;
-
+
if ( ! defined $mysync ) { return ; }
if ( defined $mysync->{ dockercontext } )
@@ -3436,7 +3553,7 @@
}
-sub docker_context
+sub docker_context
{
my $mysync = shift ;
@@ -3447,7 +3564,7 @@
output( $mysync, "Docker context detected with the file /.dockerenv\n" ) ;
# No pidfile by default
-
+
$mysync->{ pidfile } = defined( $mysync->{ pidfile } ) ? $mysync->{ pidfile } : q{} ;
# No log by default
if ( defined( $mysync->{ log } ) )
@@ -3461,8 +3578,11 @@
}
# In case something is written relatively to .
- output( $mysync, "Changing current directory to /var/tmp/\n" ) ;
- chdir '/var/tmp/' ;
+ my $tmp_dir = "/var/tmp/uid_$EFFECTIVE_USER_ID" ;
+ mkpath( $tmp_dir ) ; # Looks silly? No, it is needed for "imapsync --version" to work.
+ do_valid_directory( $tmp_dir ) ;
+ output( $mysync, "Changing current directory to $tmp_dir\n" ) ;
+ chdir $tmp_dir ;
return ;
}
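
A minimal sketch of the per-UID scratch-directory idiom introduced above, assuming only core modules (English for $EFFECTIVE_USER_ID and $OS_ERROR, File::Path for mkpath); path and error text mirror the patch:

    use strict;
    use warnings;
    use English qw( -no_match_vars );   # $EFFECTIVE_USER_ID, $OS_ERROR
    use File::Path qw( mkpath );

    # One directory per effective UID, so concurrent imapsync runs under
    # different users no longer collide on a shared /var/tmp/.
    my $tmp_dir = "/var/tmp/uid_$EFFECTIVE_USER_ID";
    mkpath( $tmp_dir ) unless -d $tmp_dir;
    chdir $tmp_dir or die "Cannot chdir to $tmp_dir: $OS_ERROR\n";
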
@@ -3520,7 +3640,7 @@
return ;
}
-sub cgibuildheader
+sub cgibuildheader
{
my $mysync = shift ;
if ( ! under_cgi_context( $mysync ) ) { return ; }
@@ -3689,15 +3809,15 @@
if ( ! under_cgi_context( $mysync ) ) { return ; }
output( $mysync, "Under cgi context\n" ) ;
-
-
+
+
set_umask( $mysync ) ;
# Remove all content in unsafe evaled options
@{ $mysync->{ regextrans2 } } = ( ) ;
@{ $mysync->{ regexflag } } = buggyflagsregex( ) ;
-
+
@regexmess = ( ) ;
@skipmess = ( ) ;
@pipemess = ( ) ;
@@ -3738,7 +3858,7 @@
-d $cgidir or mkpath $cgidir or die "Can not create $cgidir: $OS_ERROR\n" ;
$mysync->{ tmpdir } = $cgidir ;
$mysync->{ logdir } = '' ;
-
+
chdir $cgidir or die "Can not cd to $cgidir: $OS_ERROR\n" ;
cgioutputenvcontext( $mysync ) ;
$mysync->{ debug } and output( $mysync, 'Current directory is ' . getcwd( ) . "\n" ) ;
@@ -3758,7 +3878,7 @@
# addheader on by default
$mysync->{ addheader } = defined $mysync->{ addheader } ? $mysync->{ addheader } : 1 ;
-
+
# sync duplicates by default in cgi context
$mysync->{ syncduplicates } = defined $mysync->{ syncduplicates } ? $mysync->{ syncduplicates } : 1 ;
@@ -3780,7 +3900,7 @@
return ;
}
-sub announcelogfile
+sub announcelogfile
{
my $mysync = shift ;
@@ -3792,7 +3912,7 @@
else
{
myprint( "No log file because of option --nolog\n" ) ;
- }
+ }
return ;
}
@@ -3802,7 +3922,7 @@
my $mysync = shift ;
if ( ! $mysync->{ loglogfile } ) { return ; }
if ( ! $mysync->{ log } ) { return ; }
-
+
my $cwd = getcwd( ) ;
my $absolutelogfilepath ;
# Fixme: add case when the logfile name is already absolute
@@ -3822,10 +3942,10 @@
}
-sub checkselectable
+sub checkselectable
{
- my $mysync = shift ;
-
+ my $mysync = shift ;
+
if ( $mysync->{ checkselectable } ) {
my @h1_folders_wanted_selectable ;
myprint( "Host1: Checking wanted folders are selectable. Use --nocheckselectable to avoid this check.\n" ) ;
@@ -3842,7 +3962,7 @@
}
}
@{ $mysync->{ h1_folders_wanted } } = @h1_folders_wanted_selectable ;
- ( $mysync->{ debug } or $mysync->{ debugfolders } )
+ ( $mysync->{ debug } or $mysync->{ debugfolders } )
and myprint( 'Host1: checking folders took ', timenext( $mysync ), " s\n" ) ;
}
else
@@ -3852,14 +3972,14 @@
return ;
}
-sub setcheckselectable
+sub setcheckselectable
{
my $mysync = shift ;
-
+
my $h1_folders_wanted_nb = scalar @{ $mysync->{ h1_folders_wanted } } ;
# 152 because 98% of host1 accounts have less than 152 folders on /X service.
- # command to get this value:
- # datamash_file_op_index G_Host1_Nb_folders.txt perc:98 4 %16.1f
+ # command to get this value:
+ # datamash_file_op_index G_Host1_Nb_folders.txt perc:98 4 %16.1f
if ( ! defined $mysync->{ checkselectable } )
{
if ( 152 >= $h1_folders_wanted_nb )
@@ -4014,8 +4134,8 @@
}
myprintf( "%s Nb folders: %11s folders\n", $side, $nb_folders ) ;
myprintf( "%s Nb messages: %11s messages\n", $side, $total_nb ) ;
- myprintf( "%s Total size: %11s bytes (%s)\n", $side, $total_size, bytes_display_string( $total_size ) ) ;
- myprintf( "%s Biggest message: %11s bytes (%s)\n", $side, $biggest_in_all, bytes_display_string( $biggest_in_all ) ) ;
+ myprintf( "%s Total size: %11s bytes (%s)\n", $side, $total_size, bytes_display_string_bin( $total_size ) ) ;
+ myprintf( "%s Biggest message: %11s bytes (%s)\n", $side, $biggest_in_all, bytes_display_string_bin( $biggest_in_all ) ) ;
myprintf( "%s Time spent on sizing: %11.1f seconds\n", $side, timenext( $mysync ) ) ;
return( $total_nb, $total_size ) ;
}
@@ -4319,11 +4439,11 @@
myprintf( "Host1 Nb messages: %11s messages\n", $total_nb_1 ) ;
myprintf( "Host2 Nb messages: %11s messages\n", $total_nb_2 ) ;
myprint( "\n" ) ;
- myprintf( "Host1 Total size: %11s bytes (%s)\n", $total_size_1, bytes_display_string( $total_size_1 ) ) ;
- myprintf( "Host2 Total size: %11s bytes (%s)\n", $total_size_2, bytes_display_string( $total_size_2 ) ) ;
+ myprintf( "Host1 Total size: %11s bytes (%s)\n", $total_size_1, bytes_display_string_bin( $total_size_1 ) ) ;
+ myprintf( "Host2 Total size: %11s bytes (%s)\n", $total_size_2, bytes_display_string_bin( $total_size_2 ) ) ;
myprint( "\n" ) ;
- myprintf( "Host1 Biggest message: %11s bytes (%s)\n", $biggest_in_all_1, bytes_display_string( $biggest_in_all_1 ) ) ;
- myprintf( "Host2 Biggest message: %11s bytes (%s)\n", $biggest_in_all_2, bytes_display_string( $biggest_in_all_2 ) ) ;
+ myprintf( "Host1 Biggest message: %11s bytes (%s)\n", $biggest_in_all_1, bytes_display_string_bin( $biggest_in_all_1 ) ) ;
+ myprintf( "Host2 Biggest message: %11s bytes (%s)\n", $biggest_in_all_2, bytes_display_string_bin( $biggest_in_all_2 ) ) ;
myprint( "\n" ) ;
myprintf( "Time spent on sizing: %11.1f seconds\n", timenext( $mysync ) ) ;
@@ -5326,7 +5446,7 @@
vendor => 'Gilles LAMIRAL',
'support-url' => 'https://imapsync.lamiral.info/',
# Example of date-time: 19-Sep-2015 08:56:07
- date => date_from_rcs( q{$Date: 2021/07/22 14:21:09 $ } ),
+ date => date_from_rcs( q{$Date: 2022/01/12 21:28:37 $ } ),
} ;
my $imapsync_id_github = {
@@ -5335,7 +5455,7 @@
os => $OSNAME,
vendor => 'github',
'support-url' => 'https://github.com/imapsync/imapsync',
- date => date_from_rcs( q{$Date: 2021/07/22 14:21:09 $ } ),
+ date => date_from_rcs( q{$Date: 2022/01/12 21:28:37 $ } ),
} ;
$imapsync_id = $imapsync_id_lamiral ;
@@ -5412,11 +5532,13 @@
my $Side = $side{ $side } ;
my $debug_before = $imap->Debug( ) ;
$imap->Debug( 1 ) ;
- if ( not $imap->has_capability( 'QUOTA' ) ) {
+ if ( not $imap->has_capability( 'QUOTA' ) )
+ {
+ myprint( "$Side: No QUOTA capability found, skipping it.\n" ) ;
$imap->Debug( $debug_before ) ;
return ;
} ;
- myprint( "\n$Side: found quota, presented in raw IMAP\n" ) ;
+ myprint( "\n$Side: QUOTA capability found, presented in raw IMAP on next lines\n" ) ;
my $getquotaroot = $imap->getquotaroot( 'INBOX' ) ;
# Gmail INBOX quotaroot is "" but with it Mail::IMAPClient does a literal GETQUOTA {2} \n ""
#$imap->quota( 'ROOT' ) ;
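
For context, a hedged sketch of the raw IMAP that getquotaroot() drives (RFC 2087); the server, credentials, and quota numbers are placeholders:

    use strict;
    use warnings;
    use Mail::IMAPClient;

    my $imap = Mail::IMAPClient->new(
        Server   => 'imap.example.org',   # placeholder host
        User     => 'test1',
        Password => 'secret1',
        Ssl      => 1,
        Uid      => 1,
    ) or die "connection failed: $@\n";

    if ( $imap->has_capability( 'QUOTA' ) ) {
        # Typical dialogue behind getquotaroot('INBOX'):
        #   C: 5 GETQUOTAROOT INBOX
        #   S: * QUOTAROOT INBOX ""
        #   S: * QUOTA "" (STORAGE 3272 1048576)  <- usage/limit, units of 1024 octets
        #   S: 5 OK Getquotaroot completed.
        my $getquotaroot = $imap->getquotaroot( 'INBOX' );
    }
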
@@ -5517,7 +5639,7 @@
$mysync->{h2_special} = special_from_folders_hash( $mysync, $mysync->{imap2}, 'Host2' ) ;
build_possible_special( $mysync ) ;
- build_guess_special( $mysync ) ;
+ build_guess_special( $mysync ) ;
build_automap( $mysync ) ;
return ;
@@ -5653,7 +5775,7 @@
'Junk E-Mail', 'Junk Email'] ;
$possible_special->{'\Sent'} = [ 'Sent', 'Sent Messages', 'Sent Items',
'Gesendete Elemente', 'Gesendete Objekte',
- '&AMk-l&AOk-ments envoy&AOk-s', 'Envoy&AOk-', 'Objets envoy&AOk-s',
+ '&AMk-l&AOk-ments envoy&AOk-s', 'E&AwE-le&AwE-ments envoye&AwE-s', 'Envoy&AOk-', 'Objets envoy&AOk-s',
'Elementos enviados',
'&kAFP4W4IMH8wojCkMMYw4A-',
'&BB4EQgQ,BEAEMAQyBDsENQQ9BD0ESwQ1-',
@@ -5728,32 +5850,6 @@
return( \%special ) ;
}
-sub errors_incr
-{
- my ( $mysync, @error ) = @ARG ;
- $mysync->{nb_errors}++ ;
-
- if ( @error ) {
- errors_log( $mysync, @error ) ;
- myprint( @error ) ;
- }
-
- $mysync->{errorsmax} ||= $ERRORS_MAX ;
- if ( $mysync->{nb_errors} >= $mysync->{errorsmax} ) {
- myprint( "Maximum number of errors $mysync->{errorsmax} reached ( you can change $mysync->{errorsmax} to any value, for example 100 with --errorsmax 100 ). Exiting.\n" ) ;
- my $most_common_error = errorsanalyse( errors_log( $mysync ) ) ;
- if ( $mysync->{errorsdump} ) {
- myprint( errorsdump( errors_log( $mysync ) ) ) ;
- myprint( "The most frequent error is $most_common_error\n" ) ;
- # again since errorsdump( ) can be very verbose and masquerade previous warning
- myprint( "Maximum number of errors $mysync->{errorsmax} reached ( you can change $mysync->{errorsmax} to any value, for example 100 with --errorsmax 100 ). Exiting.\n" ) ;
- }
- my $exit_value = $EXIT_VALUE_OF_ERR_TYPE{ $most_common_error } || $EXIT_CATCH_ALL ;
- #exit_clean( $mysync, $EXIT_WITH_ERRORS_MAX ) ;
- exit_clean( $mysync, $exit_value ) ;
- }
- return ;
-}
sub tests_errors_log
{
@@ -5790,6 +5886,52 @@
}
+
+sub tests_comment_of_error_type
+{
+ note( 'Entering tests_comment_of_error_type()' ) ;
+
+ is( undef, comment_of_error_type( ), 'comment_of_error_type: no args => undef' ) ;
+
+ my $mysync = { } ;
+ is( undef, comment_of_error_type( $mysync ), 'comment_of_error_type: undef => undef' ) ;
+
+ is( "", comment_of_error_type( $mysync, '' ), 'comment_of_error_type: "" => ""' ) ;
+ is( "", comment_of_error_type( $mysync, 'blabla' ), 'comment_of_error_type: blabla => ""' ) ;
+
+ is( "", comment_of_error_type( $mysync, 'ERR_UNCLASSIFIED' ), 'comment_of_error_type: ERR_UNCLASSIFIED => ""' ) ;
+
+ like( comment_of_error_type( $mysync, 'ERR_OVERQUOTA' ), qr{100% full}, 'comment_of_error_type: ERR_OVERQUOTA => matches 100% full' ) ;
+
+
+
+ note( 'Leaving tests_comment_of_error_type()' ) ;
+ return ;
+}
+
+sub comment_of_error_type
+{
+ my $mysync = shift @ARG ;
+ my $error_type = shift @ARG ;
+
+ if ( ! defined $mysync ) { return ; }
+ if ( ! defined $error_type ) { return ; }
+
+ my $comment ;
+
+ if ( exists( $COMMENT_OF_ERR_TYPE{ $error_type } ) )
+ {
+ $comment = $COMMENT_OF_ERR_TYPE{ $error_type }->( $mysync ) ;
+ }
+ else
+ {
+ $comment = "" ;
+ }
+ return $comment ;
+}
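
%COMMENT_OF_ERR_TYPE itself is declared elsewhere in the script; a hedged sketch of its shape, consistent with the test above that expects the ERR_OVERQUOTA comment to match "100% full" (the exact wording below is an assumption):

    # Dispatch table: error type => coderef producing a human-readable hint.
    # Each coderef receives $mysync so the hint can reference current options.
    my %COMMENT_OF_ERR_TYPE = (
        ERR_OVERQUOTA => sub {
            my $mysync = shift;
            return 'The destination account is 100% full. '
                 . 'Free some space on host2 before running the sync again.';
        },
    );

    # comment_of_error_type() then reduces to a guarded lookup:
    my $comment = exists $COMMENT_OF_ERR_TYPE{ 'ERR_OVERQUOTA' }
                ? $COMMENT_OF_ERR_TYPE{ 'ERR_OVERQUOTA' }->( {} )
                : q{};
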
+
+
+
sub tests_error_type
{
note( 'Entering tests_error_type()' ) ;
@@ -5851,6 +5993,13 @@
'error_type: could not append ... virus => ERR_APPEND_VIRUS'
) ;
+
+ is( 'ERR_FLAGS',
+ error_type( 'Host2: flags msg INBOX/957910 could not add flags [PasGlop \PasGlopRe]: 33 NO Error in IMAP command received by server.' ),
+ 'error_type: could not add flags => ERR_FLAGS'
+ ) ;
+
+
note( 'Leaving tests_error_type()' ) ;
return ;
}
@@ -5902,6 +6051,10 @@
# could not append .*NO header limit reached
if ( $error =~ m{could not append} ) { return 'ERR_APPEND' ; } ;
+ # could not add flags
+ if ( $error =~ m{could not add flags} ) { return 'ERR_FLAGS' ; } ;
+
+
# Could not create folder .*Invalid mailbox name
if ( $error =~ m{Could not create folder} ) { return 'ERR_CREATE' ; } ;
@@ -5996,6 +6149,7 @@
if ( !%{ $errors_counted_ref } ) { return 'ERR_NOTHING_REPORTED' ; }
# non empty hash
+ # In case of equality, the winner error is the first in alphabetical order.
my $most_common_error = ( sort
{
$errors_counted_ref->{$b} <=> $errors_counted_ref->{$a}
@@ -6084,17 +6238,69 @@
sub errors_listing
{
my $mysync = shift ;
- $mysync->{most_common_error} = errorsanalyse( errors_log( $sync ) ) ;
+ $mysync->{ most_common_error } = errorsanalyse( errors_log( $mysync ) ) ;
- my $errors_listing = join( '',
- "++++ Listing $mysync->{nb_errors} errors encountered during the sync ( avoid this listing with --noerrorsdump ).\n",
- errorsdump( errors_log( $mysync ) ),
- "The most frequent error is $mysync->{most_common_error}\n",
+ my $errors_listing = '' ;
+
+ if ( $mysync->{ errorsdump } )
+ {
+ $errors_listing = join( '',
+ "++++ Listing $mysync->{nb_errors} errors encountered during the sync ( avoid this listing with --noerrorsdump ).\n",
+ errorsdump( errors_log( $mysync ) ),
+ ) ;
+ }
+
+ $errors_listing .= join( '',
+ "The most frequent error is $mysync->{ most_common_error }. ",
+ comment_of_error_type( $mysync, $mysync->{ most_common_error } ),
+ "\n",
) ;
+
return $errors_listing ;
}
+sub errors_incr
+{
+ my ( $mysync, @error ) = @ARG ;
+ $mysync->{ nb_errors }++ ;
+
+ if ( @error ) {
+ errors_log( $mysync, @error ) ;
+ myprint( @error ) ;
+ }
+
+ $mysync->{ errorsmax } ||= $ERRORS_MAX ;
+
+
+ if ( $mysync->{ nb_errors } >= $mysync->{ errorsmax } )
+ {
+ myprint( errorsmax_msg( $mysync ) ) ;
+ myprint( errors_listing( $mysync ) ) ;
+
+ if ( $mysync->{ errorsdump } )
+ {
+ # again since errorsdump( ) can be very verbose and masquerade previous warning
+ myprint( errorsmax_msg( $mysync ) ) ;
+ }
+ my $exit_value = exit_value( $mysync, $mysync->{ most_common_error } ) ;
+ exit_clean( $mysync, $exit_value ) ;
+ }
+ return ;
+}
+
+
+
+sub errorsmax_msg
+{
+ my $mysync = shift @ARG ;
+ my $msg = "Maximum number of errors $mysync->{errorsmax} reached "
+ . "( you can change $mysync->{errorsmax} to any value, for example 100 with --errorsmax 100 ). "
+ . "Exiting.\n" ;
+ return $msg ;
+}
+
+
sub tests_live_result
@@ -6713,17 +6919,21 @@
sub cleanup_before_exit
{
my $mysync = shift ;
+
remove_tmp_files( $mysync ) ;
+
if ( $mysync->{imap1} and $mysync->{imap1}->IsConnected() )
{
myprint( "Disconnecting from host1 $mysync->{ host1 } user1 $mysync->{ user1 }\n" ) ;
$mysync->{imap1}->logout( ) ;
}
+
if ( $mysync->{imap2} and $mysync->{imap2}->IsConnected() )
{
myprint( "Disconnecting from host2 $mysync->{ host2 } user2 $mysync->{ user2 }\n" ) ;
$mysync->{imap2}->logout( ) ;
}
+
if ( $mysync->{log} ) {
myprint( "Log file is $mysync->{logfile} ( to change it, use --logfile filepath ; or use --nolog to turn off logging )\n" ) ;
}
@@ -6736,16 +6946,50 @@
#print( "Closing $mysync->{ logfile }\n" ) ;
teefinish( $mysync ) ;
}
+
return ;
}
+sub tests_exit_value
+{
+ note( 'Entering tests_exit_value()' ) ;
+
+ is( $EXIT_CATCH_ALL, exit_value( ), 'exit_value: no args => EXIT_CATCH_ALL' ) ;
+
+ my $mysync = { } ;
+ is( $EXIT_CATCH_ALL, exit_value( $mysync ), 'exit_value: undef => EXIT_CATCH_ALL' ) ;
+
+ is( $EXIT_CATCH_ALL, exit_value( $mysync, 'Blabla_unknown' ), 'exit_value: Blabla => EXIT_CATCH_ALL' ) ;
+ is( $EXIT_CATCH_ALL, exit_value( $mysync, '' ), 'exit_value: empty => EXIT_CATCH_ALL' ) ;
+
+
+ is( $EXIT_OVERQUOTA, exit_value( $mysync, 'ERR_OVERQUOTA' ), 'exit_value: ERR_OVERQUOTA => EXIT_OVERQUOTA' ) ;
+ is( $EXIT_TRANSFER_EXCEEDED, exit_value( $mysync, 'ERR_TRANSFER_EXCEEDED' ), 'exit_value: ERR_TRANSFER_EXCEEDED => EXIT_TRANSFER_EXCEEDED' ) ;
+
+ note( 'Leaving tests_exit_value()' ) ;
+ return ;
+}
+
+sub exit_value
+{
+ my $mysync = shift @ARG ;
+ my $most_common_error = shift @ARG ;
+
+ if ( ! defined $most_common_error ) { return $EXIT_CATCH_ALL ; }
+ my $exit_value = $EXIT_VALUE_OF_ERR_TYPE{ $most_common_error } || $EXIT_CATCH_ALL ;
+
+ return $exit_value ;
+}
+
+
+
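
A sketch of the lookup that exit_value() wraps; %EXIT_VALUE_OF_ERR_TYPE and the $EXIT_* constants are defined earlier in imapsync, and the sample values below are illustrative assumptions:

    my $EXIT_CATCH_ALL = 1;     # illustrative values, not the script's real ones
    my $EXIT_OVERQUOTA = 111;
    my %EXIT_VALUE_OF_ERR_TYPE = (
        ERR_OVERQUOTA => $EXIT_OVERQUOTA,
    );

    # The || fallback is why undefined or unknown error types exit
    # with the catch-all code:
    my $most_common_error = 'ERR_OVERQUOTA';
    my $exit_value = $EXIT_VALUE_OF_ERR_TYPE{ $most_common_error } || $EXIT_CATCH_ALL;
    # => 111 here; 'ERR_UNCLASSIFIED' or '' would yield 1.
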
sub exit_most_errors
{
my $mysync = shift @ARG ;
myprint( errors_listing( $mysync ) ) ;
- my $exit_value = $EXIT_VALUE_OF_ERR_TYPE{ $mysync->{most_common_error} } || $EXIT_CATCH_ALL ;
+ my $exit_value = exit_value( $mysync, $mysync->{ most_common_error } ) ;
exit_clean( $mysync, $exit_value ) ;
return ;
}
@@ -7072,8 +7316,9 @@
sub skip_macosx
{
#return ;
- # hostname used to be macosx.polarhome.com
- return( 'macosx' eq hostname( ) && ( 'darwin' eq $OSNAME ) ) ;
+ # hostname is sometimes "macosx.polarhome.com" sometimes "macosx"
+ return( ( ( 'macosx.polarhome.com' eq hostname( ) ) || ( 'macosx' eq hostname( ) ) )
+ && ( 'darwin' eq $OSNAME ) ) ;
}
sub skip_macosx_binary
@@ -7140,7 +7385,7 @@
is( 1, $imap->Debug( 1 ), 'mailimapclient_connect ipv4 + ssl: setting Debug( 1 )' ) ;
# It sounds stupid but it avoids failures on the next test about $imap->connect
- is( '2a01:e34:ecde:70d0:223:54ff:fec2:36d7', resolv( 'petiteipv6.lamiral.info' ), 'resolv: petiteipv6.lamiral.info => 2001:41d0:8:bebd::1' ) ;
+ is( '2a01:e34:ecde:70d0:223:54ff:fec2:36d7', resolv( 'petiteipv6.lamiral.info' ), 'resolv: petiteipv6.lamiral.info => 2a01:e34:ecde:70d0:223:54ff:fec2:36d7' ) ;
like( ref( $imap->connect( ) ), qr/IO::Socket::SSL/, 'mailimapclient_connect ipv6 + ssl: connect to petiteipv6.lamiral.info' ) ;
# This one is ok on petite, not on ks2, do not know why, so commented.
@@ -7340,6 +7585,9 @@
$imap->Debug( $acc->{ debugimap } ) ;
$imap->Timeout( $acc->{ timeout } ) ;
+ #$imap->Keepalive( $acc->{ keepalive } ) ;
+
+
my $side = lc $acc->{ Side } ;
myprint( "$acc->{ Side }: connecting on $side [$host] port [$port]\n" ) ;
@@ -7375,7 +7623,145 @@
return( $imap ) ;
}
+sub tests_compress_ssl
+{
+ note( 'Entering tests_compress_ssl()' ) ;
+ SKIP: {
+ if ( skip_macosx( ) )
+ {
+ skip( 'Tests avoided on host polarhome macosx, no clue "ssl3_get_server_certificate:certificate verify failed"', 12 ) ;
+ }
+ else
+ {
+ my $myimap ;
+ my $acc = {} ;
+ $acc->{ Side } = 'HostK' ;
+ $acc->{ authmech } = 'LOGIN' ;
+ $acc->{ debugimap } = 1 ;
+ $acc->{ compress } = 1 ;
+ $acc->{ N } = 'K' ;
+
+ ok(
+ $myimap = login_imap( 'test1.lamiral.info', 993, 'test1', 'secret1',
+ 1, undef,
+ 1, 100, $acc, {},
+ ), 'acc_compress_imap: test1.lamiral.info test1 ssl' ) ;
+ ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'acc_compress_imap: test1.lamiral.info test1 ssl IsAuthenticated' ) ;
+
+
+ is( $acc->{ imap }, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info ok" ) ;
+ is( undef, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info 2nd nok" ) ;
+
+ ok(
+ $myimap = login_imap( 'test1.lamiral.info', 143, 'test1', 'secret1',
+ 0, undef,
+ 1, 100, $acc, {},
+ ), 'acc_compress_imap: test1.lamiral.info test1 tls' ) ;
+ ok( $myimap && $myimap->IsAuthenticated( ), 'acc_compress_imap: test1.lamiral.info test1 tls IsAuthenticated' ) ;
+
+ is( $acc->{ imap }, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info tls ok" ) ;
+ is( undef, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info tls 2nd nok" ) ;
+
+ # Third, no compression
+ $acc->{ compress } = 0 ;
+ ok(
+ $myimap = login_imap( 'test1.lamiral.info', 993, 'test1', 'secret1',
+ 1, undef,
+ 1, 100, $acc, {},
+ ), 'acc_compress_imap: test1.lamiral.info test1 ssl' ) ;
+ ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'acc_compress_imap: test1.lamiral.info test1 ssl IsAuthenticated' ) ;
+
+
+ is( undef, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info off ok" ) ;
+ is( undef, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info 2nd off ok" ) ;
+
+ }
+ }
+ note( 'Leaving tests_compress_ssl()' ) ;
+ return ;
+}
+
+sub tests_compress
+{
+ note( 'Entering tests_compress()' ) ;
+
+ my $myimap ;
+ my $acc = {} ;
+ $acc->{ Side } = 'HostK' ;
+ $acc->{ authmech } = 'LOGIN' ;
+ $acc->{ debugimap } = 1 ;
+ $acc->{ compress } = 1 ;
+ $acc->{ N } = 'K' ;
+
+ ok(
+ $myimap = login_imap( 'test1.lamiral.info', 143, 'test1', 'secret1',
+ 0, 0,
+ 1, 100, $acc, {},
+ ), 'acc_compress_imap: test1.lamiral.info test1' ) ;
+ ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'acc_compress_imap: test1.lamiral.info test1 IsAuthenticated' ) ;
+
+
+ is( $acc->{ imap }, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info ok" ) ;
+ is( undef, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info 2nd nok" ) ;
+
+ ok(
+ $myimap = login_imap( 'test1.lamiral.info', 143, 'test1', 'secret1',
+ 0, 0,
+ 1, 100, $acc, {},
+ ), 'acc_compress_imap: test1.lamiral.info test1 tls' ) ;
+ ok( $myimap && $myimap->IsAuthenticated( ), 'acc_compress_imap: test1.lamiral.info test1 tls IsAuthenticated' ) ;
+
+ is( $acc->{ imap }, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info tls ok" ) ;
+ is( undef, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info tls 2nd nok" ) ;
+
+ # Third, no compression
+ $acc->{ compress } = 0 ;
+ ok(
+ $myimap = login_imap( 'test1.lamiral.info', 143, 'test1', 'secret1',
+ 0, 0,
+ 1, 100, $acc, {},
+ ), 'acc_compress_imap: test1.lamiral.info test1 ssl' ) ;
+ ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'acc_compress_imap: test1.lamiral.info test1 ssl IsAuthenticated' ) ;
+
+
+ is( undef, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info off ok" ) ;
+ is( undef, acc_compress_imap( $acc ), "acc_compress_imap: test1.lamiral.info 2nd off ok" ) ;
+
+ note( 'Leaving tests_compress()' ) ;
+ return ;
+}
+
+
+sub acc_compress_imap
+{
+ my $acc = shift ;
+
+ if ( ! defined( $acc ) ) { return ; }
+
+ my $ret ;
+ my $imap = $acc->{ imap } ;
+ if ( ! defined $imap ) { return ; }
+
+ if ( $imap && $acc->{ compress } )
+ {
+ myprint( "$acc->{ Side }: Trying to turn imap compression on. Use --nocompress" . $acc->{ N } . " to avoid compression on " . lc( $acc->{ Side } ) . "\n" ) ;
+ if ( $ret = $imap->compress() )
+ {
+ myprint( "$acc->{ Side }: Compression is on now\n" ) ;
+ }
+ else
+ {
+ myprint( "$acc->{ Side }: Failed to turn compression on\n" ) ;
+ }
+ }
+ else
+ {
+ myprint( "$acc->{ Side }: Compression is off. Use --compress" . $acc->{ N } . " to allow compression on " . lc( $acc->{ Side } ) . "\n" ) ;
+ }
+ # $ret is $acc->{ imap } on success, undef on failure or when there is nothing to do.
+ return $ret ;
+}
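
Behind $imap->compress() is the IMAP COMPRESS=DEFLATE extension (RFC 4978): Mail::IMAPClient issues COMPRESS DEFLATE and layers zlib inflate/deflate streams over the socket. A minimal hedged usage sketch; the server name is a placeholder:

    use strict;
    use warnings;
    use Mail::IMAPClient;

    my $imap = Mail::IMAPClient->new(
        Server   => 'imap.example.org',   # any COMPRESS-capable server
        User     => 'test1',
        Password => 'secret1',
        Ssl      => 1,
    ) or die "login failed: $@\n";

    # compress() returns the object on success and undef on failure,
    # which is exactly what acc_compress_imap() propagates via $ret.
    if ( $imap->compress ) {
        print "COMPRESS=DEFLATE is active\n";
    }
    else {
        print "server refused compression\n";
    }
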
sub tests_login_imap
{
@@ -7384,12 +7770,12 @@
is( undef, login_imap( ), 'login_imap: no args => undef' ) ;
SKIP: {
- if ( skip_macosx_binary( ) )
+ if ( skip_macosx( ) )
{
- skip( 'Tests avoided only on binary on host polarhome macosx, no clue "ssl3_get_server_certificate:certificate verify failed"', 11 ) ;
+ skip( 'Tests avoided only on binary on host polarhome macosx, no clue "ssl3_get_server_certificate:certificate verify failed"', 15 ) ;
}
else{
-
+
my $myimap ;
my $acc = {} ;
$acc->{ Side } = 'HostK' ;
@@ -7399,8 +7785,14 @@
# echo | openssl s_client -crlf -connect test1.lamiral.info:993
# ...
# certificate has expired
- # Fix:
- # ssh root@test1.lamiral.info 'apt update && apt upgrade && /etc/init.d/dovecot restart'
+ # Fix: ssh root@test1.lamiral.info 'apt update && apt upgrade && /etc/init.d/dovecot restart'
+ #
+ # or
+ # echo | openssl s_client -crlf -connect test1.lamiral.info:993
+ # ...
+ # Verify return code: 9 (certificate is not yet valid)
+ # Fix: /etc/init.d/openntpd restart
+ # 2021_09_04 done
ok(
$myimap = login_imap( 'test1.lamiral.info', 993, 'test1', 'secret1',
1, undef,
@@ -7408,12 +7800,15 @@
), 'login_imap: test1.lamiral.info test1 ssl' ) ;
ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'login_imap: test1.lamiral.info test1 ssl IsAuthenticated' ) ;
+ is( $myimap, $acc->{ imap }, "login_imap: acc->{ imap } ok test1 ssl") ;
+
ok(
$myimap = login_imap( 'test1.lamiral.info', 143, 'test1', 'secret1',
0, undef,
1, 100, $acc, {},
), 'login_imap: test1.lamiral.info test1 tls' ) ;
ok( $myimap && $myimap->IsAuthenticated( ), 'login_imap: test1.lamiral.info test1 tls IsAuthenticated' ) ;
+ is( $myimap, $acc->{ imap }, "login_imap: acc->{ imap } ok test1 tls") ;
#$IO::Socket::SSL::DEBUG = 4 ;
$acc->{sslargs} = { SSL_version => 'SSLv2' } ;
@@ -7425,7 +7820,7 @@
), 'login_imap: test1.lamiral.info test1 tls SSLv2 not supported' ) ;
#SSL_verify_mode => 1
#SSL_version => 'TLSv1_1'
-
+ is( undef, $acc->{ imap }, "login_imap: acc->{ imap } test1 tls error => undef") ;
# I have left ? exit_clean to be replaced by errors_incr( $mysync, 'error message' )
@@ -7443,6 +7838,7 @@
), 'login_imap: noresol.lamiral.info undef' ) ;
is( 'ERR_CONNECTION_FAILURE_HOST2', errorsanalyse( errors_log( $mysync ) ), 'login_imap: Host2 noresol.lamiral.info => ERR_CONNECTION_FAILURE_HOST2' ) ;
+ is( undef, $acc->{ imap }, "login_imap: acc->{ imap } noresol error => undef") ;
# authentication failure for user2
$mysync = {} ;
@@ -7469,7 +7865,7 @@
}
}
-
+
note( 'Leaving tests_login_imap()' ) ;
return ;
}
@@ -7488,7 +7884,7 @@
sub tests_login_imap_oauth
{
note( 'Entering tests_login_imap_oauth()' ) ;
-
+
oauthgenerateaccess() ;
SKIP: {
@@ -7552,54 +7948,65 @@
}
}
- # oauthdirect authentication success for user1
+ # oauthdirect authentication success for user1
SKIP: {
- if ( ! -r 'oauth2/D_oauth2_oauthdirect_imapsync.gl0@gmail.com.txt' )
+ if ( ! -r 'oauth2/D_oauth2_oauthdirect_imapsync.gl0@gmail.com.txt' )
{
- skip( 'oauthdirect: no oauthdirect file', 2 ) ;
+ skip( 'oauthdirect: no oauthdirect file', 6 ) ;
}
my $myimap ;
- my $mysync = {} ;
- my $acc = {} ;
- $acc->{ Side } = 'Host1' ;
- $acc->{ oauthdirect } = 'oauth2/D_oauth2_oauthdirect_imapsync.gl0@gmail.com.txt' ;
- $acc->{ debugimap } = 1 ;
- $mysync->{ showpasswords } = 1 ;
- $acc->{ authmech } = 'QQQ' ;
- isa_ok(
- $myimap = login_imap( 'imap.gmail.com', 993, 'user_useless', 'password_useless',
- 1, undef,
- 1, 100, $acc, $mysync,
- ), 'Mail::IMAPClient', 'login_imap: user1 good oauthdirect => Mail::IMAPClient' ) ;
+ my $mysync = {} ;
+ my $acc = {} ;
+ $acc->{ Side } = 'Host1' ;
+ $acc->{ oauthdirect } = 'oauth2/D_oauth2_oauthdirect_imapsync.gl0@gmail.com.txt' ;
+ $acc->{ debugimap } = 1 ;
+ $mysync->{ showpasswords } = 1 ;
+ $acc->{ authmech } = 'QQQ' ;
+ isa_ok(
+ $myimap = login_imap( 'imap.gmail.com', 993, 'user_useless', 'password_useless',
+ 1, undef,
+ 1, 100, $acc, $mysync,
+ ), 'Mail::IMAPClient', 'login_imap: user1 good oauthdirect => Mail::IMAPClient' ) ;
- ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'login_imap: gmail oauth2 IsAuthenticated' ) ;
+ ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'login_imap: gmail oauth2 oauthdirect IsAuthenticated' ) ;
+
+ ok( defined( $myimap ) && $myimap->logout( ), 'login_imap: gmail oauth2 oauthdirect logout' ) ;
+ ok( defined( $myimap ) && ! $myimap->IsAuthenticated( ), 'login_imap: gmail oauth2 oauthdirect not IsAuthenticated after logout' ) ;
+ ok( defined( $myimap ) && $myimap->reconnect( ), 'login_imap: gmail oauth2 oauthdirect reconnect ok' ) ;
+ ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'login_imap: gmail oauth2 oauthdirect IsAuthenticated after reconnect' ) ;
}
-
- # oauthaccesstoken authentication success for user1
+
+
+
+ # oauthaccesstoken authentication success for user1
SKIP: {
- if ( ! -r 'oauth2/D_oauth2_access_token_imapsync.gl0@gmail.com.txt' )
+ if ( ! -r 'oauth2/D_oauth2_access_token_imapsync.gl0@gmail.com.txt' )
{
- skip( 'oauthaccesstoken: no access_token file', 2 ) ;
+ skip( 'oauthaccesstoken: no access_token file', 6 ) ;
}
my $myimap ;
- my $mysync = {} ;
- my $acc = {} ;
- $acc->{ Side } = 'Host1' ;
- $acc->{ oauthaccesstoken } = 'oauth2/D_oauth2_access_token_imapsync.gl0@gmail.com.txt' ;
- $acc->{ debugimap } = 1 ;
- $mysync->{ showpasswords } = 1 ;
- $acc->{ authmech } = 'QQQ' ;
- isa_ok(
- $myimap = login_imap( 'imap.gmail.com', 993, 'imapsync.gl0@gmail.com', 'password_useless',
- 1, undef,
- 1, 100, $acc, $mysync,
- ), 'Mail::IMAPClient', 'login_imap: user1 good oauthaccesstoken => Mail::IMAPClient' ) ;
+ my $mysync = {} ;
+ my $acc = {} ;
+ $acc->{ Side } = 'Host1' ;
+ $acc->{ oauthaccesstoken } = 'oauth2/D_oauth2_access_token_imapsync.gl0@gmail.com.txt' ;
+ $acc->{ debugimap } = 1 ;
+ $mysync->{ showpasswords } = 1 ;
+ $acc->{ authmech } = 'QQQ' ;
+ isa_ok(
+ $myimap = login_imap( 'imap.gmail.com', 993, 'imapsync.gl0@gmail.com', 'password_useless',
+ 1, undef,
+ 1, 100, $acc, $mysync,
+ ), 'Mail::IMAPClient', 'login_imap: user1 good oauthaccesstoken => Mail::IMAPClient' ) ;
- ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'login_imap: gmail oauth2 oauthaccesstoken IsAuthenticated' ) ;
+ ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'login_imap: gmail oauth2 oauthaccesstoken IsAuthenticated' ) ;
+ ok( defined( $myimap ) && $myimap->logout( ), 'login_imap: gmail oauth2 oauthaccesstoken logout' ) ;
+ ok( defined( $myimap ) && ! $myimap->IsAuthenticated( ), 'login_imap: gmail oauth2 oauthaccesstoken not IsAuthenticated after logout' ) ;
+ ok( defined( $myimap ) && $myimap->reconnect( ), 'login_imap: gmail oauth2 oauthaccesstoken reconnect ok' ) ;
+ ok( defined( $myimap ) && $myimap->IsAuthenticated( ), 'login_imap: gmail oauth2 oauthaccesstoken IsAuthenticated after reconnect' ) ;
}
-
-
+
+
note( 'Leaving tests_login_imap_oauth()' ) ;
return ;
}
@@ -7614,6 +8021,8 @@
$ssl, $tls,
$uid, $split, $acc, $mysync ) = @allargs ;
+ $acc->{ imap } = undef ;
+
if ( ! all_defined( $host, $port, $user, $acc->{ Side } ) )
{
return ;
@@ -7676,6 +8085,7 @@
if ( authenticate_imap( $imap, @allargs ) )
{
myprint( "$acc->{ Side }: success login on [$host] with user [$user] auth [$acc->{ authmech }] or [LOGIN]\n" ) ;
+ $acc->{ imap } = $imap ;
return( $imap ) ;
}
else
@@ -7715,19 +8125,27 @@
$imap->Buffer( $buffersize || $DEFAULT_BUFFER_SIZE ) ;
$imap->Uid( $uid ) ;
-
$imap->Peek( 1 ) ;
$imap->Debug( $acc->{ debugimap } ) ;
if ( $mysync->{ showpasswords } ) {
$imap->Showcredentials( 1 ) ;
}
- defined $acc->{ timeout } and $imap->Timeout( $acc->{ timeout } ) ;
+ if ( defined( $acc->{ timeout } ) )
+ {
+ $imap->Timeout( $acc->{ timeout } ) ;
+ }
+
+ if ( defined $acc->{ keepalive } )
+ {
+ $imap->Keepalive( $acc->{ keepalive } ) ;
+ }
if ( defined $acc->{ reconnectretry } )
{
$imap->Reconnectretry( $acc->{ reconnectretry } ) ;
}
+
$imap->{IMAPSYNC_RECONNECT_COUNT} = 0 ;
$imap->Ignoresizeerrors( $allowsizemismatch ) ;
$split and $imap->Maxcommandlength( $SPLIT_FACTOR * $split ) ;
@@ -7768,15 +8186,15 @@
$acc->{ authmech } = 'XOAUTH2 direct' ;
return( oauthdirect( $mysync, $acc, $imap, $host, $user ) ) ;
}
-
-
+
+
if ( defined $acc->{ oauthaccesstoken } )
{
$acc->{ authmech } = 'XOAUTH2 accesstoken' ;
return( oauthaccesstoken( $mysync, $acc, $imap, $host, $user ) ) ;
}
-
-
+
+
if ( $acc->{ proxyauth } ) {
@@ -7877,7 +8295,12 @@
{
$oauthdirect_str = $acc->{ oauthdirect } || 'Please define oauthdirect value' ;
}
- if ( $imap->authenticate('XOAUTH2', sub { return $oauthdirect_str } ) )
+
+ $imap->Authmechanism( 'XOAUTH2' ) ;
+ $imap->Authcallback( sub { return $oauthdirect_str } ) ;
+
+ #if ( $imap->authenticate('XOAUTH2', sub { return $oauthdirect_str } ) )
+ if ( $imap->login( ) )
{
return 1 ;
}
@@ -7905,7 +8328,7 @@
{
$oauthaccesstoken_str = $acc->{ oauthaccesstoken } || 'Please define oauthaccesstoken value' ;
}
-
+
my $oauth_string = "user=" . $user . "\x01auth=Bearer ". $oauthaccesstoken_str . "\x01\x01" ;
#myprint "oauth_string: $oauth_string\n" ;
@@ -7913,8 +8336,12 @@
#myprint "oauth_string_base64: $oauth_string_base64\n" ;
my $oauthdirect_str = $oauth_string_base64 ;
-
- if ( $imap->authenticate('XOAUTH2', sub { return $oauthdirect_str } ) )
+
+ $imap->Authmechanism( 'XOAUTH2' ) ;
+ $imap->Authcallback( sub { return $oauthdirect_str } ) ;
+
+ #if ( $imap->authenticate('XOAUTH2', sub { return $oauthdirect_str } ) )
+ if ( $imap->login( ) )
{
return 1 ;
}
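
The string assembled above follows the SASL XOAUTH2 initial client response format used by Gmail and Office 365: "user=<user>\x01auth=Bearer <token>\x01\x01", base64-encoded without line breaks. A self-contained sketch with a placeholder token:

    use strict;
    use warnings;
    use MIME::Base64 qw( encode_base64 );

    my $user  = 'imapsync.gl0@gmail.com';
    my $token = 'ya29.EXAMPLE_ACCESS_TOKEN';   # placeholder, not a real token

    my $oauth_string = "user=" . $user . "\x01auth=Bearer " . $token . "\x01\x01";
    my $oauth_string_base64 = encode_base64( $oauth_string, q{} );  # q{} = no newlines
    print "$oauth_string_base64\n";
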
@@ -8027,7 +8454,7 @@
my $imap = shift;
my $string = mysprintf("%s\x00%s\x00%s", $imap->User,
- $imap->Authuser, $imap->Password);
+ defined $imap->Authuser ? $imap->Authuser : "", $imap->Password);
return encode_base64("$string", q{});
}
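
This helper appears to build an RFC 4616 SASL PLAIN initial response, authzid\0authcid\0password, base64-encoded without line breaks; the change guards against an undefined Authuser producing an "uninitialized value" warning. A sketch of the same construction:

    use strict;
    use warnings;
    use MIME::Base64 qw( encode_base64 );

    my ( $user, $authuser, $password ) = ( 'test1', undef, 'secret1' );

    # authorization-id \0 authentication-id \0 password; the authcid may be
    # empty when no separate admin login is in play (the guard added here).
    my $string = sprintf "%s\x00%s\x00%s",
        $user,
        defined $authuser ? $authuser : "",
        $password;
    print encode_base64( $string, q{} ), "\n";
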
@@ -8203,6 +8630,38 @@
return ;
}
+sub keepalive1
+{
+ my $mysync = shift ;
+
+ $mysync->{ acc1 }->{ keepalive } = defined $mysync->{ acc1 }->{ keepalive } ? $mysync->{ acc1 }->{ keepalive } : 1 ;
+
+ if ( $mysync->{ acc1 }->{ keepalive } )
+ {
+ myprint( "Host1: imap connection keepalive is on on host1. Use --nokeepalive1 to disable it.\n" ) ;
+ }
+ else
+ {
+ myprint( "Host1: imap connection keepalive is off on host1. Use --keepalive1 to enable it.\n" ) ;
+ }
+}
+
+sub keepalive2
+{
+ my $mysync = shift ;
+
+ $mysync->{ acc2 }->{ keepalive } = defined $mysync->{ acc2 }->{ keepalive } ? $mysync->{ acc2 }->{ keepalive } : 1 ;
+
+ if ( $mysync->{ acc2 }->{ keepalive } )
+ {
+ myprint( "Host2: imap connection keepalive is on on host2. Use --nokeepalive2 to disable it.\n" ) ;
+ }
+ else
+ {
+ myprint( "Host2: imap connection keepalive is off on host2. Use --keepalive2 to enable it.\n" ) ;
+ }
+}
+
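
The keepalive flag ends up in Mail::IMAPClient's Keepalive attribute, which sets SO_KEEPALIVE on the socket so long idle syncs are less likely to be dropped by NATs and firewalls. A hedged sketch of the defaulting idiom plus its later use:

    use strict;
    use warnings;
    use Mail::IMAPClient;

    my $acc = { };   # per-account option hash, as in imapsync
    # Respect an explicit 0 (--nokeepalive1); otherwise default to on.
    $acc->{ keepalive } = defined $acc->{ keepalive } ? $acc->{ keepalive } : 1;

    my $imap = Mail::IMAPClient->new;          # unconnected object, illustration only
    $imap->Keepalive( $acc->{ keepalive } );   # applied in login_imap() above
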
sub banner_imapsync
@@ -8212,8 +8671,8 @@
my $banner_imapsync = join q{},
q{$RCSfile: imapsync,v $ },
- q{$Revision: 2.148 $ },
- q{$Date: 2021/07/22 14:21:09 $ },
+ q{$Revision: 2.178 $ },
+ q{$Date: 2022/01/12 21:28:37 $ },
"\n",
"Command line used, run by $EXECUTABLE_NAME:\n",
"$PROGRAM_NAME ", command_line_nopassword( $mysync, @argv ), "\n" ;
@@ -8342,16 +8801,16 @@
{
#
my $pid_filename = shift @ARG ;
-
+
#myprint( "In remove_pidfile_not_running $pid_filename\n" ) ;
if ( ! $pid_filename ) { myprint( "No variable pid_filename\n" ) ; return } ;
if ( ! -e $pid_filename )
- {
+ {
myprint( "File $pid_filename does not exist\n" ) ;
return ;
}
#myprint( "Still In remove_pidfile_not_running $pid_filename\n" ) ;
-
+
if ( ! -f $pid_filename ) { myprint( "File $pid_filename is not a file\n" ) ; return } ;
my $pid = firstline( $pid_filename ) ;
@@ -8437,7 +8896,7 @@
if ( ! $tail ) { return ; }
if ( ! -e $pidfile ) { return ; }
-
+
my $pidtotail = firstline( $pidfile ) ;
if ( ! $pidtotail ) { return ; }
@@ -8550,7 +9009,7 @@
{
# returns undef if something is considered fatal
# returns 1 otherwise
-
+
#myprint( "In write_pidfile\n" ) ;
if ( ! @ARG ) { return 1 ; }
@@ -8912,6 +9371,7 @@
return( create_folder_old( $mysync, $myimap2 , $h2_fold , $h1_fold ) ) ;
}
+ # $imap->exists() calls $imap->status(), which issues an IMAP STATUS command on the folder
myprint( "Creating folder [$h2_fold] on host2\n" ) ;
if ( ( 'INBOX' eq uc $h2_fold )
and ( $myimap2->exists( $h2_fold ) ) ) {
@@ -8944,9 +9404,12 @@
"Could not create folder [$h2_fold] from [$h1_fold]: " ,
$myimap2->LastError( ), "\n" ;
errors_incr( $mysync, $error ) ;
- # success if folder exists ("already exists" error)
- return( 1 ) if $myimap2->exists( $h2_fold ) ;
- # failure since create failed
+ # success if folder exists ("already exists" error) or selectable
+ if ( $myimap2->exists( $h2_fold ) or select_folder( $mysync, $myimap2, $h2_fold, 'Host2' ) )
+ {
+ return( 1 ) ;
+ }
+ # failure: create failed, and the folder neither exists nor is selectable
return( 0 ) ;
}else{
#create succeeded
@@ -10119,8 +10582,8 @@
{
note( 'Entering tests_regexflags()' ) ;
- my $mysync = {} ;
-
+ my $mysync = {} ;
+
ok( q{} eq regexflags( $mysync, q{} ), 'regexflags, null string q{}' ) ;
ok( q{\Seen NonJunk $Spam} eq regexflags( $mysync, q{\Seen NonJunk $Spam} ), q{regexflags, nothing to do} ) ;
@@ -10214,14 +10677,14 @@
$mysync->{ regexflag } = [ ] ;
$mysync->{ filterbuggyflags } = 1 ;
filterbuggyflags( $mysync ) ;
-
+
is( '\Deleted \Answered \Draft \Flagged',
regexflags( $mysync, '\\Deleted \\Answered \\RECEIPTCHECKED \\Draft \\Indexed \\Flagged' ),
'regexflags: remove famous /X 1' ) ;
is( '\\Deleted \\Flagged \\Answered \\Draft',
regexflags( $mysync, '\\Deleted \\RECEIPTCHECKED \\Flagged \\Answered \\Indexed \\Draft' ),
- 'regexflags: remove famous /X 2' ) ;
+ 'regexflags: remove famous /X 2' ) ;
is( '\ ', '\\ ', 'regexflags: \ is \\ ' ) ;
is( '\\ ', '\\ ', 'regexflags: \\ is \\ ' ) ;
@@ -10234,7 +10697,7 @@
{
my $mysync = shift ;
my $flags = shift ;
-
+
foreach my $regexflag ( @{ $mysync->{ regexflag } } )
{
my $flags_orig = $flags ;
@@ -10347,18 +10810,21 @@
{
note( 'Entering tests_permanentflags()' ) ;
- my $string;
- ok(q{} eq permanentflags(' * OK [PERMANENTFLAGS (\* \Draft \Answered)] Limited'),
- 'permanentflags \*');
- ok('\Draft \Answered' eq permanentflags(' * OK [PERMANENTFLAGS (\Draft \Answered)] Limited'),
- 'permanentflags \Draft \Answered');
- ok('\Draft \Answered'
- eq permanentflags('Blabla',
+ my $mysync = { } ;
+ ok( q{} eq permanentflags( $mysync, ' * OK [PERMANENTFLAGS (\* \Draft \Answered)] Limited' ),
+ 'permanentflags \*' ) ;
+
+ ok( '\Draft \Answered' eq permanentflags( $mysync, ' * OK [PERMANENTFLAGS (\Draft \Answered)] Limited' ),
+ 'permanentflags \Draft \Answered' ) ;
+
+ ok( '\Draft \Answered'
+ eq permanentflags( $mysync, 'Blabla',
' * OK [PERMANENTFLAGS (\Draft \Answered)] Limited',
- 'Blabla'),
+ 'Blabla' ),
'permanentflags \Draft \Answered'
- );
- ok(q{} eq permanentflags('Blabla'), 'permanentflags nothing');
+ ) ;
+
+ ok( q{} eq permanentflags( $mysync, 'Blabla' ), 'permanentflags nothing' ) ;
note( 'Leaving tests_permanentflags()' ) ;
return ;
@@ -10366,7 +10832,9 @@
sub permanentflags
{
- my @lines = @_ ;
+ my $mysync = shift ;
+
+ my @lines = @_ ;
foreach my $line (@lines) {
if ( $line =~ m{\[PERMANENTFLAGS\s\(([^)]+?)\)\]}x ) {
@@ -10449,37 +10917,37 @@
note( 'Entering tests_flags_for_host2()' ) ;
is( undef, flags_for_host2( ), 'flags_for_host2: no args => undef' ) ;
-
+
my $mysync ;
is( undef, flags_for_host2( $mysync ), 'flags_for_host2: undef => undef' ) ;
-
+
$mysync = { } ;
is( undef, flags_for_host2( $mysync ), 'flags_for_host2: nothing => undef' ) ;
-
+
is( q{}, flags_for_host2( $mysync, '' ), 'flags_for_host2: no flags => empty string' ) ;
-
+
is( q{}, flags_for_host2( $mysync, '\Recent' ), 'flags_for_host2: \Recent => empty string' ) ;
-
+
is( q{\Seen}, flags_for_host2( $mysync, '\Recent \Seen' ), 'flags_for_host2: \Recent \Seen => \Seen' ) ;
-
+
is( q{\Deleted \Seen}, flags_for_host2( $mysync, '\Deleted \Recent \Seen' ), 'flags_for_host2: \Deleted \Recent \Seen => \Deleted \Seen' ) ;
-
+
$mysync->{ flagscase } = 0 ;
is( q{\DELETED \Seen}, flags_for_host2( $mysync, '\DELETED \Seen' ), 'flags_for_host2: flagscase = 0 \DELETED \Seen => \DELETED \Seen' ) ;
-
+
$mysync->{ flagscase } = 1 ;
is( q{\Deleted \Seen}, flags_for_host2( $mysync, '\DELETED \Seen' ), 'flags_for_host2: flagscase = 1 \DELETED \Seen => \Deleted \Seen' ) ;
-
+
$mysync->{ filterflags } = 0 ;
is( q{\Seen \Blabla}, flags_for_host2( $mysync, '\Seen \Blabla', '\Seen \Junk' ), 'flags_for_host2: filterflags = 0 \Seen \Blabla among \Seen \Junk => \Seen \Blabla' ) ;
-
+
$mysync->{ filterflags } = 1 ;
is( q{\Seen}, flags_for_host2( $mysync, '\Seen \Blabla', '\Seen \Junk' ), 'flags_for_host2: filterflags = 1 \Seen \Blabla among \Seen \Junk => \Seen' ) ;
-
+
$mysync->{ filterflags } = 1 ;
is( q{\Seen \Blabla}, flags_for_host2( $mysync, '\Seen \Blabla', '' ), 'flags_for_host2: filterflags = 1 \Seen \Blabla among "" => \Seen \Blabla' ) ;
-
-
+
+
note( 'Leaving tests_flags_for_host2()' ) ;
return ;
}
@@ -10492,27 +10960,27 @@
my $mysync = shift ;
my $h1_flags = shift ;
my $permanentflags2 = shift ;
-
- if ( ! all_defined( $mysync, $h1_flags ) ) { return ; } ;
-
+
+ if ( ! all_defined( $mysync, $h1_flags ) ) { return ; } ;
+
# RFC 2060: This flag can not be altered by any client
$h1_flags =~ s@\\Recent\s?@@xgi ;
-
+
my $h1_flags_re ;
if ( $mysync->{ regexflag } and defined( $h1_flags_re = regexflags( $mysync, $h1_flags ) ) ) {
$h1_flags = $h1_flags_re ;
}
-
+
if ( $mysync->{ flagscase } )
{
$h1_flags = flagscase( $h1_flags ) ;
}
-
+
if ( $permanentflags2 and $mysync->{ filterflags } )
{
$h1_flags = flags_filter( $h1_flags, $permanentflags2 ) ;
}
-
+
return( $h1_flags ) ;
}
@@ -11052,17 +11520,59 @@
$string_len = length_ref( $string_ref ) ;
- $debugcontent and myprint(
- q{=} x $STD_CHAR_PER_LINE, "\n",
- "F message content begin next line ($string_len characters long)\n",
- ${ $string_ref },
- "\nF message content ended on previous line\n", q{=} x $STD_CHAR_PER_LINE, "\n" ) ;
+ $mysync->{ debugcontent } and myprint( debugcontent( $mysync, $string_ref ) ) ;
myprint( debugmemory( $mysync, " at M3" ) ) ;
return $string_len ;
}
+sub tests_debugcontent
+{
+ note( 'Entering tests_debugcontent()' ) ;
+
+ is( undef, debugcontent( ), 'debugcontent: no args => undef' ) ;
+ my $mysync = { } ;
+ is( undef, debugcontent( $mysync ), 'debugcontent: undef => undef' ) ;
+ is( undef, debugcontent( $mysync, 'mm' ), 'debugcontent: undef, mm => undef' ) ;
+ #my $string_ref = \'zztop' ;
+ my $string = '================================================================================
+F message content begin next line (2 characters long)
+mm
+F message content ended on previous line
+================================================================================
+' ;
+ is( $string, debugcontent( $mysync, \'mm' ), 'debugcontent: undef, mm => mm' ) ;
+
+ note( 'Leaving tests_debugcontent()' ) ;
+ return ;
+}
+
+sub debugcontent
+{
+ my $mysync = shift @ARG ;
+ if ( ! defined $mysync ) { return ; }
+
+ my $string_ref = shift @ARG ;
+ if ( ! defined $string_ref ) { return ; }
+ if ( 'SCALAR' ne ref( $string_ref ) ) { return ; }
+
+ my $string_len = length_ref( $string_ref ) ;
+
+ my $string = join( '',
+ q{=} x $STD_CHAR_PER_LINE, "\n",
+ "F message content begin next line ($string_len characters long)\n",
+ ${ $string_ref },
+ "\nF message content ended on previous line\n", q{=} x $STD_CHAR_PER_LINE, "\n",
+ ) ;
+
+ return $string ;
+}
+
+
+
+
+
sub tests_truncmess
{
note( 'Entering tests_truncmess()' ) ;
@@ -11809,9 +12319,9 @@
$mysync->{ biggest_message_transferred } = max( $string_len, $mysync->{ biggest_message_transferred } ) ;
my $time_spent = timesince( $mysync->{begin_transfer_time} ) ;
- my $rate = bytes_display_string( $mysync->{total_bytes_transferred} / $time_spent ) ;
+ my $rate = bytes_display_string_bin( $mysync->{total_bytes_transferred} / $time_spent ) ;
my $eta = eta( $mysync ) ;
- my $amount_transferred = bytes_display_string( $mysync->{total_bytes_transferred} ) ;
+ my $amount_transferred = bytes_display_string_bin( $mysync->{total_bytes_transferred} ) ;
myprintf( "msg %s/%-19s copied to %s/%-10s %.2f msgs/s %s/s %s copied %s\n",
$h1_fold, "$h1_msg {$string_len}", $h2_fold, $new_id, $mysync->{nb_msg_transferred}/$time_spent, $rate,
$amount_transferred,
@@ -14052,42 +14562,72 @@
-sub tests_bytes_display_string
+sub tests_bytes_display_string_bin
{
- note( 'Entering tests_bytes_display_string()' ) ;
+ note( 'Entering tests_bytes_display_string_bin()' ) ;
+
+ is( 'NA', bytes_display_string_bin( ), 'bytes_display_string_bin: no args => NA' ) ;
+ is( 'NA', bytes_display_string_bin( undef ), 'bytes_display_string_bin: undef => NA' ) ;
+ is( 'NA', bytes_display_string_bin( 'blabla' ), 'bytes_display_string_bin: blabla => NA' ) ;
+
+ is( '0.000 KiB', bytes_display_string_bin( 0 ), 'bytes_display_string_bin: 0 => 0.000 KiB' ) ;
+ is( '0.001 KiB', bytes_display_string_bin( 1 ), 'bytes_display_string_bin: 1 => 0.001 KiB' ) ;
+ is( '0.010 KiB', bytes_display_string_bin( 10 ), 'bytes_display_string_bin: 10 => 0.010 KiB' ) ;
+ is( '0.976 KiB', bytes_display_string_bin( 999 ), 'bytes_display_string_bin: 999 => 0.976 KiB' ) ;
+ note( bytes_display_string_bin( 999 ) ) ;
+
+ is( '0.999 KiB', bytes_display_string_bin( 1023 ), 'bytes_display_string_bin: 1023 => 0.999 KiB' ) ;
+ note( bytes_display_string_bin( 1023 ) ) ;
+ is( '1.000 KiB', bytes_display_string_bin( 1024 ), 'bytes_display_string_bin: 1024 => 1.000 KiB' ) ;
+ note( bytes_display_string_bin( 1024 ) ) ;
+ is( '1.001 KiB', bytes_display_string_bin( 1025 ), 'bytes_display_string_bin: 1025 => 1.001 KiB' ) ;
+
+ is( '9.999 KiB', bytes_display_string_bin( 10_239 ), 'bytes_display_string_bin: 10_239 => 9.999 KiB' ) ;
+ note( bytes_display_string_bin( 10_239 ) ) ;
+
+ is( '10.000 KiB', bytes_display_string_bin( 10_240 ), 'bytes_display_string_bin: 10_240 => 10.000 KiB' ) ;
+ note( bytes_display_string_bin( 10_240 ) ) ;
+
+ is( '999.999 KiB', bytes_display_string_bin( 1_023_999 ), 'bytes_display_string_bin: 1_023_999 => 999.999 KiB' ) ;
+ note( bytes_display_string_bin( 1_023_999 ) ) ;
+
+ is( '0.977 MiB', bytes_display_string_bin( 1_024_000 ), 'bytes_display_string_bin: 1_024_000 => 0.977 MiB' ) ;
+ note( bytes_display_string_bin( 1_024_000 ) ) ;
+
+ is( '0.999 MiB', bytes_display_string_bin( 1_047_527 ), 'bytes_display_string_bin: 1_047_527 => 0.999 MiB' ) ;
+ note( bytes_display_string_bin( 1_047_527 ) ) ;
+
+ is( '0.999 MiB', bytes_display_string_bin( 1_048_051 ), 'bytes_display_string_bin: 1_048_051 => 0.999 MiB' ) ;
+ note( bytes_display_string_bin( 1_048_051 ) ) ;
+
+ is( '1.000 MiB', bytes_display_string_bin( 1_048_052 ), 'bytes_display_string_bin: 1_048_052 => 1.000 MiB' ) ;
+ note( bytes_display_string_bin( 1_048_052 ) ) ;
+
+ is( '1.000 MiB', bytes_display_string_bin( 1_048_575 ), 'bytes_display_string_bin: 1_048_575 => 1.000 MiB' ) ;
+ is( '1.000 MiB', bytes_display_string_bin( 1_048_576 ), 'bytes_display_string_bin: 1_048_576 => 1.000 MiB' ) ;
+
+ is( '1.000 GiB', bytes_display_string_bin( 1_073_741_823 ), 'bytes_display_string_bin: 1_073_741_823 => 1.000 GiB' ) ;
+ is( '1.000 GiB', bytes_display_string_bin( 1_073_741_824 ), 'bytes_display_string_bin: 1_073_741_824 => 1.000 GiB' ) ;
- is( 'NA', bytes_display_string( ), 'bytes_display_string: no args => NA' ) ;
- is( 'NA', bytes_display_string( undef ), 'bytes_display_string: undef => NA' ) ;
- is( 'NA', bytes_display_string( 'blabla' ), 'bytes_display_string: blabla => NA' ) ;
+ is( '1.000 TiB', bytes_display_string_bin( 1_099_511_627_775 ), 'bytes_display_string_bin: 1_099_511_627_775 => 1.000 TiB' ) ;
+ is( '1.000 TiB', bytes_display_string_bin( 1_099_511_627_776 ), 'bytes_display_string_bin: 1_099_511_627_776 => 1.000 TiB' ) ;
- is( '0.000 KiB', bytes_display_string( 0 ), 'bytes_display_string: 0' ) ;
- is( '0.001 KiB', bytes_display_string( 1 ), 'bytes_display_string: 1' ) ;
- is( '0.010 KiB', bytes_display_string( 10 ), 'bytes_display_string: 10' ) ;
- is( '1.000 MiB', bytes_display_string( 1_048_575 ), 'bytes_display_string: 1_048_575' ) ;
- is( '1.000 MiB', bytes_display_string( 1_048_576 ), 'bytes_display_string: 1_048_576' ) ;
+ is( '1.000 PiB', bytes_display_string_bin( 1_125_899_906_842_623 ), 'bytes_display_string_bin: 1_125_899_906_842_623 => 1.000 PiB' ) ;
+ is( '1.000 PiB', bytes_display_string_bin( 1_125_899_906_842_624 ), 'bytes_display_string_bin: 1_125_899_906_842_624 => 1.000 PiB' ) ;
- is( '1.000 GiB', bytes_display_string( 1_073_741_823 ), 'bytes_display_string: 1_073_741_823 ' ) ;
- is( '1.000 GiB', bytes_display_string( 1_073_741_824 ), 'bytes_display_string: 1_073_741_824 ' ) ;
+ is( '1024.000 PiB', bytes_display_string_bin( 1_152_921_504_606_846_975 ), 'bytes_display_string_bin: 1_152_921_504_606_846_975 => 1024.000 PiB' ) ;
+ is( '1024.000 PiB', bytes_display_string_bin( 1_152_921_504_606_846_976 ), 'bytes_display_string_bin: 1_152_921_504_606_846_976 => 1024.000 PiB' ) ;
- is( '1.000 TiB', bytes_display_string( 1_099_511_627_775 ), 'bytes_display_string: 1_099_511_627_775' ) ;
- is( '1.000 TiB', bytes_display_string( 1_099_511_627_776 ), 'bytes_display_string: 1_099_511_627_776' ) ;
-
- is( '1.000 PiB', bytes_display_string( 1_125_899_906_842_623 ), 'bytes_display_string: 1_125_899_906_842_623' ) ;
- is( '1.000 PiB', bytes_display_string( 1_125_899_906_842_624 ), 'bytes_display_string: 1_125_899_906_842_624' ) ;
-
- is( '1024.000 PiB', bytes_display_string( 1_152_921_504_606_846_975 ), 'bytes_display_string: 1_152_921_504_606_846_975' ) ;
- is( '1024.000 PiB', bytes_display_string( 1_152_921_504_606_846_976 ), 'bytes_display_string: 1_152_921_504_606_846_976' ) ;
-
- is( '1048576.000 PiB', bytes_display_string( 1_180_591_620_717_411_303_424 ), 'bytes_display_string: 1_180_591_620_717_411_303_424' ) ;
-
- #myprint( bytes_display_string( 1_180_591_620_717_411_303_424 ), "\n" ) ;
- note( 'Leaving tests_bytes_display_string()' ) ;
+ is( '1048576.000 PiB', bytes_display_string_bin( 1_180_591_620_717_411_303_424 ), 'bytes_display_string_bin: 1_180_591_620_717_411_303_424 => 1048576.000 PiB' ) ;
+ note( bytes_display_string_bin( 1_180_591_620_717_411_303_424 ) ) ;
+ note( bytes_display_string_bin( 3_000_000_000 ) ) ;
+ note( 'Leaving tests_bytes_display_string_bin()' ) ;
return ;
}
-sub bytes_display_string
+sub bytes_display_string_bin
{
my ( $bytes ) = @_ ;
@@ -14128,6 +14668,95 @@
return( $readable_value ) ;
}
+sub tests_bytes_display_string_dec
+{
+ note( 'Entering tests_bytes_display_string_dec()' ) ;
+
+ is( 'NA', bytes_display_string_dec( ), 'bytes_display_string_dec: no args => NA' ) ;
+ is( 'NA', bytes_display_string_dec( undef ), 'bytes_display_string_dec: undef => NA' ) ;
+ is( 'NA', bytes_display_string_dec( 'blabla' ), 'bytes_display_string_dec: blabla => NA' ) ;
+
+ is( '0 bytes', bytes_display_string_dec( 0 ), 'bytes_display_string_dec: 0 => 0 bytes' ) ;
+ is( '1 bytes', bytes_display_string_dec( 1 ), 'bytes_display_string_dec: 1 => 1 bytes' ) ;
+ is( '10 bytes', bytes_display_string_dec( 10 ), 'bytes_display_string_dec: 10 => 10 bytes' ) ;
+ is( '999 bytes', bytes_display_string_dec( 999 ), 'bytes_display_string_dec: 999 => 999 bytes' ) ;
+
+ is( '1.000 KB', bytes_display_string_dec( 1000 ), 'bytes_display_string_dec: 1000 => 1.000 KB' ) ;
+ is( '1.001 KB', bytes_display_string_dec( 1001 ), 'bytes_display_string_dec: 1001 => 1.001 KB' ) ;
+
+ is( '999.999 KB', bytes_display_string_dec( 999_999 ), 'bytes_display_string_dec: 999_999 => 999.999 KB' ) ;
+
+ is( '1.000 MB', bytes_display_string_dec( 1_000_000 ), 'bytes_display_string_dec: 1_000_000 => 1.000 MB' ) ;
+ is( '1.000 MB', bytes_display_string_dec( 1_000_500 ), 'bytes_display_string_dec: 1_000_500 => 1.000 MB' ) ;
+ is( '1.001 MB', bytes_display_string_dec( 1_000_501 ), 'bytes_display_string_dec: 1_000_501 => 1.001 MB' ) ;
+ is( '999.999 MB', bytes_display_string_dec( 999_999_000 ), 'bytes_display_string_dec: 999_999_000 => 999.999 MB' ) ;
+ is( '999.999 MB', bytes_display_string_dec( 999_999_499 ), 'bytes_display_string_dec: 999_999_499 => 999.999 MB' ) ;
+ is( '1.000 GB', bytes_display_string_dec( 999_999_500 ), 'bytes_display_string_dec: 999_999_500 => 1.000 GB' ) ;
+
+ is( '1.000 GB', bytes_display_string_dec( 1_000_000_000 ), 'bytes_display_string_dec: 1_000_000_000 => 1.000 GB' ) ;
+ is( '1.000 GB', bytes_display_string_dec( 1_000_500_000 ), 'bytes_display_string_dec: 1_000_500_000 => 1.000 GB' ) ;
+ is( '1.001 GB', bytes_display_string_dec( 1_000_500_001 ), 'bytes_display_string_dec: 1_000_500_001 => 1.001 GB' ) ;
+ is( '999.999 GB', bytes_display_string_dec( 999_999_000_000 ), 'bytes_display_string_dec: 999_999_000_000 => 999.999 GB' ) ;
+ is( '999.999 GB', bytes_display_string_dec( 999_999_499_999 ), 'bytes_display_string_dec: 999_999_499_999 => 999.999 GB' ) ;
+ is( '1.000 TB', bytes_display_string_dec( 999_999_500_000 ), 'bytes_display_string_dec: 999_999_500_000 => 1.000 TB' ) ;
+
+ is( '1.000 TB', bytes_display_string_dec( 1_000_000_000_000 ), 'bytes_display_string_dec: 1_000_000_000_000 => 1.000 TB' ) ;
+ is( '1.000 TB', bytes_display_string_dec( 1_000_500_000_000 ), 'bytes_display_string_dec: 1_000_500_000_000 => 1.000 TB' ) ;
+ is( '1.001 TB', bytes_display_string_dec( 1_000_500_000_001 ), 'bytes_display_string_dec: 1_000_500_000_001 => 1.001 TB' ) ;
+ is( '999.999 TB', bytes_display_string_dec( 999_999_000_000_000 ), 'bytes_display_string_dec: 999_999_000_000_000 => 999.999 TB' ) ;
+ is( '999.999 TB', bytes_display_string_dec( 999_999_499_999_999 ), 'bytes_display_string_dec: 999_999_499_999_999 => 999.999 TB' ) ;
+ is( '1.000 PB', bytes_display_string_dec( 999_999_500_000_000 ), 'bytes_display_string_dec: 999_999_500_000_000 => 1.000 PB' ) ;
+
+ is( '3.000 GB', bytes_display_string_dec( 3_000_000_000 ), 'bytes_display_string_dec: 3_000_000_000 => 3.000 GB' ) ;
+
+ note( 'Leaving tests_bytes_display_string_dec()' ) ;
+ return ;
+}
+
+sub bytes_display_string_dec
+{
+ my ( $bytes ) = @_ ;
+
+ my $readable_value = q{} ;
+
+ if ( ! defined( $bytes ) ) {
+ return( 'NA' ) ;
+ }
+
+ if ( not match_number( $bytes ) ) {
+ return( 'NA' ) ;
+ }
+
+ SWITCH: {
+ if ( abs( $bytes ) < ( 1000 ) ) {
+ $readable_value = mysprintf( '%.0f bytes', $bytes ) ;
+ last SWITCH ;
+ }
+ if ( abs( $bytes ) < ( 1000**2 ) ) {
+ $readable_value = mysprintf( '%.3f KB', $bytes / 1000 ) ;
+ last SWITCH ;
+ }
+ if ( abs( $bytes ) < ( 999_999_500 ) ) {
+ $readable_value = mysprintf( '%.3f MB', $bytes / ( 1000**2 ) ) ;
+ last SWITCH ;
+ }
+ if ( abs( $bytes ) < ( 999_999_500_000 ) ) {
+ $readable_value = mysprintf( '%.3f GB', $bytes / ( 1000**3 ) ) ;
+ last SWITCH ;
+ }
+ if ( abs( $bytes ) < ( 999_999_500_000_000 ) ) {
+ $readable_value = mysprintf( '%.3f TB', $bytes / ( 1000**4 ) ) ;
+ last SWITCH ;
+ } else {
+ $readable_value = mysprintf( '%.3f PB', $bytes / ( 1000**5 ) ) ;
+ }
+ # if you have exabytes (EB) of email to transfer, you have too much email!
+ }
+ #myprint( "$bytes = $readable_value\n" ) ;
+
+ return( $readable_value ) ;
+}
+
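
A few sample calls (both subs are in scope inside this script) showing where the decimal and binary scales diverge; 3_000_000_000 bytes reads 3.000 GB in SI units but only about 2.794 GiB in IEC units:

    print bytes_display_string_dec( 999 ),           "\n";   # 999 bytes
    print bytes_display_string_dec( 1000 ),          "\n";   # 1.000 KB
    print bytes_display_string_dec( 3_000_000_000 ), "\n";   # 3.000 GB
    print bytes_display_string_bin( 3_000_000_000 ), "\n";   # 2.794 GiB
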
sub tests_useheader_suggestion
{
@@ -14209,10 +14838,10 @@
myprint( "Messages deleted on host2 : $mysync->{ acc2 }->{ nb_msg_deleted }\n" ) ;
myprintf( "Total bytes transferred : %s (%s)\n",
$mysync->{total_bytes_transferred},
- bytes_display_string( $mysync->{total_bytes_transferred} ) ) ;
+ bytes_display_string_bin( $mysync->{total_bytes_transferred} ) ) ;
myprintf( "Total bytes skipped : %s (%s)\n",
$mysync->{ total_bytes_skipped },
- bytes_display_string( $mysync->{ total_bytes_skipped } ) ) ;
+ bytes_display_string_bin( $mysync->{ total_bytes_skipped } ) ) ;
$timediff ||= 1 ; # No division per 0
myprintf("Message rate : %.1f messages/s\n", $mysync->{nb_msg_transferred} / $timediff ) ;
myprintf("Average bandwidth rate : %.1f KiB/s\n", $mysync->{total_bytes_transferred} / $KIBI / $timediff ) ;
@@ -14225,7 +14854,7 @@
myprint( "CPU time and %cpu : $cpu_time sec $cpu_percent %cpu $cpu_percent_global %allcpus\n" ) ;
myprintf("Biggest message transferred : %s bytes (%s)\n",
$mysync->{ biggest_message_transferred },
- bytes_display_string( $mysync->{ biggest_message_transferred } ) ) ;
+ bytes_display_string_bin( $mysync->{ biggest_message_transferred } ) ) ;
myprint( "Memory/biggest message ratio : $memory_ratio\n" ) ;
if ( $mysync->{ foldersizesatend } and $mysync->{ foldersizes } ) {
@@ -14235,21 +14864,21 @@
myprintf("Start difference host2 - host1 : %s messages, %s bytes (%s)\n", $nb_msg_start_diff,
$bytes_start_diff,
- bytes_display_string( $bytes_start_diff ) ) ;
+ bytes_display_string_bin( $bytes_start_diff ) ) ;
my $nb_msg_end_diff = diff_or_NA( $h2_nb_msg_end, $h1_nb_msg_end ) ;
my $bytes_end_diff = diff_or_NA( $h2_bytes_end, $h1_bytes_end ) ;
myprintf("Final difference host2 - host1 : %s messages, %s bytes (%s)\n", $nb_msg_end_diff,
$bytes_end_diff,
- bytes_display_string( $bytes_end_diff ) ) ;
+ bytes_display_string_bin( $bytes_end_diff ) ) ;
}
comment_on_final_diff_in_1_not_in_2( $mysync ) ;
comment_on_final_diff_in_2_not_in_1( $mysync ) ;
myprint( "Detected $mysync->{nb_errors} errors\n\n" ) ;
- myprint( $warn_release, "\n" ) ;
+ myprint( $mysync->{ warn_release }, "\n" ) ;
myprint( homepage( ), "\n" ) ;
return ;
}
@@ -14696,7 +15325,7 @@
if ( ! -e $file ) { return ; }
if ( ! -f $file ) { return ; }
if ( ! -r $file ) { return ; }
-
+
my @string ;
if ( open my $FILE, '<', $file )
@@ -14741,7 +15370,7 @@
if ( ! -e $file ) { return ; }
if ( ! -f $file ) { return ; }
if ( ! -r $file ) { return ; }
-
+
return( join q{}, file_to_array( $file ) ) ;
}
@@ -15236,7 +15865,7 @@
}
is( 4, cpu_number( ), "cpu_number: on i005 (FreeBSD) => 4" ) ;
} ;
-
+
SKIP: {
if ( ! ( 'petite' eq hostname() ) )
{
@@ -15252,7 +15881,7 @@
}
is( 2, cpu_number( ), "cpu_number: on polarhome macosx (Darwin MacOS X 10.7.5 Lion) => 2" ) ;
} ;
-
+
SKIP: {
if ( ! ( 'pcHPDV7-HP' eq hostname() ) )
{
@@ -15260,7 +15889,7 @@
}
is( 2, cpu_number( ), "cpu_number: on pcHPDV7-HP (Windows 7, 64bits) => 2" ) ;
} ;
-
+
SKIP: {
if ( ! ( 'CUILLERE' eq hostname() ) )
{
@@ -15268,7 +15897,7 @@
}
is( 1, cpu_number( ), "cpu_number: on CUILLERE (Windows XP, 32bits) => 1" ) ;
} ;
-
+
note( 'Leaving tests_cpu_number()' ) ;
return ;
@@ -15288,33 +15917,33 @@
$cpu_number = $ENV{"NUMBER_OF_PROCESSORS"} ;
#myprint( "Number of processors found by env var NUMBER_OF_PROCESSORS: $cpu_number\n" ) ;
}
-
+
if ( 'darwin' eq $OSNAME )
{
$cpu_number = backtick( "sysctl -n hw.ncpu" ) ;
chomp( $cpu_number ) ;
#myprint( "Number of processors found by cmd 'sysctl -n hw.ncpu': $cpu_number\n" ) ;
}
-
+
if ( 'freebsd' eq $OSNAME )
{
$cpu_number = backtick( "sysctl -n kern.smp.cpus" ) ;
chomp( $cpu_number ) ;
#myprint( "Number of processors found by cmd 'sysctl -n kern.smp.cpus': $cpu_number\n" ) ;
}
-
+
if ( 'linux' eq $OSNAME && -e '/proc/cpuinfo' )
{
@cpuinfo = file_to_array( '/proc/cpuinfo' ) ;
$cpu_number = grep { /^processor/mxs } @cpuinfo ;
#myprint( "Number of processors found via /proc/cpuinfo: $cpu_number\n" ) ;
}
-
+
if ( defined $cpu_number_forced )
{
$cpu_number = $cpu_number_forced ;
}
-
+
return( integer_or_1( $cpu_number ) ) ;
}
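
cpu_number() above probes, in order: the NUMBER_OF_PROCESSORS environment variable (Windows), sysctl hw.ncpu (Darwin), sysctl kern.smp.cpus (FreeBSD), the processor lines of /proc/cpuinfo (Linux), then an optional forced override, falling back to 1. A rough Python equivalent of the same cascade, offered as a sketch only:

    import os
    import platform
    import subprocess

    def cpu_number(forced=None):
        # Mirrors the detection cascade above; a sketch, not imapsync's code.
        n = os.environ.get('NUMBER_OF_PROCESSORS')          # Windows
        system = platform.system()
        if system == 'Darwin':
            n = subprocess.check_output(['sysctl', '-n', 'hw.ncpu']).strip()
        elif system == 'FreeBSD':
            n = subprocess.check_output(['sysctl', '-n', 'kern.smp.cpus']).strip()
        elif system == 'Linux' and os.path.exists('/proc/cpuinfo'):
            with open('/proc/cpuinfo') as f:
                n = sum(1 for line in f if line.startswith('processor'))
        if forced is not None:
            n = forced
        try:
            return max(1, int(n))    # the integer_or_1() fallback
        except (TypeError, ValueError):
            return 1
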
@@ -15375,13 +16004,16 @@
{
note( 'Entering tests_loadavg()' ) ;
-
SKIP: {
- skip( 'Tests for darwin', 2 ) if ('darwin' ne $OSNAME) ;
+ skip( 'Tests for darwin', 3 ) if ('darwin' ne $OSNAME) ;
is( undef, loadavg( '/noexist' ), 'loadavg: /noexist => undef' ) ;
- is_deeply( [ '0.11', '0.22', '0.33' ],
- [ loadavg( 'W/t/loadavg.out' ) ],
- 'loadavg W/t/loadavg.out => 0.11 0.22 0.33' ) ;
+ is_deeply(
+ [ '0.11', '0.22', '0.33' ],
+ [ loadavg( 'vm.loadavg: { 0.11 0.22 0.33 }' ) ],
+ 'loadavg: "vm.loadavg: { 0.11 0.22 0.33 }" => 0.11 0.22 0.33'
+ ) ;
+ note( join( " ", "loadavg:", loadavg( ) ) ) ;
+ is( 3, scalar( my @loadavg = loadavg( ) ), 'loadavg: 3 values' ) ;
} ;
SKIP: {
@@ -15399,7 +16031,6 @@
is_deeply( [ 0 ],
[ loadavg( ) ],
'loadavg on MSWin32 => 0' ) ;
-
} ;
note( 'Leaving tests_loadavg()' ) ;
@@ -15422,7 +16053,6 @@
return ( loadavg_windows( @ARG ) ) ;
}
return( 'unknown' ) ;
-
}
sub loadavg_linux
@@ -15466,24 +16096,25 @@
sub loadavg_darwin
{
- my $file = shift ;
+ my $line = shift ;
# Example of output of command "sysctl vm.loadavg":
# vm.loadavg: { 0.15 0.08 0.08 }
my $loadavg ;
- if ( ! defined $file ) {
+ if ( ! defined $line ) {
eval {
- $loadavg = `/usr/sbin/sysctl vm.loadavg` ;
+ # $loadavg = `/usr/sbin/sysctl vm.loadavg` ;
+ $loadavg = `LANG= /usr/sbin/sysctl vm.loadavg` ;
#myprint( "LOADAVG DARWIN: $loadavg\n" ) ;
} ;
if ( $EVAL_ERROR ) { myprint( "[$EVAL_ERROR]\n" ) ; return ; }
}else{
- $loadavg = firstline( $file ) or return ;
+ $loadavg = $line ;
}
my ( $avg_1_min, $avg_5_min, $avg_15_min )
= $loadavg =~ /vm\.loadavg\s*[:=]\s*\{?\s*(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+\.?\d*)/mxs ;
- $sync->{ debug } and myprint( "System load: $avg_1_min $avg_5_min $avg_15_min\n" ) ;
+ #$sync->{ debug } and myprint( "System load: $avg_1_min $avg_5_min $avg_15_min\n" ) ;
return ( $avg_1_min, $avg_5_min, $avg_15_min ) ;
}
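
loadavg_darwin() now parses the sysctl output line passed to it directly instead of reading a file, and runs sysctl with an emptied LANG so that locales printing decimal commas cannot break the pattern. The same parse in Python, as a sketch:

    import re

    def loadavg_darwin(line):
        # Parse 'vm.loadavg: { 0.15 0.08 0.08 }', the output of
        # `LANG= /usr/sbin/sysctl vm.loadavg`.
        m = re.search(r'vm\.loadavg\s*[:=]\s*\{?\s*'
                      r'(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+\.?\d*)', line)
        return m.groups() if m else None

    print(loadavg_darwin('vm.loadavg: { 0.11 0.22 0.33 }'))
    # ('0.11', '0.22', '0.33')
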
@@ -15592,7 +16223,7 @@
my $mysync = { } ;
$mysync->{ debug } = 1 ;
- ok( is_number( cpu_time( $mysync ) ), 'cpu_time: {} => a number' ) ;
+ ok( is_number( cpu_time( $mysync ) ), 'cpu_time: {} => a number' ) ;
note( 'Leaving tests_cpu_time()' ) ;
return ;
@@ -15601,15 +16232,15 @@
sub cpu_time
{
my $mysync = shift ;
-
+
my @cpu_times = times ;
if ( ! @cpu_times ) { return ; }
-
+
my $cpu_time = 0 ;
# last element is the sum of all elements
$cpu_time = ( map { $cpu_time += $_ } @cpu_times )[ -1 ] ;
- $mysync->{ debug } and myprint( join(' + ', @cpu_times), " = $cpu_time\n" ) ;
-
+ my $cpu_time_round = mysprintf( '%.2f', $cpu_time ) ;
+ $mysync->{ debug } and myprint( join(' + ', @cpu_times), " = $cpu_time ~ $cpu_time_round\n" ) ;
return $cpu_time ;
}
@@ -15644,9 +16275,9 @@
my $cpu_percent = 0 ;
$cpu_percent = mysprintf( '%.1f', 100 * $cpu_time / $timediff ) ;
$mysync->{ debug } and myprint( "cpu_percent: $cpu_percent \n" ) ;
-
+
return $cpu_percent ;
-
+
}
sub tests_cpu_percent_global
@@ -15666,7 +16297,7 @@
}
is( '25.0', cpu_percent_global( $mysync, 100 ), 'cpu_percent_global: {} 100 => 25 on host i005' ) ;
} ;
-
+
SKIP: {
if ( ! ( 'petite' eq hostname() ) )
{
@@ -15683,13 +16314,13 @@
{
my $mysync = shift ;
my $cpu_percent = shift || 0 ;
-
+
my $cpu_number = cpu_number( ) ;
-
+
my $cpu_percent_global ;
$cpu_percent_global = mysprintf( '%.1f', $cpu_percent / $cpu_number ) ;
$mysync->{ debug } and myprint( "cpu_percent_global: $cpu_percent_global \n" ) ;
-
+
return( $cpu_percent_global ) ;
}
@@ -15720,7 +16351,6 @@
sub memory_stress
{
-
my $total_ram_in_MB = Sys::MemInfo::get("totalmem") / ( $KIBI * $KIBI ) ;
my $i = 1 ;
@@ -15728,13 +16358,13 @@
while ( $i < $total_ram_in_MB / 1.7 ) { $a .= "A" x 1000_000; $i++ } ;
myprintf("Stress memory consumption after: %.1f MiB\n", memory_consumption( ) / $KIBI / $KIBI ) ;
return ;
-
}
sub tests_memory_consumption
{
note( 'Entering tests_memory_consumption()' ) ;
+ note( "memory_consumption: " . memory_consumption() . " bytes aka " . bytes_display_string_dec( memory_consumption() ) ) ;
like( memory_consumption( ), qr{\d+}xms,'memory_consumption no args') ;
like( memory_consumption( 1 ), qr{\d+}xms,'memory_consumption 1') ;
like( memory_consumption( $PROCESS_ID ), qr{\d+}xms,"memory_consumption_of_pids $PROCESS_ID") ;
@@ -15743,7 +16373,6 @@
like( memory_consumption_ratio(1), qr{\d+}xms, 'memory_consumption_ratio 1' ) ;
like( memory_consumption_ratio(10), qr{\d+}xms, 'memory_consumption_ratio 10' ) ;
- like( memory_consumption(), qr{\d+}xms, "memory_consumption\n" ) ;
note( 'Leaving tests_memory_consumption()' ) ;
return ;
@@ -15774,23 +16403,36 @@
my @val ;
if ( ( 'MSWin32' eq $OSNAME ) or ( 'cygwin' eq $OSNAME ) ) {
@val = memory_consumption_of_pids_win32( @pid ) ;
- }else{
+ }
+ elsif ( 'darwin' eq $OSNAME )
+ {
+ @val = memory_consumption_of_pids_mac( @pid ) ;
+ }
+ else
+ {
# Unix
my @ps = qx{ ps -o vsz -p @pid } ;
- #myprint( "ps: @ps" ) ;
-
- # Use IPC::Open3 from perlcrit -3
- # But it stalls on Darwin, I don't understand why!
- #my @ps = backtick( "ps -o vsz -p @pid" ) ;
- #myprint( "ps: @ps" ) ;
-
- shift @ps; # First line is column name "VSZ"
- chomp @ps;
+ shift @ps ; # First line is column name "VSZ"
+ chomp @ps ;
# convert to octets
@val = map { $_ * $KIBI } @ps ;
}
- $sync->{ debug } and myprint( "@val\n" ) ;
+ return( @val ) ;
+}
+
+
+sub memory_consumption_of_pids_mac
+{
+ my @pid = @_ ;
+ # Use IPC::Open3 from perlcrit -3
+ # But it stalls on Darwin, I don't understand why!
+ #my @ps = backtick( "ps -o rss -p @pid" ) ;
+ #myprint( "ps: @ps" ) ;
+ my @ps = qx{ ps -o rss -p @pid } ;
+ shift @ps ; # First line is column name "RSS"
+ chomp @ps ;
+ my @val = map { $_ * $KIBI } @ps ;
return( @val ) ;
}
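
memory_consumption_of_pids_mac() shells out to ps for the resident set size (rss) rather than the virtual size (vsz) used on the other Unixes, and ps reports KiB, hence the multiplication. A self-contained Python sketch of the same measurement, assuming a BSD-compatible ps:

    import os
    import subprocess

    def memory_of_pids(pids, column='rss'):
        # `ps -o rss -p <pidlist>` reports KiB, one value per process;
        # the first output token is the column header ("RSS").
        out = subprocess.check_output(
            ['ps', '-o', column, '-p', ','.join(str(p) for p in pids)])
        values = out.decode().split()[1:]       # drop the header token
        return [int(v) * 1024 for v in values]  # KiB -> bytes

    print(memory_of_pids([os.getpid()]))
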
@@ -16439,7 +17081,7 @@
" among ",
$nb_identified_h2_messages,
" identified messages in host2 that are not on host1.",
- " Use --delete2 and sync again to delete them and have a strict sync.\n"
+ " Use --delete2 and sync again to delete them and have a strict sync.\n"
) ;
}
return ;
@@ -17114,6 +17756,24 @@
return( $max ) ;
}
+sub set_checknoabletosearch
+{
+ my $mysync = shift @ARG ;
+ if ( defined $mysync->{ checknoabletosearch } )
+ {
+ return ;
+ }
+ elsif ( $mysync->{ justfolders } )
+ {
+ $mysync->{ checknoabletosearch } = 0 ;
+ }
+ else
+ {
+ $mysync->{ checknoabletosearch } = 1 ;
+ }
+ return ;
+}
+
sub tests_setlogfile
{
@@ -17215,7 +17875,7 @@
# When aborting another process the log file name finishes with "_abort.txt"
my $abort_suffix = ( $mysync->{ abort } ) ? '_abort' : q{} ;
-
+
# When acting as a proxy the log file name finishes with "_remote.txt"
# proxy mode is not done in imapsync, it is done by proximapsync
my $remote_suffix = ( $mysync->{ remote } ) ? '_remote' : q{} ;
@@ -17313,13 +17973,13 @@
sub localtimez
{
my $time = shift ;
-
+
$time = defined( $time ) ? $time : time ;
-
+
my $datetimestr = POSIX::strftime( '%A %e %B %Y-%m-%d %H:%M:%S %z %Z', localtime( $time ) ) ;
#myprint( "$datetimestr\n" ) ;
- return $datetimestr ;
+ return $datetimestr ;
}
@@ -17761,6 +18421,7 @@
$skipcrossduplicates = ( defined $skipcrossduplicates ) ? $skipcrossduplicates : 0 ;
$mysync->{ synclabels } = ( defined $mysync->{ synclabels } ) ? $mysync->{ synclabels } : 1 ;
$mysync->{ resynclabels } = ( defined $mysync->{ resynclabels } ) ? $mysync->{ resynclabels } : 1 ;
+ push @useheader, 'X-Gmail-Received', 'Message-Id' ;
push @exclude, '\[Gmail\]$' ;
push @folderlast, '[Gmail]/All Mail' ;
return ;
@@ -17785,7 +18446,7 @@
return ;
}
-sub gmail2
+sub gmail2
{
my $mysync = shift ;
# Gmail at host2
@@ -18610,8 +19271,8 @@
my ( $mysync, $mycgi, $key, $val ) = @ARG ;
my $badthings = 0 ;
-
-
+
+
my ( $name, $type, $struct ) ;
if ( $key !~ m/^([\w\d\|]+)([=:][isf])?([\+!\@\%])?$/mxs )
{
@@ -18624,7 +19285,7 @@
$type = $2 ; # = or : followed by i or s or f
$struct = $3 ; # + or ! or @ or %
}
-
+
if ( ( $struct || q{} ) eq '+' )
{
${$val} = $mycgi->param( $name ) ; # "Incremental" integer
@@ -18638,7 +19299,7 @@
{
setvalfromhash( $val, $type, @values ) ;
}
- else
+ else
{
setvalfromlist( $mysync, $val, $name, $type, $struct, @values ) ;
}
@@ -18647,7 +19308,7 @@
{
setvalfromcheckbox( $mysync, $mycgi, $key, $name, $val ) ;
}
-
+
return $badthings ;
}
@@ -18681,7 +19342,7 @@
else
{
}
-
+
return ;
}
sub setvalfromhash
@@ -18708,11 +19369,11 @@
{
%{ ${$val} } = %values ;
}
- else
+ else
{
%{$val} = %values ;
}
-
+
return ;
}
@@ -18720,7 +19381,7 @@
sub setvalfromcheckbox
{
my ( $mysync, $mycgi, $key, $name, $val ) = @ARG ;
-
+
# Checkbox
# --noname is set by name=0 or name=
my $value = $mycgi->param( $name ) ;
@@ -18774,7 +19435,7 @@
$badthings += setvalfromcgikey( $mysync, $mycgi, $key, $val ) ;
}
-
+
if ( $badthings ) {
return ; # undef or ()
}
@@ -18785,6 +19446,138 @@
+sub tests_get_options_extra
+{
+ note( 'Entering tests_get_options_extra()' ) ;
+
+ is( undef, get_options_extra( ), 'get_options_extra: no args => undef' ) ;
+
+ my $mysync = { } ;
+ is( undef, get_options_extra( $mysync ), 'get_options_extra: undef => undef' ) ;
+
+ my $cwd_save = getcwd( ) ;
+
+ ok( (-d 'W/tmp/tests/options_extra/' or mkpath( 'W/tmp/tests/options_extra/' )), 'get_options_extra: mkpath W/tmp/tests/options_extra/' ) ;
+
+ chdir 'W/tmp/tests/options_extra/' ;
+
+ is( '--debugimap1', string_to_file( '--debugimap1', 'options_extra.txt' ), 'get_options_extra: string_to_file filling options_extra.txt with --debugimap1' ) ;
+
+ is( '--debugimap1', file_to_string( 'options_extra.txt' ), 'get_options_extra: reading options_extra.txt is --debugimap1' ) ;
+
+ is( '', get_options_extra( $mysync ), 'get_options_extra: --debugimap1 in options_extra.txt => nothing left, empty string return' ) ;
+
+ is( 1, $mysync->{ acc1 }->{ debugimap }, 'get_options_extra: --debugimap1 in options_extra.txt => ok, acc1->debugimap = 1' ) ;
+
+ is( '--tls1 proutcaca', string_to_file( '--tls1 proutcaca', 'options_extra.txt' ), 'get_options_extra: string_to_file filling options_extra.txt with --tls1 proutcaca' ) ;
+
+ is( 'proutcaca', get_options_extra( $mysync ), 'get_options_extra: --tls1 proutcaca in options_extra.txt => proutcaca left, proutcaca return' ) ;
+
+ chdir $cwd_save ;
+
+ note( 'Leaving tests_get_options_extra()' ) ;
+ return ;
+}
+
+sub get_options_extra
+{
+ my $mysync = shift @ARG ;
+
+ if ( ! defined $mysync ) { return ; }
+
+ if ( -f -r 'options_extra.txt' )
+ {
+ my $cwd = getcwd( ) ;
+ my $string = firstline( 'options_extra.txt' ) ;
+ my $rest = get_options_from_string( $mysync, $string ) ;
+ output( $mysync, "Reading extra options from file options_extra.txt (cwd: $cwd) : $string\n" ) ;
+ return $rest ;
+ }
+ else
+ {
+ return ;
+ }
+}
+
+
+sub tests_get_options_from_string
+{
+ note( 'Entering tests_get_options_from_string()' ) ;
+
+ is( undef, get_options_from_string( ), 'get_options_from_string: no args => undef' ) ;
+ my $mysync = { } ;
+ is( undef, get_options_from_string( $mysync ), 'get_options_from_string: undef => undef' ) ;
+
+ is( '', get_options_from_string( $mysync, '--debugimap1' ),
+ 'get_options_from_string: --debugimap1 => ok, nothing left, empty string return' ) ;
+ is( 1, $mysync->{ acc1 }->{ debugimap }, 'get_options_from_string: --debugimap1 => ok, acc1->debugimap = 1' ) ;
+
+ $mysync = { } ; # reset
+ is( 'caca', get_options_from_string( $mysync, '--debugimap1 caca' ),
+ 'get_options_from_string: --debugimap1 caca => ok, caca left, caca return' ) ;
+ is( 1, $mysync->{ acc1 }->{ debugimap }, 'get_options_from_string: --debugimap1 => ok, acc1->debugimap = 1' ) ;
+
+ is( 'popo roro', get_options_from_string( $mysync, '--debugimap2 popo roro' ),
+ 'get_options_from_string: --debugimap2 popo roro => ok, popo roro left, popo roro return' ) ;
+ is( 1, $mysync->{ acc2 }->{ debugimap }, 'get_options_from_string: --debugimap2 popo roro => ok, acc2->debugimap = 1' ) ;
+ is( 1, $mysync->{ acc1 }->{ debugimap }, 'get_options_from_string: acc1->debugimap = 1 still' ) ;
+
+ is( '', get_options_from_string( $mysync, '--nodebugimap1 --debugflags --errorsmax 2' ),
+ 'get_options_from_string: --nodebugimap1 --debugflags --errorsmax 2 => ok, empty string return' ) ;
+
+ is( 0, $mysync->{ acc1 }->{ debugimap }, 'get_options_from_string: acc1->debugimap = 0 now' ) ;
+ is( 1, $debugflags, 'get_options_from_string: debugflags = 1 now' ) ;
+ is( 2, $mysync->{ errorsmax }, 'get_options_from_string: mysync->errorsmax = 2 now' ) ;
+
+ is( '', get_options_from_string( $mysync, '--folder "IN BOX" --folder JOE' ),
+ 'get_options_from_string: --folder "IN BOX" --folder JOE => ok, empty string return' ) ;
+
+ is_deeply( [ 'IN BOX', 'JOE' ], [@{$mysync->{ folder }}], 'get_options_from_string: "IN BOX" "JOE"' ) ;
+
+ is( '', get_options_from_string( $mysync, '--debugflags --koko' ),
+ 'get_options_from_string: --debugflags --koko => ok, empty string return, with "Unknown option: koko" on STDERR' ) ;
+
+ note( 'Leaving tests_get_options_from_string()' ) ;
+ return ;
+}
+
+sub get_options_from_string
+{
+ my $mysync = shift @ARG ;
+ my $mystring = shift @ARG ;
+
+ if ( ! defined $mystring ) { return ; }
+
+ my ( $ret, $args ) = Getopt::Long::GetOptionsFromString( $mystring,
+ 'debugimap!' => \$mysync->{ debugimap },
+ 'debugimap1!' => \$mysync->{ acc1 }->{ debugimap },
+ 'debugimap2!' => \$mysync->{ acc2 }->{ debugimap },
+ 'debugflags!' => \$debugflags,
+ 'debugsleep=f' => \$mysync->{ debugsleep },
+ 'errorsmax=i' => \$mysync->{ errorsmax },
+ 'folder=s@' => \$mysync->{ folder },
+ 'timeout=f' => \$mysync->{ timeout },
+ 'timeout1=f' => \$mysync->{ acc1 }->{ timeout },
+ 'timeout2=f' => \$mysync->{ acc2 }->{ timeout },
+ 'keepalive1!' => \$mysync->{ acc1 }->{ keepalive },
+ 'keepalive2!' => \$mysync->{ acc2 }->{ keepalive },
+ 'reconnectretry1=i' => \$mysync->{ acc1 }->{ reconnectretry },
+ 'reconnectretry2=i' => \$mysync->{ acc2 }->{ reconnectretry },
+ 'ssl1!' => \$mysync->{ ssl1 },
+ 'ssl2!' => \$mysync->{ ssl2 },
+ 'tls1!' => \$mysync->{ tls1 },
+ 'tls2!' => \$mysync->{ tls2 },
+ 'compress1!' => \$mysync->{ acc1 }->{ compress },
+ 'compress2!' => \$mysync->{ acc2 }->{ compress },
+ ) ;
+ my $left = join( ' ', @$args ) ;
+ return $left ;
+}
+
+
+
+
+
sub tests_get_options_cgi_context
{
@@ -18922,6 +19715,8 @@
'ssl2!' => \$mysync->{ ssl2 },
'tls1!' => \$mysync->{ tls1 },
'tls2!' => \$mysync->{ tls2 },
+ 'compress1!' => \$mysync->{ acc1 }->{ compress },
+ 'compress2!' => \$mysync->{ acc2 }->{ compress },
'justbanner!' => \$mysync->{ justbanner },
'justlogin!' => \$mysync->{ justlogin },
'justconnect!' => \$mysync->{ justconnect },
@@ -18957,7 +19752,7 @@
'syncduplicates!' => \$mysync->{ syncduplicates },
'log!' => \$mysync->{ log },
'loglogfile!' => \$mysync->{ loglogfile },
-
+
# f1f2h=s% could be removed but
# tests_get_options_cgi() should be split before
@@ -18991,8 +19786,8 @@
\@arguments,
'debug!' => \$mysync->{ debug },
'debuglist!' => \$debuglist,
- 'debugcontent!' => \$debugcontent,
- 'debugsleep=f' => \$mysync->{debugsleep},
+ 'debugcontent!' => \$mysync->{ debugcontent },
+ 'debugsleep=f' => \$mysync->{ debugsleep },
'debugflags!' => \$debugflags,
'debugimap!' => \$mysync->{ debugimap },
'debugimap1!' => \$mysync->{ acc1 }->{ debugimap },
@@ -19012,10 +19807,10 @@
'host1=s' => \$mysync->{ host1 },
'host2=s' => \$mysync->{ host2 },
- 'port1=i' => \$mysync->{port1},
- 'port2=i' => \$mysync->{port2},
- 'inet4|ipv4' => \$mysync->{inet4},
- 'inet6|ipv6' => \$mysync->{inet6},
+ 'port1=i' => \$mysync->{ port1 },
+ 'port2=i' => \$mysync->{ port2 },
+ 'inet4|ipv4' => \$mysync->{ inet4 },
+ 'inet6|ipv6' => \$mysync->{ inet6 },
'user1=s' => \$mysync->{ user1 },
'user2=s' => \$mysync->{ user2 },
'gmail1' => \$mysync->{gmail1},
@@ -19109,7 +19904,6 @@
'justbanner!' => \$mysync->{ justbanner },
'justfolders!'=> \$mysync->{ justfolders },
'justfoldersizes!' => \$mysync->{ justfoldersizes },
- 'fast!' => \$fast,
'version' => \$mysync->{version},
'help' => \$help,
'timeout=f' => \$mysync->{timeout},
@@ -19140,6 +19934,10 @@
'authuser2=s' => \$mysync->{ acc2 }->{ authuser },
'proxyauth1' => \$mysync->{ acc1 }->{ proxyauth },
'proxyauth2' => \$mysync->{ acc2 }->{ proxyauth },
+ 'compress1!' => \$mysync->{ acc1 }->{ compress },
+ 'compress2!' => \$mysync->{ acc2 }->{ compress },
+ 'keepalive1!' => \$mysync->{ acc1 }->{ keepalive },
+ 'keepalive2!' => \$mysync->{ acc2 }->{ keepalive },
'split1=i' => \$split1,
'split2=i' => \$split2,
'buffersize=i' => \$buffersize,
@@ -19191,7 +19989,7 @@
'logfile=s' => \$mysync->{logfile},
'logdir=s' => \$mysync->{logdir},
'errorsmax=i' => \$mysync->{errorsmax},
- 'errorsdump!' => \$mysync->{errorsdump},
+ 'errorsdump!' => \$mysync->{ errorsdump },
'fetch_hash_set=s' => \$fetch_hash_set,
'automap!' => \$mysync->{automap},
'justautomap!' => \$mysync->{justautomap},
@@ -19204,7 +20002,7 @@
'checknoabletosearch!' => \$mysync->{checknoabletosearch},
'syncduplicates!' => \$mysync->{ syncduplicates },
'dockercontext!' => \$mysync->{ dockercontext },
-
+
) ;
#myprint( Data::Dumper->Dump( [ $mysync ] ) ) ;
@@ -19294,6 +20092,7 @@
$ret = get_options_cmd( $mysync, @arguments ) ;
} ;
#myprint( "2 mysync: ", Data::Dumper->Dump( [ $mysync ] ) ) ;
+
foreach my $key ( sort keys %{ $mysync } ) {
if ( ! defined $mysync->{$key} ) {
delete $mysync->{$key} ;
@@ -19308,6 +20107,31 @@
}
+sub tests_infos
+{
+ note( 'Entering tests_infos()' ) ;
+ note( "OSNAME=$OSNAME" ) ;
+ note( "hostname=". hostname() ) ;
+ note( "cwd=" . getcwd( ) ) ;
+ note( "PROGRAM_NAME=$PROGRAM_NAME" ) ;
+ my $stat = stat("$PROGRAM_NAME") ;
+ my $perms = sprintf( "%04o\n", $stat->mode & oct($PERMISSION_FILTER) ) ;
+ note( "permissions=$perms" ) ;
+ note( "PROCESS_ID=$PROCESS_ID" ) ;
+ note( "REAL_USER_ID=$REAL_USER_ID" ) ;
+ note( "EFFECTIVE_USER_ID=$EFFECTIVE_USER_ID" ) ;
+ note( "context: " . imapsync_context( $sync ) ) ;
+ note( "memory_consumption: " . memory_consumption() . " bytes aka " . bytes_display_string_dec( memory_consumption() ) ) ;
+ note( "cpu_number: " . cpu_number() ) ;
+ note( $sync->{rcs} ) ;
+
+ note( 'Leaving tests_infos()' ) ;
+ return ;
+}
+
+
+
sub condition_to_leave_after_tests
{
my $mysync = shift ;
@@ -19316,9 +20140,9 @@
return 0 ;
}
- if ( $mysync->{ tests }
- or $mysync->{ testsdebug }
- or $mysync->{ testsunit }
+ if ( $mysync->{ tests }
+ or $mysync->{ testsdebug }
+ or $mysync->{ testsunit }
)
{
return 1 ;
@@ -19467,8 +20291,9 @@
#tests_kill_zero( ) ;
#tests_connect_socket( ) ;
#tests_probe_imapssl( ) ;
- tests_cpu_number( ) ;
- tests_mailimapclient_connect( ) ;
+ #tests_cpu_number( ) ;
+ #tests_mailimapclient_connect( ) ;
+ tests_loadavg( ) ;
#tests_always_fail( ) ;
note( 'Leaving testsdebug()' ) ;
@@ -19524,7 +20349,8 @@
tests_time_remaining( ) ;
tests_decompose_regex( ) ;
tests_backtick( ) ;
- tests_bytes_display_string( ) ;
+ tests_bytes_display_string_bin( ) ;
+ tests_bytes_display_string_dec( ) ;
tests_header_line_normalize( ) ;
tests_fix_Inbox_INBOX_mapping( ) ;
tests_max_line_length( ) ;
@@ -19666,6 +20492,14 @@
tests_cpu_percent_global( ) ;
tests_flags_for_host2( ) ;
tests_under_docker_context( ) ;
+ tests_exit_value( ) ;
+ tests_comment_of_error_type( ) ;
+ tests_debugcontent( ) ;
+ tests_compress_ssl( ) ;
+ tests_compress( ) ;
+ tests_get_options_extra( ) ;
+ tests_get_options_from_string( ) ;
+ tests_infos( ) ;
#tests_resolv( ) ;
# Those three are for later use, when webserver will be inside imapsync
@@ -19675,7 +20509,7 @@
#tests_kill_zero( ) ;
#tests_always_fail( ) ;
- done_testing( 1742 ) ;
+ done_testing( 1860 ) ;
note( 'Leaving tests()' ) ;
}
return ;
@@ -19698,6 +20532,6 @@
sub template
{
my $mysync = shift @ARG ;
-
+
return ;
}
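
The get_options_extra/get_options_from_string pair added above reads one line of extra options from options_extra.txt, applies every option it recognizes, and hands back whatever is left over. The Python sketch below mimics that contract with argparse.parse_known_args; argparse has no direct counterpart to Getopt::Long's `!` negation, so only a few illustrative options are wired up:

    import argparse
    import shlex

    def get_options_from_string(string):
        # Parse recognized options out of a one-line string; return the
        # parsed options plus the leftover words, like GetOptionsFromString.
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument('--debugimap1', action='store_true')
        parser.add_argument('--errorsmax', type=int)
        parser.add_argument('--folder', action='append')
        opts, rest = parser.parse_known_args(shlex.split(string))
        return opts, ' '.join(rest)

    opts, rest = get_options_from_string('--debugimap1 --folder "IN BOX" caca')
    print(opts.debugimap1, opts.folder, repr(rest))  # True ['IN BOX'] 'caca'
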
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quarantine_notify.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quarantine_notify.py
index acd887a..65f2a0e 100755
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quarantine_notify.py
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/quarantine_notify.py
@@ -50,7 +50,7 @@
def query_mysql(query, headers = True, update = False):
while True:
try:
- cnx = mysql.connector.connect(unix_socket = '/var/run/mysqld/mysqld.sock', user=os.environ.get('DBUSER'), passwd=os.environ.get('DBPASS'), database=os.environ.get('DBNAME'), charset="utf8")
+ cnx = mysql.connector.connect(unix_socket = '/var/run/mysqld/mysqld.sock', user=os.environ.get('DBUSER'), passwd=os.environ.get('DBPASS'), database=os.environ.get('DBNAME'), charset="utf8mb4", collation="utf8mb4_general_ci")
except Exception as ex:
print('%s - trying again...' % (ex))
time.sleep(3)
@@ -161,9 +161,9 @@
attrs = json.loads(attrs.decode('utf-8'))
if attrs['quarantine_notification'] not in ('hourly', 'daily', 'weekly'):
continue
- if last_notification == 0 or (last_notification + time_trans[attrs['quarantine_notification']]) < time_now:
+ if last_notification == 0 or (last_notification + time_trans[attrs['quarantine_notification']]) <= time_now:
print("Notifying %s: Considering %d new items in quarantine (policy: %s)" % (record['rcpt'], record['counter'], attrs['quarantine_notification']))
notify_rcpt(record['rcpt'], record['counter'], record['quarantine_acl'], attrs['quarantine_category'])
finally:
- os.unlink(pidfile)
\ No newline at end of file
+ os.unlink(pidfile)
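
The quarantine_notify.py hunk above swaps < for <= in the due-time check: with <, a run landing exactly on last_notification + interval would skip the notification for a whole extra cycle. A small Python sketch of the boundary:

    time_trans = {'hourly': 3600, 'daily': 86400, 'weekly': 604800}

    def due(last_notification, policy, time_now):
        # '<=' fires when the interval has elapsed exactly; with '<' a run
        # landing precisely on the boundary would wait one more full cycle.
        return (last_notification == 0
                or last_notification + time_trans[policy] <= time_now)

    print(due(0, 'hourly', 1000))      # True: never notified before
    print(due(1000, 'hourly', 4600))   # True: exactly one hour later
    print(due(1000, 'hourly', 4599))   # False: still inside the hour
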
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf
index f690509..ea2bcfb 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf
@@ -1,4 +1,4 @@
-@version: 3.19
+@version: 3.28
@include "scl.conf"
options {
chain_hostnames(off);
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng.conf
index bdaca9c..2ee4f62 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng.conf
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/dovecot/syslog-ng.conf
@@ -1,4 +1,4 @@
-@version: 3.19
+@version: 3.28
@include "scl.conf"
options {
chain_hostnames(off);
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/Dockerfile
index 14b3464..7687c3c 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:3.14
+FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
ENV XTABLES_LIBDIR /usr/lib/xtables
@@ -13,12 +13,13 @@
&& apk add -U python3 \
iptables \
ip6tables \
+ xtables-addons \
tzdata \
py3-pip \
musl-dev \
&& pip3 install --upgrade --ignore-installed \
packaging \
-&& pip3 install --upgrade pip \
+&& pip3 install --ignore-installed --upgrade pip \
python-iptables \
redis \
ipaddress \
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/server.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/server.py
index 1adcf59..4eb8e7a 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/server.py
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/netfilter/server.py
@@ -1,491 +1,500 @@
-#!/usr/bin/env python3
-
-import re
-import os
-import sys
-import time
-import atexit
-import signal
-import ipaddress
-from collections import Counter
-from random import randint
-from threading import Thread
-from threading import Lock
-import redis
-import json
-import iptc
-import dns.resolver
-import dns.exception
-
-while True:
- try:
- redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
- redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
- if "".__eq__(redis_slaveof_ip):
- r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
- else:
- r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
- r.ping()
- except Exception as ex:
- print('%s - trying again in 3 seconds' % (ex))
- time.sleep(3)
- else:
- break
-
-pubsub = r.pubsub()
-
-WHITELIST = []
-BLACKLIST= []
-
-bans = {}
-
-quit_now = False
-exit_code = 0
-lock = Lock()
-
-def log(priority, message):
- tolog = {}
- tolog['time'] = int(round(time.time()))
- tolog['priority'] = priority
- tolog['message'] = message
- r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
- print(message)
-
-def logWarn(message):
- log('warn', message)
-
-def logCrit(message):
- log('crit', message)
-
-def logInfo(message):
- log('info', message)
-
-def refreshF2boptions():
- global f2boptions
- global quit_now
- global exit_code
- if not r.get('F2B_OPTIONS'):
- f2boptions = {}
- f2boptions['ban_time'] = int
- f2boptions['max_attempts'] = int
- f2boptions['retry_window'] = int
- f2boptions['netban_ipv4'] = int
- f2boptions['netban_ipv6'] = int
- f2boptions['ban_time'] = r.get('F2B_BAN_TIME') or 1800
- f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') or 10
- f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') or 600
- f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') or 32
- f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') or 128
- r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
- else:
- try:
- f2boptions = {}
- f2boptions = json.loads(r.get('F2B_OPTIONS'))
- except ValueError:
- print('Error loading F2B options: F2B_OPTIONS is not json')
- quit_now = True
- exit_code = 2
-
-def refreshF2bregex():
- global f2bregex
- global quit_now
- global exit_code
- if not r.get('F2B_REGEX'):
- f2bregex = {}
- f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
- f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
- f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed'
- f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
- f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
- f2bregex[6] = '-login: Disconnected \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
- f2bregex[7] = '-login: Aborted login \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
- f2bregex[8] = '-login: Aborted login \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
- f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
- f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
- r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
- else:
- try:
- f2bregex = {}
- f2bregex = json.loads(r.get('F2B_REGEX'))
- except ValueError:
- print('Error loading F2B options: F2B_REGEX is not json')
- quit_now = True
- exit_code = 2
-
-if r.exists('F2B_LOG'):
- r.rename('F2B_LOG', 'NETFILTER_LOG')
-
-def mailcowChainOrder():
- global lock
- global quit_now
- global exit_code
- while not quit_now:
- time.sleep(10)
- with lock:
- filter4_table = iptc.Table(iptc.Table.FILTER)
- filter4_table.refresh()
- for f in [filter4_table]:
- forward_chain = iptc.Chain(f, 'FORWARD')
- input_chain = iptc.Chain(f, 'INPUT')
- for chain in [forward_chain, input_chain]:
- target_found = False
- for position, item in enumerate(chain.rules):
- if item.target.name == 'MAILCOW':
- target_found = True
- if position > 2:
- logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position))
- quit_now = True
- exit_code = 2
- if not target_found:
- logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name))
- quit_now = True
- exit_code = 2
-
-def ban(address):
- global lock
- refreshF2boptions()
- BAN_TIME = int(f2boptions['ban_time'])
- MAX_ATTEMPTS = int(f2boptions['max_attempts'])
- RETRY_WINDOW = int(f2boptions['retry_window'])
- NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
- NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])
-
- ip = ipaddress.ip_address(address)
- if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
- ip = ip.ipv4_mapped
- address = str(ip)
- if ip.is_private or ip.is_loopback:
- return
-
- self_network = ipaddress.ip_network(address)
-
- with lock:
- temp_whitelist = set(WHITELIST)
-
- if temp_whitelist:
- for wl_key in temp_whitelist:
- wl_net = ipaddress.ip_network(wl_key, False)
- if wl_net.overlaps(self_network):
- logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
- return
-
- net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
- net = str(net)
-
- if not net in bans or time.time() - bans[net]['last_attempt'] > RETRY_WINDOW:
- bans[net] = { 'attempts': 0 }
- active_window = RETRY_WINDOW
- else:
- active_window = time.time() - bans[net]['last_attempt']
-
- bans[net]['attempts'] += 1
- bans[net]['last_attempt'] = time.time()
-
- active_window = time.time() - bans[net]['last_attempt']
-
- if bans[net]['attempts'] >= MAX_ATTEMPTS:
- cur_time = int(round(time.time()))
- logCrit('Banning %s for %d minutes' % (net, BAN_TIME / 60))
- if type(ip) is ipaddress.IPv4Address:
- with lock:
- chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
- rule = iptc.Rule()
- rule.src = net
- target = iptc.Target(rule, "REJECT")
- rule.target = target
- if rule not in chain.rules:
- chain.insert_rule(rule)
- else:
- pass
- r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + BAN_TIME)
- else:
- logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
-
-def unban(net):
- global lock
- if not net in bans:
- logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
- r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
- return
- logInfo('Unbanning %s' % net)
- if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
- with lock:
- chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
- rule = iptc.Rule()
- rule.src = net
- target = iptc.Target(rule, "REJECT")
- rule.target = target
- if rule in chain.rules:
- chain.delete_rule(rule)
- else:
- pass
- r.hdel('F2B_ACTIVE_BANS', '%s' % net)
- r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
- if net in bans:
- del bans[net]
-
-def permBan(net, unban=False):
- global lock
- if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
- with lock:
- chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
- rule = iptc.Rule()
- rule.src = net
- target = iptc.Target(rule, "REJECT")
- rule.target = target
- if rule not in chain.rules and not unban:
- logCrit('Add host/network %s to blacklist' % net)
- chain.insert_rule(rule)
- r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
- elif rule in chain.rules and unban:
- logCrit('Remove host/network %s from blacklist' % net)
- chain.delete_rule(rule)
- r.hdel('F2B_PERM_BANS', '%s' % net)
- else:
- pass
-
-def quit(signum, frame):
- global quit_now
- quit_now = True
-
-def clear():
- global lock
- logInfo('Clearing all bans')
- for net in bans.copy():
- unban(net)
- with lock:
- filter4_table = iptc.Table(iptc.Table.FILTER)
- for filter_table in [filter4_table]:
- filter_table.autocommit = False
- forward_chain = iptc.Chain(filter_table, "FORWARD")
- input_chain = iptc.Chain(filter_table, "INPUT")
- mailcow_chain = iptc.Chain(filter_table, "MAILCOW")
- if mailcow_chain in filter_table.chains:
- for rule in mailcow_chain.rules:
- mailcow_chain.delete_rule(rule)
- for rule in forward_chain.rules:
- if rule.target.name == 'MAILCOW':
- forward_chain.delete_rule(rule)
- for rule in input_chain.rules:
- if rule.target.name == 'MAILCOW':
- input_chain.delete_rule(rule)
- filter_table.delete_chain("MAILCOW")
- filter_table.commit()
- filter_table.refresh()
- filter_table.autocommit = True
- r.delete('F2B_ACTIVE_BANS')
- r.delete('F2B_PERM_BANS')
- pubsub.unsubscribe()
-
-def watch():
- logInfo('Watching Redis channel F2B_CHANNEL')
- pubsub.subscribe('F2B_CHANNEL')
-
- global quit_now
- global exit_code
-
- while not quit_now:
- try:
- for item in pubsub.listen():
- refreshF2bregex()
- for rule_id, rule_regex in f2bregex.items():
- if item['data'] and item['type'] == 'message':
- try:
- result = re.search(rule_regex, item['data'])
- except re.error:
- result = False
- if result:
- addr = result.group(1)
- ip = ipaddress.ip_address(addr)
- if ip.is_private or ip.is_loopback:
- continue
- logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
- ban(addr)
- except Exception as ex:
- logWarn('Error reading log line from pubsub')
- quit_now = True
- exit_code = 2
-
-def snat4(snat_target):
- global lock
- global quit_now
-
- def get_snat4_rule():
- rule = iptc.Rule()
- rule.src = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24'
- rule.dst = '!' + rule.src
- target = rule.create_target("SNAT")
- target.to_source = snat_target
- return rule
-
- while not quit_now:
- time.sleep(10)
- with lock:
- try:
- table = iptc.Table('nat')
- table.refresh()
- chain = iptc.Chain(table, 'POSTROUTING')
- table.autocommit = False
- if get_snat4_rule() not in chain.rules:
- logCrit('Added POSTROUTING rule for source network %s to SNAT target %s' % (get_snat4_rule().src, snat_target))
- chain.insert_rule(get_snat4_rule())
- table.commit()
- else:
- for position, item in enumerate(chain.rules):
- if item == get_snat4_rule():
- if position != 0:
- chain.delete_rule(get_snat4_rule())
- table.commit()
- table.autocommit = True
- except:
- print('Error running SNAT4, retrying...')
-
-def autopurge():
- while not quit_now:
- time.sleep(10)
- refreshF2boptions()
- BAN_TIME = int(f2boptions['ban_time'])
- MAX_ATTEMPTS = int(f2boptions['max_attempts'])
- QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
- if QUEUE_UNBAN:
- for net in QUEUE_UNBAN:
- unban(str(net))
- for net in bans.copy():
- if bans[net]['attempts'] >= MAX_ATTEMPTS:
- if time.time() - bans[net]['last_attempt'] > BAN_TIME:
- unban(net)
-
-def isIpNetwork(address):
- try:
- ipaddress.ip_network(address, False)
- except ValueError:
- return False
- return True
-
-
-def genNetworkList(list):
- resolver = dns.resolver.Resolver()
- hostnames = []
- networks = []
- for key in list:
- if isIpNetwork(key):
- networks.append(key)
- else:
- hostnames.append(key)
- for hostname in hostnames:
- hostname_ips = []
- for rdtype in ['A', 'AAAA']:
- try:
- answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
- except dns.exception.Timeout:
- logInfo('Hostname %s timed out on resolve' % hostname)
- break
- except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
- continue
- except dns.exception.DNSException as dnsexception:
- logInfo('%s' % dnsexception)
- continue
- for rdata in answer:
- hostname_ips.append(rdata.to_text())
- networks.extend(hostname_ips)
- return set(networks)
-
-def whitelistUpdate():
- global lock
- global quit_now
- global WHITELIST
- while not quit_now:
- start_time = time.time()
- list = r.hgetall('F2B_WHITELIST')
- new_whitelist = []
- if list:
- new_whitelist = genNetworkList(list)
- with lock:
- if Counter(new_whitelist) != Counter(WHITELIST):
- WHITELIST = new_whitelist
- logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
- time.sleep(60.0 - ((time.time() - start_time) % 60.0))
-
-def blacklistUpdate():
- global quit_now
- global BLACKLIST
- while not quit_now:
- start_time = time.time()
- list = r.hgetall('F2B_BLACKLIST')
- new_blacklist = []
- if list:
- new_blacklist = genNetworkList(list)
- if Counter(new_blacklist) != Counter(BLACKLIST):
- addban = set(new_blacklist).difference(BLACKLIST)
- delban = set(BLACKLIST).difference(new_blacklist)
- BLACKLIST = new_blacklist
- logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
- if addban:
- for net in addban:
- permBan(net=net)
- if delban:
- for net in delban:
- permBan(net=net, unban=True)
- time.sleep(60.0 - ((time.time() - start_time) % 60.0))
-
-def initChain():
- # Is called before threads start, no locking
- print("Initializing mailcow netfilter chain")
- # IPv4
- if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in iptc.Table(iptc.Table.FILTER).chains:
- iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW")
- for c in ['FORWARD', 'INPUT']:
- chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
- rule = iptc.Rule()
- rule.src = '0.0.0.0/0'
- rule.dst = '0.0.0.0/0'
- target = iptc.Target(rule, "MAILCOW")
- rule.target = target
- if rule not in chain.rules:
- chain.insert_rule(rule)
-
-if __name__ == '__main__':
-
- # In case a previous session was killed without cleanup
- clear()
- # Reinit MAILCOW chain
- initChain()
-
- watch_thread = Thread(target=watch)
- watch_thread.daemon = True
- watch_thread.start()
-
- if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
- try:
- snat_ip = os.getenv('SNAT_TO_SOURCE')
- snat_ipo = ipaddress.ip_address(snat_ip)
- if type(snat_ipo) is ipaddress.IPv4Address:
- snat4_thread = Thread(target=snat4,args=(snat_ip,))
- snat4_thread.daemon = True
- snat4_thread.start()
- except ValueError:
- print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')
-
- autopurge_thread = Thread(target=autopurge)
- autopurge_thread.daemon = True
- autopurge_thread.start()
-
- mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
- mailcowchainwatch_thread.daemon = True
- mailcowchainwatch_thread.start()
-
- blacklistupdate_thread = Thread(target=blacklistUpdate)
- blacklistupdate_thread.daemon = True
- blacklistupdate_thread.start()
-
- whitelistupdate_thread = Thread(target=whitelistUpdate)
- whitelistupdate_thread.daemon = True
- whitelistupdate_thread.start()
-
- signal.signal(signal.SIGTERM, quit)
- atexit.register(clear)
-
- while not quit_now:
- time.sleep(0.5)
-
- sys.exit(exit_code)
+#!/usr/bin/env python3
+
+import re
+import os
+import sys
+import time
+import atexit
+import signal
+import ipaddress
+from collections import Counter
+from random import randint
+from threading import Thread
+from threading import Lock
+import redis
+import json
+import iptc
+import dns.resolver
+import dns.exception
+
+while True:
+ try:
+ redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
+ redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
+ if "".__eq__(redis_slaveof_ip):
+ r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
+ else:
+ r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
+ r.ping()
+ except Exception as ex:
+ print('%s - trying again in 3 seconds' % (ex))
+ time.sleep(3)
+ else:
+ break
+
+pubsub = r.pubsub()
+
+WHITELIST = []
+BLACKLIST= []
+
+bans = {}
+
+quit_now = False
+exit_code = 0
+lock = Lock()
+
+def log(priority, message):
+ tolog = {}
+ tolog['time'] = int(round(time.time()))
+ tolog['priority'] = priority
+ tolog['message'] = message
+ r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
+ print(message)
+
+def logWarn(message):
+ log('warn', message)
+
+def logCrit(message):
+ log('crit', message)
+
+def logInfo(message):
+ log('info', message)
+
+def refreshF2boptions():
+ global f2boptions
+ global quit_now
+ global exit_code
+ if not r.get('F2B_OPTIONS'):
+ f2boptions = {}
+ f2boptions['ban_time'] = int
+ f2boptions['max_attempts'] = int
+ f2boptions['retry_window'] = int
+ f2boptions['netban_ipv4'] = int
+ f2boptions['netban_ipv6'] = int
+ f2boptions['ban_time'] = r.get('F2B_BAN_TIME') or 1800
+ f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') or 10
+ f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') or 600
+ f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') or 32
+ f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') or 128
+ r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
+ else:
+ try:
+ f2boptions = {}
+ f2boptions = json.loads(r.get('F2B_OPTIONS'))
+ except ValueError:
+ print('Error loading F2B options: F2B_OPTIONS is not json')
+ quit_now = True
+ exit_code = 2
+
+def refreshF2bregex():
+ global f2bregex
+ global quit_now
+ global exit_code
+ if not r.get('F2B_REGEX'):
+ f2bregex = {}
+ f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
+ f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
+ f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
+ f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
+ f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
+ f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
+ f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
+ f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
+ f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
+ f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
+ r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
+ else:
+ try:
+ f2bregex = {}
+ f2bregex = json.loads(r.get('F2B_REGEX'))
+ except ValueError:
+ print('Error loading F2B options: F2B_REGEX is not json')
+ quit_now = True
+ exit_code = 2
+
+if r.exists('F2B_LOG'):
+ r.rename('F2B_LOG', 'NETFILTER_LOG')
+
+def mailcowChainOrder():
+ global lock
+ global quit_now
+ global exit_code
+ while not quit_now:
+ time.sleep(10)
+ with lock:
+ filter4_table = iptc.Table(iptc.Table.FILTER)
+ filter4_table.refresh()
+ for f in [filter4_table]:
+ forward_chain = iptc.Chain(f, 'FORWARD')
+ input_chain = iptc.Chain(f, 'INPUT')
+ for chain in [forward_chain, input_chain]:
+ target_found = False
+ for position, item in enumerate(chain.rules):
+ if item.target.name == 'MAILCOW':
+ target_found = True
+ if position > 2:
+ logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position))
+ quit_now = True
+ exit_code = 2
+ if not target_found:
+ logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name))
+ quit_now = True
+ exit_code = 2
+
+def ban(address):
+ global lock
+ refreshF2boptions()
+ BAN_TIME = int(f2boptions['ban_time'])
+ MAX_ATTEMPTS = int(f2boptions['max_attempts'])
+ RETRY_WINDOW = int(f2boptions['retry_window'])
+ NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
+ NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])
+
+ ip = ipaddress.ip_address(address)
+ if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
+ ip = ip.ipv4_mapped
+ address = str(ip)
+ if ip.is_private or ip.is_loopback:
+ return
+
+ self_network = ipaddress.ip_network(address)
+
+ with lock:
+ temp_whitelist = set(WHITELIST)
+
+ if temp_whitelist:
+ for wl_key in temp_whitelist:
+ wl_net = ipaddress.ip_network(wl_key, False)
+ if wl_net.overlaps(self_network):
+ logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
+ return
+
+ net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
+ net = str(net)
+
+ if not net in bans or time.time() - bans[net]['last_attempt'] > RETRY_WINDOW:
+ bans[net] = { 'attempts': 0 }
+ active_window = RETRY_WINDOW
+ else:
+ active_window = time.time() - bans[net]['last_attempt']
+
+ bans[net]['attempts'] += 1
+ bans[net]['last_attempt'] = time.time()
+
+ active_window = time.time() - bans[net]['last_attempt']
+
+ if bans[net]['attempts'] >= MAX_ATTEMPTS:
+ cur_time = int(round(time.time()))
+ logCrit('Banning %s for %d minutes' % (net, BAN_TIME / 60))
+ if type(ip) is ipaddress.IPv4Address:
+ with lock:
+ chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
+ rule = iptc.Rule()
+ rule.src = net
+ target = iptc.Target(rule, "REJECT")
+ rule.target = target
+ if rule not in chain.rules:
+ chain.insert_rule(rule)
+ else:
+ pass
+ r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + BAN_TIME)
+ else:
+ logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
+
+def unban(net):
+ global lock
+ if not net in bans:
+ logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
+ r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
+ return
+ logInfo('Unbanning %s' % net)
+ if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
+ with lock:
+ chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
+ rule = iptc.Rule()
+ rule.src = net
+ target = iptc.Target(rule, "REJECT")
+ rule.target = target
+ if rule in chain.rules:
+ chain.delete_rule(rule)
+ else:
+ pass
+ r.hdel('F2B_ACTIVE_BANS', '%s' % net)
+ r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
+ if net in bans:
+ del bans[net]
+
+def permBan(net, unban=False):
+ global lock
+ if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
+ with lock:
+ chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
+ rule = iptc.Rule()
+ rule.src = net
+ target = iptc.Target(rule, "REJECT")
+ rule.target = target
+ if rule not in chain.rules and not unban:
+ logCrit('Add host/network %s to blacklist' % net)
+ chain.insert_rule(rule)
+ r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
+ elif rule in chain.rules and unban:
+ logCrit('Remove host/network %s from blacklist' % net)
+ chain.delete_rule(rule)
+ r.hdel('F2B_PERM_BANS', '%s' % net)
+ else:
+ pass
+
+def quit(signum, frame):
+ global quit_now
+ quit_now = True
+
+def clear():
+ global lock
+ logInfo('Clearing all bans')
+ for net in bans.copy():
+ unban(net)
+ with lock:
+ filter4_table = iptc.Table(iptc.Table.FILTER)
+ for filter_table in [filter4_table]:
+ filter_table.autocommit = False
+ forward_chain = iptc.Chain(filter_table, "FORWARD")
+ input_chain = iptc.Chain(filter_table, "INPUT")
+ mailcow_chain = iptc.Chain(filter_table, "MAILCOW")
+ if mailcow_chain in filter_table.chains:
+ for rule in mailcow_chain.rules:
+ mailcow_chain.delete_rule(rule)
+ for rule in forward_chain.rules:
+ if rule.target.name == 'MAILCOW':
+ forward_chain.delete_rule(rule)
+ for rule in input_chain.rules:
+ if rule.target.name == 'MAILCOW':
+ input_chain.delete_rule(rule)
+ filter_table.delete_chain("MAILCOW")
+ filter_table.commit()
+ filter_table.refresh()
+ filter_table.autocommit = True
+ r.delete('F2B_ACTIVE_BANS')
+ r.delete('F2B_PERM_BANS')
+ pubsub.unsubscribe()
+
+def watch():
+ logInfo('Watching Redis channel F2B_CHANNEL')
+ pubsub.subscribe('F2B_CHANNEL')
+
+ global quit_now
+ global exit_code
+
+ while not quit_now:
+ try:
+ for item in pubsub.listen():
+ refreshF2bregex()
+ for rule_id, rule_regex in f2bregex.items():
+ if item['data'] and item['type'] == 'message':
+ try:
+ result = re.search(rule_regex, item['data'])
+ except re.error:
+ result = False
+ if result:
+ addr = result.group(1)
+ ip = ipaddress.ip_address(addr)
+ if ip.is_private or ip.is_loopback:
+ continue
+ logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
+ ban(addr)
+ except Exception as ex:
+ logWarn('Error reading log line from pubsub')
+ quit_now = True
+ exit_code = 2
+
+def snat4(snat_target):
+ global lock
+ global quit_now
+
+ def get_snat4_rule():
+ rule = iptc.Rule()
+ rule.src = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24'
+ rule.dst = '!' + rule.src
+ target = rule.create_target("SNAT")
+ target.to_source = snat_target
+ match = rule.create_match("comment")
+ match.comment = f'{int(round(time.time()))}'
+ return rule
+
+ while not quit_now:
+ time.sleep(10)
+ with lock:
+ try:
+ table = iptc.Table('nat')
+ table.refresh()
+ chain = iptc.Chain(table, 'POSTROUTING')
+ table.autocommit = False
+ new_rule = get_snat4_rule()
+ for position, rule in enumerate(chain.rules):
+ match = all((
+ new_rule.get_src() == rule.get_src(),
+ new_rule.get_dst() == rule.get_dst(),
+ new_rule.target.parameters == rule.target.parameters,
+ new_rule.target.name == rule.target.name
+ ))
+ if position == 0:
+ if not match:
+ logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
+ chain.insert_rule(new_rule)
+ else:
+ if match:
+ logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}')
+ chain.delete_rule(rule)
+ table.commit()
+ table.autocommit = True
+ except:
+ print('Error running SNAT4, retrying...')
+
+def autopurge():
+ while not quit_now:
+ time.sleep(10)
+ refreshF2boptions()
+ BAN_TIME = int(f2boptions['ban_time'])
+ MAX_ATTEMPTS = int(f2boptions['max_attempts'])
+ QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
+ if QUEUE_UNBAN:
+ for net in QUEUE_UNBAN:
+ unban(str(net))
+ for net in bans.copy():
+ if bans[net]['attempts'] >= MAX_ATTEMPTS:
+ if time.time() - bans[net]['last_attempt'] > BAN_TIME:
+ unban(net)
+
+def isIpNetwork(address):
+ try:
+ ipaddress.ip_network(address, False)
+ except ValueError:
+ return False
+ return True
+
+
+def genNetworkList(list):
+ resolver = dns.resolver.Resolver()
+ hostnames = []
+ networks = []
+ for key in list:
+ if isIpNetwork(key):
+ networks.append(key)
+ else:
+ hostnames.append(key)
+ for hostname in hostnames:
+ hostname_ips = []
+ for rdtype in ['A', 'AAAA']:
+ try:
+ answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
+ except dns.exception.Timeout:
+ logInfo('Hostname %s timed out on resolve' % hostname)
+ break
+ except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+ continue
+ except dns.exception.DNSException as dnsexception:
+ logInfo('%s' % dnsexception)
+ continue
+ for rdata in answer:
+ hostname_ips.append(rdata.to_text())
+ networks.extend(hostname_ips)
+ return set(networks)
+
+def whitelistUpdate():
+ global lock
+ global quit_now
+ global WHITELIST
+ while not quit_now:
+ start_time = time.time()
+ list = r.hgetall('F2B_WHITELIST')
+ new_whitelist = []
+ if list:
+ new_whitelist = genNetworkList(list)
+ with lock:
+ if Counter(new_whitelist) != Counter(WHITELIST):
+ WHITELIST = new_whitelist
+ logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
+ time.sleep(60.0 - ((time.time() - start_time) % 60.0))
+
+def blacklistUpdate():
+ global quit_now
+ global BLACKLIST
+ while not quit_now:
+ start_time = time.time()
+ list = r.hgetall('F2B_BLACKLIST')
+ new_blacklist = []
+ if list:
+ new_blacklist = genNetworkList(list)
+ if Counter(new_blacklist) != Counter(BLACKLIST):
+ addban = set(new_blacklist).difference(BLACKLIST)
+ delban = set(BLACKLIST).difference(new_blacklist)
+ BLACKLIST = new_blacklist
+ logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
+ if addban:
+ for net in addban:
+ permBan(net=net)
+ if delban:
+ for net in delban:
+ permBan(net=net, unban=True)
+ time.sleep(60.0 - ((time.time() - start_time) % 60.0))
+
+def initChain():
+ # Is called before threads start, no locking
+ print("Initializing mailcow netfilter chain")
+ # IPv4
+ if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in iptc.Table(iptc.Table.FILTER).chains:
+ iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW")
+ for c in ['FORWARD', 'INPUT']:
+ chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
+ rule = iptc.Rule()
+ rule.src = '0.0.0.0/0'
+ rule.dst = '0.0.0.0/0'
+ target = iptc.Target(rule, "MAILCOW")
+ rule.target = target
+ if rule not in chain.rules:
+ chain.insert_rule(rule)
+
+if __name__ == '__main__':
+
+ # In case a previous session was killed without cleanup
+ clear()
+ # Reinit MAILCOW chain
+ initChain()
+
+ watch_thread = Thread(target=watch)
+ watch_thread.daemon = True
+ watch_thread.start()
+
+ if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
+ try:
+ snat_ip = os.getenv('SNAT_TO_SOURCE')
+ snat_ipo = ipaddress.ip_address(snat_ip)
+ if type(snat_ipo) is ipaddress.IPv4Address:
+ snat4_thread = Thread(target=snat4,args=(snat_ip,))
+ snat4_thread.daemon = True
+ snat4_thread.start()
+ except ValueError:
+ print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')
+
+ autopurge_thread = Thread(target=autopurge)
+ autopurge_thread.daemon = True
+ autopurge_thread.start()
+
+ mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
+ mailcowchainwatch_thread.daemon = True
+ mailcowchainwatch_thread.start()
+
+ blacklistupdate_thread = Thread(target=blacklistUpdate)
+ blacklistupdate_thread.daemon = True
+ blacklistupdate_thread.start()
+
+ whitelistupdate_thread = Thread(target=whitelistUpdate)
+ whitelistupdate_thread.daemon = True
+ whitelistupdate_thread.start()
+
+ signal.signal(signal.SIGTERM, quit)
+ atexit.register(clear)
+
+ while not quit_now:
+ time.sleep(0.5)
+
+ sys.exit(exit_code)
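
In the rewritten snat4() loop above, the SNAT rule now carries a timestamped comment match, so whole-rule membership tests (`rule in chain.rules`) would never recognize an older copy; the loop therefore compares src, dst and target fields explicitly, keeps the rule at position 0, and deletes matching duplicates further down the chain. A runnable Python sketch of that reconciliation, modeling rules as plain dicts rather than iptc objects:

    def same_snat_rule(a, b):
        # Compare only the fields that identify the rule; the timestamped
        # comment is deliberately ignored, otherwise no two rules built at
        # different times would ever compare equal.
        keys = ('src', 'dst', 'target', 'to_source')
        return all(a[k] == b[k] for k in keys)

    def reconcile(chain, new_rule):
        # chain is a list of rule dicts, position 0 first.
        if not chain or not same_snat_rule(chain[0], new_rule):
            chain.insert(0, new_rule)   # ensure the rule sits at position 0
        chain[1:] = [r for r in chain[1:] if not same_snat_rule(r, new_rule)]
        return chain

    snat = {'src': '172.22.1.0/24', 'dst': '!172.22.1.0/24',
            'target': 'SNAT', 'to_source': '203.0.113.1'}
    other = {'src': '10.0.0.0/8', 'dst': '0.0.0.0/0',
             'target': 'MASQUERADE', 'to_source': None}
    print(reconcile([other, dict(snat)], dict(snat)))
    # SNAT rule moved to position 0, stray duplicate removed
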
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/Dockerfile
index 6d9727c..10d63d0 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:3.14
+FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
WORKDIR /app
@@ -8,11 +8,11 @@
&& apk add --update --no-cache python3 py3-pip openssl tzdata libmagic \
&& pip3 install --upgrade pip \
&& pip3 install --upgrade asyncio python-magic \
- && pip3 install --upgrade https://github.com/HeinleinSupport/oletools/archive/master.zip \
+ && pip3 install --upgrade https://github.com/decalage2/oletools/archive/master.zip \
&& apk del .build-deps
-# && sed -i 's/decompress_stream(bytearray(compressed_code))/bytes2str(decompress_stream(bytearray(compressed_code)))/g' /usr/lib/python3.8/site-packages/oletools/olevba.py
+# && sed -i 's/template_injection_detected = True/template_injection_detected = False/g' /usr/lib/python3.9/site-packages/oletools/olevba.py
-ADD https://raw.githubusercontent.com/HeinleinSupport/olefy/master/olefy.py /app/
+ADD olefy.py /app/
RUN chown -R nobody:nobody /app /tmp
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/olefy.py b/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/olefy.py
new file mode 100644
index 0000000..776e786
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/olefy/olefy.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Dennis Kalbhen <d.kalbhen@heinlein-support.de>
+# Copyright (c) 2020, Carsten Rosenberg <c.rosenberg@heinlein-support.de>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+###
+#
+# olefy is a little helper socket to use oletools with rspamd. (https://rspamd.com)
+# Please find updates and issues here: https://github.com/HeinleinSupport/olefy
+#
+###
+
+from subprocess import Popen, PIPE
+import sys
+import os
+import logging
+import asyncio
+import time
+import magic
+import re
+
+# merge variables from /etc/olefy.conf and the defaults
+olefy_listen_addr_string = os.getenv('OLEFY_BINDADDRESS', '127.0.0.1,::1')
+olefy_listen_port = int(os.getenv('OLEFY_BINDPORT', '10050'))
+olefy_tmp_dir = os.getenv('OLEFY_TMPDIR', '/tmp')
+olefy_python_path = os.getenv('OLEFY_PYTHON_PATH', '/usr/bin/python3')
+olefy_olevba_path = os.getenv('OLEFY_OLEVBA_PATH', '/usr/local/bin/olevba3')
+# 10:DEBUG, 20:INFO, 30:WARNING, 40:ERROR, 50:CRITICAL
+olefy_loglvl = int(os.getenv('OLEFY_LOGLVL', 20))
+olefy_min_length = int(os.getenv('OLEFY_MINLENGTH', 500))
+olefy_del_tmp = int(os.getenv('OLEFY_DEL_TMP', 1))
+olefy_del_tmp_failed = int(os.getenv('OLEFY_DEL_TMP_FAILED', 1))
+
+# internal used variables
+request_time = '0000000000.000000'
+olefy_protocol = 'OLEFY'
+olefy_ping = 'PING'
+olefy_protocol_sep = '\n\n'
+olefy_headers = {}
+
+# init logging
+logger = logging.getLogger('olefy')
+logging.basicConfig(stream=sys.stdout, level=olefy_loglvl, format='olefy %(levelname)s %(funcName)s %(message)s')
+
+logger.debug('olefy listen address string: {} (type {})'.format(olefy_listen_addr_string, type(olefy_listen_addr_string)))
+
+if not olefy_listen_addr_string:
+ olefy_listen_addr = ""
+else:
+ addr_re = re.compile('[\[" \]]')
+ olefy_listen_addr = addr_re.sub('', olefy_listen_addr_string.replace("'", "")).split(',')
+
+# log runtime variables
+logger.info('olefy listen address: {} (type: {})'.format(olefy_listen_addr, type(olefy_listen_addr)))
+logger.info('olefy listen port: {}'.format(olefy_listen_port))
+logger.info('olefy tmp dir: {}'.format(olefy_tmp_dir))
+logger.info('olefy python path: {}'.format(olefy_python_path))
+logger.info('olefy olevba path: {}'.format(olefy_olevba_path))
+logger.info('olefy log level: {}'.format(olefy_loglvl))
+logger.info('olefy min file length: {}'.format(olefy_min_length))
+logger.info('olefy delete tmp file: {}'.format(olefy_del_tmp))
+logger.info('olefy delete tmp file when failed: {}'.format(olefy_del_tmp_failed))
+
+if not os.path.isfile(olefy_python_path):
+ logger.critical('python path not found: {}'.format(olefy_python_path))
+ exit(1)
+if not os.path.isfile(olefy_olevba_path):
+ logger.critical('olevba path not found: {}'.format(olefy_olevba_path))
+ exit(1)
+
+# olefy protocol function
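+# A request is a header block, a blank line, then the raw document bytes:
+#   OLEFY/1.0\nMethod: oletools\nRspamd-ID: <id>\n\n<stream>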
+def protocol_split( olefy_line ):
+ header_lines = olefy_line.split('\n')
+ for line in header_lines:
+ if line == 'OLEFY/1.0':
+ olefy_headers['olefy'] = line
+ elif line != '':
+            kv = line.split(': ', 1)
+            if len(kv) == 2 and kv[0] != '' and kv[1] != '':
+                olefy_headers[kv[0]] = kv[1]
+ logger.debug('olefy_headers: {}'.format(olefy_headers))
+
+# calling oletools
+def oletools( stream, tmp_file_name, lid ):
+ if olefy_min_length > stream.__len__():
+ logger.error('{} {} bytes (Not Scanning! File smaller than {!r})'.format(lid, stream.__len__(), olefy_min_length))
+ out = b'[ { "error": "File too small" } ]'
+ else:
+ tmp_file = open(tmp_file_name, 'wb')
+ tmp_file.write(stream)
+ tmp_file.close()
+
+ file_magic = magic.Magic(mime=True, uncompress=True)
+ file_mime = file_magic.from_file(tmp_file_name)
+ logger.info('{} {} (libmagic output)'.format(lid, file_mime))
+
+ # do the olefy
+ cmd_tmp = Popen([olefy_python_path, olefy_olevba_path, '-a', '-j' , '-l', 'error', tmp_file_name], stdout=PIPE, stderr=PIPE)
+ out, err = cmd_tmp.communicate()
+        out = bytes(out.decode('utf-8', 'ignore').replace('  ', ' ').replace('\t', '').replace('\n', '').replace('XLMMacroDeobfuscator: pywin32 is not installed (only is required if you want to use MS Excel)', ''), encoding="utf-8")
+ failed = False
+ if out.__len__() < 30:
+            logger.error('{} olevba returned <30 chars - rc: {!r}, response: {!r}, error: {!r}'.format(lid, cmd_tmp.returncode,
+                out.decode('utf-8', 'ignore'), err.decode('utf-8', 'ignore')))
+ out = b'[ { "error": "Unhandled error - too short olevba response" } ]'
+ failed = True
+ elif err.__len__() > 10 and cmd_tmp.returncode == 9:
+ logger.error("{} olevba stderr >10 chars - rc: {!r}, response: {!r}".format(lid, cmd_tmp.returncode, err.decode("utf-8", "ignore")))
+ out = b'[ { "error": "Decrypt failed" } ]'
+ failed = True
+ elif err.__len__() > 10 and cmd_tmp.returncode > 9:
+ logger.error('{} olevba stderr >10 chars - rc: {!r}, response: {!r}'.format(lid, cmd_tmp.returncode, err.decode('utf-8', 'ignore')))
+ out = b'[ { "error": "Unhandled oletools error" } ]'
+ failed = True
+ elif cmd_tmp.returncode != 0:
+ logger.error('{} olevba exited with code {!r}; err: {!r}'.format(lid, cmd_tmp.returncode, err.decode('utf-8', 'ignore')))
+ failed = True
+
+ if failed and olefy_del_tmp_failed == 0:
+ logger.debug('{} {} FAILED: not deleting tmp file'.format(lid, tmp_file_name))
+ elif olefy_del_tmp == 1:
+ logger.debug('{} {} deleting tmp file'.format(lid, tmp_file_name))
+ os.remove(tmp_file_name)
+
+ logger.debug('{} response: {}'.format(lid, out.decode('utf-8', 'ignore')))
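+    # append the end-of-response marker expected by rspamd's oletools scanner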
+ return out + b'\t\n\n\t'
+
+# Asyncio data handling, default AIO-Functions
+class AIO(asyncio.Protocol):
+ def __init__(self):
+ self.extra = bytearray()
+
+ def connection_made(self, transport):
+ global request_time
+ peer = transport.get_extra_info('peername')
+ logger.debug('{} new connection was made'.format(peer))
+ self.transport = transport
+ request_time = str(time.time())
+
+    def data_received(self, request):
+ peer = self.transport.get_extra_info('peername')
+ logger.debug('{} data received from new connection'.format(peer))
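+        # payloads may arrive in chunks; buffer everything until eof_received()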
+ self.extra.extend(request)
+
+ def eof_received(self):
+ peer = self.transport.get_extra_info('peername')
+ olefy_protocol_err = False
+ proto_ck = self.extra[0:2000].decode('utf-8', 'ignore')
+
+ headers = proto_ck[0:proto_ck.find(olefy_protocol_sep)]
+
+ if olefy_protocol == headers[0:5]:
+ self.extra = bytearray(self.extra[len(headers)+2:len(self.extra)])
+ protocol_split(headers)
+ else:
+ olefy_protocol_err = True
+
+        if olefy_ping == headers[0:4]:
+            is_ping = True
+            lid = ''
+        else:
+            is_ping = False
+            rspamd_id = olefy_headers.get('Rspamd-ID', '')[:6]
+            lid = '<'+rspamd_id+'>' if rspamd_id else ''
+            tmp_file_name = olefy_tmp_dir+'/'+request_time+'.'+str(peer[1])+'.'+rspamd_id
+            logger.debug('{} {} chosen as tmp filename'.format(lid, tmp_file_name))
+
+ if not is_ping or olefy_loglvl == 10:
+ logger.info('{} {} bytes (stream size)'.format(lid, self.extra.__len__()))
+
+ if olefy_ping == headers[0:4]:
+ logger.debug('{} PING request'.format(peer))
+ out = b'PONG'
+        elif olefy_protocol_err or olefy_headers.get('olefy') != 'OLEFY/1.0':
+ logger.error('{} Protocol ERROR: no OLEFY/1.0 found'.format(lid))
+ out = b'[ { "error": "Protocol error" } ]'
+        elif 'Method' in olefy_headers:
+            if olefy_headers['Method'] == 'oletools':
+                out = oletools(self.extra, tmp_file_name, lid)
+            else:
+                logger.error('{} Protocol ERROR: unknown Method {!r}'.format(lid, olefy_headers['Method']))
+                out = b'[ { "error": "Protocol error: unknown Method" } ]'
+        else:
+            logger.error('{} Protocol ERROR: Method header not found'.format(lid))
+            out = b'[ { "error": "Protocol error: Method header not found" } ]'
+
+ self.transport.write(out)
+ if not is_ping or olefy_loglvl == 10:
+            logger.info('{} {} response sent: {!r}'.format(lid, peer, out))
+ self.transport.close()
+
+
+# start the listeners
+loop = asyncio.get_event_loop()
+# each client connection will create a new protocol instance
+coro = loop.create_server(AIO, olefy_listen_addr, olefy_listen_port)
+server = loop.run_until_complete(coro)
+for sockets in server.sockets:
+ logger.info('serving on {}'.format(sockets.getsockname()))
+
+# XXX serve requests until KeyboardInterrupt, not needed for production
+try:
+ loop.run_forever()
+except KeyboardInterrupt:
+ pass
+
+# graceful shutdown/reload
+server.close()
+loop.run_until_complete(server.wait_closed())
+loop.close()
+logger.info('stopped serving')
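
For reference, a minimal client sketch for the OLEFY/1.0 protocol implemented above. Host, port and the Rspamd-ID value are illustrative; the defaults match OLEFY_BINDADDRESS/OLEFY_BINDPORT, and the Method header selects the oletools handler:

    import socket

    def olefy_scan(payload: bytes, host: str = '127.0.0.1', port: int = 10050) -> bytes:
        # header block, blank line, then the raw document stream
        request = b'OLEFY/1.0\nMethod: oletools\nRspamd-ID: abc123\n\n' + payload
        with socket.create_connection((host, port)) as sock:
            sock.sendall(request)
            sock.shutdown(socket.SHUT_WR)  # the server only acts on eof_received()
            response = bytearray()
            while chunk := sock.recv(4096):
                response.extend(chunk)
        # strip the b'\t\n\n\t' trailer appended by oletools()
        return bytes(response).rstrip(b'\t\n')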
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/Dockerfile
index ca851e6..93acb33 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/phpfpm/Dockerfile
@@ -1,12 +1,12 @@
-FROM php:8.0-fpm-alpine3.14
+FROM php:8.1-fpm-alpine3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
-ENV APCU_PECL 5.1.20
-ENV IMAGICK_PECL 3.5.1
-# Mailparse is pulled from master branch
-#ENV MAILPARSE_PECL 3.0.2
-ENV MEMCACHED_PECL 3.1.5
-ENV REDIS_PECL 5.3.4
+ENV APCU_PECL 5.1.22
+ENV IMAGICK_PECL 3.7.0
+ENV MAILPARSE_PECL 3.1.4
+ENV MEMCACHED_PECL 3.2.0
+ENV REDIS_PECL 5.3.7
+ENV COMPOSER 2.4.4
RUN apk add -U --no-cache autoconf \
aspell-dev \
@@ -18,6 +18,7 @@
freetype-dev \
g++ \
git \
+ gettext \
gettext-dev \
gmp-dev \
gnupg \
@@ -27,8 +28,11 @@
imagemagick-dev \
imap-dev \
jq \
+ libavif \
+ libavif-dev \
libjpeg-turbo \
libjpeg-turbo-dev \
+ libmemcached \
libmemcached-dev \
libpng \
libpng-dev \
@@ -38,7 +42,9 @@
libtool \
libwebp-dev \
libxml2-dev \
+ libxpm \
libxpm-dev \
+ libzip \
libzip-dev \
make \
mysql-client \
@@ -49,22 +55,24 @@
samba-client \
zlib-dev \
tzdata \
- && git clone https://github.com/php/pecl-mail-mailparse \
- && cd pecl-mail-mailparse \
- && pecl install package.xml \
- && cd .. \
- && rm -r pecl-mail-mailparse \
- && pecl install redis-${REDIS_PECL} memcached-${MEMCACHED_PECL} APCu-${APCU_PECL} imagick-${IMAGICK_PECL} \
+ && pecl install mailparse-${MAILPARSE_PECL} \
+ && pecl install redis-${REDIS_PECL} \
+ && pecl install memcached-${MEMCACHED_PECL} \
+ && pecl install APCu-${APCU_PECL} \
+ && pecl install imagick-${IMAGICK_PECL} \
&& docker-php-ext-enable apcu imagick memcached mailparse redis \
&& pecl clear-cache \
&& docker-php-ext-configure intl \
&& docker-php-ext-configure exif \
&& docker-php-ext-configure gd --with-freetype=/usr/include/ \
--with-jpeg=/usr/include/ \
+ --with-webp \
+ --with-xpm \
+ --with-avif \
&& docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets zip bcmath gmp \
&& docker-php-ext-configure imap --with-imap --with-imap-ssl \
&& docker-php-ext-install -j 4 imap \
- && curl --silent --show-error https://getcomposer.org/installer | php \
+ && curl --silent --show-error https://getcomposer.org/installer | php -- --version=${COMPOSER} \
&& mv composer.phar /usr/local/bin/composer \
&& chmod +x /usr/local/bin/composer \
&& apk del --purge autoconf \
@@ -72,15 +80,21 @@
cyrus-sasl-dev \
freetype-dev \
g++ \
+ gettext-dev \
icu-dev \
imagemagick-dev \
imap-dev \
+ libavif-dev \
libjpeg-turbo-dev \
+ libmemcached-dev \
libpng-dev \
libressl-dev \
libwebp-dev \
libxml2-dev \
+ libxpm-dev \
+ libzip-dev \
make \
+ openldap-dev \
pcre-dev \
zlib-dev
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/Dockerfile
index 56b274a..e3c6498 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/Dockerfile
@@ -1,4 +1,4 @@
-FROM debian:buster-slim
+FROM debian:bullseye-slim
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
ARG DEBIAN_FRONTEND=noninteractive
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/postfix.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/postfix.sh
index e734a9a..78b070e 100755
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/postfix.sh
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/postfix.sh
@@ -323,7 +323,19 @@
dbname = ${DBNAME}
# First select queries domain and alias_domain to determine if domains are active.
query = SELECT goto FROM alias
- WHERE address='%s'
+ WHERE id IN (
+ SELECT COALESCE (
+ (
+ SELECT id FROM alias
+ WHERE address='%s'
+ AND (active='1' OR active='2')
+ ), (
+ SELECT id FROM alias
+ WHERE address='@%d'
+ AND (active='1' OR active='2')
+ )
+ )
+ )
AND active='1'
AND (domain IN
(SELECT domain FROM domain
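
The rewritten virtual-alias query resolves the matching alias id via COALESCE: the first subselect (exact match on '%s') wins, and the domain catch-all '@%d' is consulted only when no active exact alias exists, so a catch-all can no longer shadow a more specific alias. A rough sketch of that precedence (illustrative data structures, not mailcow code):

    def resolve_alias_id(address, domain, aliases):
        """aliases maps an alias address to an (id, active) tuple."""
        # COALESCE semantics: the first subselect that yields a row wins
        for key in (address, '@' + domain):
            row = aliases.get(key)
            if row and row[1] in ('1', '2'):  # active='1' OR active='2'
                return row[0]
        return None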
@@ -354,7 +366,7 @@
WHERE alias_domain.alias_domain = '%d'
AND mailbox.username = CONCAT('%u','@',alias_domain.target_domain)
AND (mailbox.active = '1' OR mailbox.active ='2')
- AND alias_domain.active='1'
+ AND alias_domain.active='1';
EOF
# MX based routing
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng-redis_slave.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng-redis_slave.conf
index 40fb1cd..558305e 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng-redis_slave.conf
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng-redis_slave.conf
@@ -1,4 +1,4 @@
-@version: 3.19
+@version: 3.28
@include "scl.conf"
options {
chain_hostnames(off);
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng.conf
index 8fdc104..a1ccd07 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng.conf
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/postfix/syslog-ng.conf
@@ -1,4 +1,4 @@
-@version: 3.19
+@version: 3.28
@include "scl.conf"
options {
chain_hostnames(off);
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/Dockerfile
index ee08f8d..2520ddc 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/Dockerfile
@@ -21,10 +21,12 @@
&& apt-get clean \
&& mkdir -p /run/rspamd \
&& chown _rspamd:_rspamd /run/rspamd \
- && echo 'alias ll="ls -la --color"' >> ~/.bashrc
+ && echo 'alias ll="ls -la --color"' >> ~/.bashrc \
+ && sed -i 's/#analysis_keyword_table > 0/analysis_cat_table.macro_exist == "M"/g' /usr/share/rspamd/lualib/lua_scanners/oletools.lua
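+# The sed above patches lua_scanners/oletools.lua so a document is flagged only when
+# olevba reports an existing macro (macro_exist == "M"), not merely any keyword hit.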
COPY settings.conf /etc/rspamd/settings.conf
COPY metadata_exporter.lua /usr/share/rspamd/plugins/metadata_exporter.lua
+COPY set_worker_password.sh /set_worker_password.sh
COPY docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT ["/docker-entrypoint.sh"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/set_worker_password.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/set_worker_password.sh
new file mode 100755
index 0000000..7205e88
--- /dev/null
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/rspamd/set_worker_password.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
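+# Usage (inside the rspamd container): /set_worker_password.sh '<new controller password>'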
+
+password_file='/etc/rspamd/override.d/worker-controller-password.inc'
+password_hash=$(/usr/bin/rspamadm pw -e -p "$1")
+
+echo 'enable_password = "'"$password_hash"'";' > "$password_file"
+
+if grep -qF "$password_hash" "$password_file"; then
+ echo "OK"
+else
+ echo "ERROR"
+fi
\ No newline at end of file
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/Dockerfile
index 72f7d81..f08600a 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/Dockerfile
@@ -2,7 +2,7 @@
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
ARG DEBIAN_FRONTEND=noninteractive
-ARG SOGO_DEBIAN_REPOSITORY=http://packages.inverse.ca/SOGo/nightly/5/debian/
+ARG SOGO_DEBIAN_REPOSITORY=http://packages.sogo.nu/nightly/5/debian/
ENV LC_ALL C
ENV GOSU_VERSION 1.14
@@ -30,7 +30,7 @@
&& gosu nobody true \
&& mkdir /usr/share/doc/sogo \
&& touch /usr/share/doc/sogo/empty.sh \
- && apt-key adv --keyserver keyserver.ubuntu.com --recv-key 0x810273C4 \
+ && apt-key adv --keyserver keys.openpgp.org --recv-key 74FFC6D72B925A34B5D356BDF8A27B36A6E2EAE9 \
&& echo "deb ${SOGO_DEBIAN_REPOSITORY} bullseye bullseye" > /etc/apt/sources.list.d/sogo.list \
&& apt-get update && apt-get install -y --no-install-recommends \
sogo \
@@ -52,4 +52,4 @@
ENTRYPOINT ["/docker-entrypoint.sh"]
-CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
+CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
\ No newline at end of file
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/bootstrap-sogo.sh b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/bootstrap-sogo.sh
index 04472df..aa15525 100755
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/bootstrap-sogo.sh
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/bootstrap-sogo.sh
@@ -142,6 +142,10 @@
<string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_acl</string>
<key>SOGoIMAPServer</key>
<string>imap://${IPV4_NETWORK}.250:143/?TLS=YES&tlsVerifyMode=none</string>
+ <key>SOGoSieveServer</key>
+ <string>sieve://${IPV4_NETWORK}.250:4190/?TLS=YES&tlsVerifyMode=none</string>
+ <key>SOGoSMTPServer</key>
+ <string>smtp://${IPV4_NETWORK}.253:588/?TLS=YES&tlsVerifyMode=none</string>
<key>SOGoTrustProxyAuthentication</key>
<string>YES</string>
<key>SOGoEncryptionKey</key>
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng-redis_slave.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng-redis_slave.conf
index 5a84b72..9b460bd 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng-redis_slave.conf
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng-redis_slave.conf
@@ -1,4 +1,4 @@
-@version: 3.19
+@version: 3.28
@include "scl.conf"
options {
chain_hostnames(off);
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng.conf b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng.conf
index 537038e..889a3f3 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng.conf
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/sogo/syslog-ng.conf
@@ -1,4 +1,4 @@
-@version: 3.19
+@version: 3.28
@include "scl.conf"
options {
chain_hostnames(off);
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/Dockerfile
index 6dfec41..0629925 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/solr/Dockerfile
@@ -16,10 +16,15 @@
tzdata \
curl \
bash \
+ zip \
&& apt-get autoclean \
&& rm -rf /var/lib/apt/lists/* \
&& chmod +x /solr.sh \
&& sync \
&& bash /solr.sh --bootstrap
+
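+# strip the JndiLookup class from log4j-core to mitigate Log4Shell (CVE-2021-44228)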
+RUN zip -q -d /opt/solr/server/lib/ext/log4j-core-*.jar org/apache/logging/log4j/core/lookup/JndiLookup.class
+
+RUN apt-get remove -y zip
CMD ["/solr.sh"]
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/Dockerfile
index a937e7e..d9756d0 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/unbound/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:3.14
+FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
diff --git a/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/Dockerfile b/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/Dockerfile
index 00cb8e9..654dea0 100644
--- a/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/Dockerfile
+++ b/mailcow/src/mailcow-dockerized/data/Dockerfiles/watchdog/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:3.14
+FROM alpine:3.17
LABEL maintainer "André Peters <andre.peters@servercow.de>"
# Installation