first
This commit is contained in:
commit
5aa7d034f7
3292 changed files with 465160 additions and 0 deletions
329
helper-scripts/_cold-standby.sh
Executable file
329
helper-scripts/_cold-standby.sh
Executable file
|
|
@ -0,0 +1,329 @@
|
|||
#!/usr/bin/env bash

# Cold-standby helper: mirrors this mailcow instance (base directory and
# Docker volumes) to a remote standby host over SSH/rsync.
PATH=${PATH}:/opt/bin
DATE=$(date +%Y-%m-%d_%H_%M_%S)
LOCAL_ARCH=$(uname -m)
# Byte-wise, locale-independent behavior for grep/sort/etc.
export LC_ALL=C

echo
echo "If this script is run automatically by cron or a timer AND you are using block-level snapshots on your backup destination, make sure both do not run at the same time."
echo "The snapshots of your backup destination should run AFTER the cold standby script finished to ensure consistent snapshots."
echo
function docker_garbage() {
  # Remove outdated local mailcow images, keeping only the tags referenced
  # by docker-compose.yml.
  # Globals: reads docker-compose.yml in CWD; calls docker.
  IMGS_TO_DELETE=()

  for container in $(grep -oP "image: \Kmailcow.+" docker-compose.yml); do
    REPOSITORY=${container/:*}
    TAG=${container/*:}
    # Fix: the original derived BOTH of these from the identical expansion
    # ${container/*.}, so "major" and "minor" were always the same value.
    V_MAIN=${TAG/.*}   # major version of the current tag
    V_SUB=${TAG/*.}    # minor version of the current tag
    EXISTING_TAGS=$(docker images | grep ${REPOSITORY} | awk '{ print $2 }')

    for existing_tag in ${EXISTING_TAGS[@]}; do
      V_MAIN_EXISTING=${existing_tag/.*}
      V_SUB_EXISTING=${existing_tag/*.}

      # "latest" is deprecated for mailcow images. Check it BEFORE the
      # integer guards below: "latest" is not numeric, so in the original
      # the guards continue'd first and this branch was unreachable.
      if [[ ${existing_tag} == "latest" ]]; then
        echo "Found deprecated label \"latest\" for repository ${REPOSITORY}, it should be deleted."
        IMGS_TO_DELETE+=(${REPOSITORY}:${existing_tag})
        continue
      fi

      # Not an integer
      [[ ! ${V_MAIN_EXISTING} =~ ^[0-9]+$ ]] && continue
      [[ ! ${V_SUB_EXISTING} =~ ^[0-9]+$ ]] && continue

      if [[ ${V_MAIN_EXISTING} -lt ${V_MAIN} ]]; then
        echo "Found tag ${existing_tag} for ${REPOSITORY}, which is older than the current tag ${TAG} and should be deleted."
        IMGS_TO_DELETE+=(${REPOSITORY}:${existing_tag})
      elif [[ ${V_MAIN_EXISTING} -eq ${V_MAIN} && ${V_SUB_EXISTING} -lt ${V_SUB} ]]; then
        # Only compare minors within the same major release.
        echo "Found tag ${existing_tag} for ${REPOSITORY}, which is older than the current tag ${TAG} and should be deleted."
        IMGS_TO_DELETE+=(${REPOSITORY}:${existing_tag})
      fi
    done
  done

  if [[ ! -z ${IMGS_TO_DELETE[*]} ]]; then
    docker rmi ${IMGS_TO_DELETE[*]}
  fi
}
function preflight_local_checks() {
  # Validate local prerequisites before touching the remote:
  # - REMOTE_SSH_KEY set, non-empty file, mode 600
  # - REMOTE_SSH_PORT (if set) is a valid port number
  # - REMOTE_SSH_HOST set
  # - required binaries present, and grep is GNU (not BusyBox)
  # Exits 1 on the first failed check.
  if [[ -z "${REMOTE_SSH_KEY}" ]]; then
    >&2 echo -e "\e[31mREMOTE_SSH_KEY is not set\e[0m"
    exit 1
  fi

  if [[ ! -s "${REMOTE_SSH_KEY}" ]]; then
    >&2 echo -e "\e[31mKeyfile ${REMOTE_SSH_KEY} is empty\e[0m"
    exit 1
  fi

  # ssh itself refuses world/group-readable keys; fail early with a clear message.
  if [[ $(stat -c "%a" "${REMOTE_SSH_KEY}") -ne 600 ]]; then
    >&2 echo -e "\e[31mKeyfile ${REMOTE_SSH_KEY} has insecure permissions\e[0m"
    exit 1
  fi

  if [[ ! -z "${REMOTE_SSH_PORT}" ]]; then
    if [[ ${REMOTE_SSH_PORT} != ?(-)+([0-9]) ]] || [[ ${REMOTE_SSH_PORT} -gt 65535 ]]; then
      >&2 echo -e "\e[31mREMOTE_SSH_PORT is set but not an integer < 65535\e[0m"
      exit 1
    fi
  fi

  if [[ -z "${REMOTE_SSH_HOST}" ]]; then
    >&2 echo -e "\e[31mREMOTE_SSH_HOST cannot be empty\e[0m"
    exit 1
  fi

  for bin in rsync docker grep cut; do
    # command -v is the portable replacement for which(1).
    if [[ -z $(command -v ${bin}) ]]; then
      >&2 echo -e "\e[31mCannot find ${bin} in local PATH, exiting...\e[0m"
      exit 1
    fi
  done

  if grep --help 2>&1 | head -n 1 | grep -q -i "busybox"; then
    echo -e "\e[31mBusyBox grep detected on local system, please install GNU grep\e[0m"
    exit 1
  fi
}
function preflight_remote_checks() {
  # Verify the remote host before transferring data: SSH reachability,
  # rsync/docker availability, GNU grep, and which docker compose flavor
  # exists. Sets COMPOSE_COMMAND and REMOTE_ARCH; exits 1 on failure.
  if ! ssh -o StrictHostKeyChecking=no \
    -i "${REMOTE_SSH_KEY}" \
    ${REMOTE_SSH_HOST} \
    -p ${REMOTE_SSH_PORT} \
    rsync --version > /dev/null ; then
    >&2 echo -e "\e[31mCould not verify connection to ${REMOTE_SSH_HOST}\e[0m"
    >&2 echo -e "\e[31mPlease check the output above (is rsync >= 3.1.0 installed on the remote system?)\e[0m"
    exit 1
  fi

  if ssh -o StrictHostKeyChecking=no \
    -i "${REMOTE_SSH_KEY}" \
    ${REMOTE_SSH_HOST} \
    -p ${REMOTE_SSH_PORT} \
    grep --help 2>&1 | head -n 1 | grep -q -i "busybox" ; then
    >&2 echo -e "\e[31mBusyBox grep detected on remote system ${REMOTE_SSH_HOST}, please install GNU grep\e[0m"
    exit 1
  fi

  for bin in rsync docker; do
    if ! ssh -o StrictHostKeyChecking=no \
      -i "${REMOTE_SSH_KEY}" \
      ${REMOTE_SSH_HOST} \
      -p ${REMOTE_SSH_PORT} \
      which ${bin} > /dev/null ; then
      >&2 echo -e "\e[31mCannot find ${bin} in remote PATH, exiting...\e[0m"
      exit 1
    fi
  done

  # Probe which compose flavor the remote has: 0 = plugin, 1 = standalone
  # v2 binary, 2 = neither.
  ssh -o StrictHostKeyChecking=no \
    -i "${REMOTE_SSH_KEY}" \
    ${REMOTE_SSH_HOST} \
    -p ${REMOTE_SSH_PORT} \
    "bash -s" << "EOF"
if docker compose > /dev/null 2>&1; then
  exit 0
elif docker-compose version --short | grep "^2." > /dev/null 2>&1; then
  exit 1
else
  exit 2
fi
EOF
  # Capture the probe's status exactly once. The original re-read $? in the
  # elif branch, where it had already been clobbered by the first
  # [ $? = 0 ] test, so the standalone branch was taken whenever the first
  # test failed — even when the remote had no compose at all.
  COMPOSE_PROBE_RC=$?
  if [ ${COMPOSE_PROBE_RC} = 0 ]; then
    COMPOSE_COMMAND="docker compose"
    echo "DEBUG: Using native docker compose on remote"
  elif [ ${COMPOSE_PROBE_RC} = 1 ]; then
    COMPOSE_COMMAND="docker-compose"
    echo "DEBUG: Using standalone docker compose on remote"
  else
    echo -e "\e[31mCannot find any Docker Compose on remote, exiting...\e[0m"
    exit 1
  fi

  REMOTE_ARCH=$(ssh -o StrictHostKeyChecking=no -i "${REMOTE_SSH_KEY}" ${REMOTE_SSH_HOST} -p ${REMOTE_SSH_PORT} "uname -m")
}
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "${SCRIPT_DIR}/../mailcow.conf"
COMPOSE_FILE="${SCRIPT_DIR}/../docker-compose.yml"
# Strip anything that is not a valid compose project name character.
CMPS_PRJ=$(echo ${COMPOSE_PROJECT_NAME} | tr -cd 'A-Za-z-_')
SQLIMAGE=$(grep -iEo '(mysql|mariadb)\:.+' "${COMPOSE_FILE}")

preflight_local_checks
preflight_remote_checks

echo
echo -e "\033[1mFound compose project name ${CMPS_PRJ} for ${MAILCOW_HOSTNAME}\033[0m"
echo -e "\033[1mFound SQL ${SQLIMAGE}\033[0m"
echo

# Warn if local and remote architectures differ (some volumes are skipped then).
if [[ $LOCAL_ARCH != $REMOTE_ARCH ]]; then
  echo
  echo -e "\e[1;33m!!!!!!!!!!!!!!!!!!!!!!!!!! CAUTION !!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
  echo -e "\e[3;33mDetected architecture mismatch from source to destination...\e[0m"
  echo -e "\e[3;33mYour backup is transferred but some volumes might be skipped!\e[0m"
  echo -e "\e[1;33m!!!!!!!!!!!!!!!!!!!!!!!!!! CAUTION !!!!!!!!!!!!!!!!!!!!!!!!!!\e[0m"
  echo
  sleep 2
fi

# Make sure destination exists, rsync can fail under some circumstances
echo -e "\033[1mPreparing remote...\033[0m"
if ! ssh -o StrictHostKeyChecking=no \
  -i "${REMOTE_SSH_KEY}" \
  ${REMOTE_SSH_HOST} \
  -p ${REMOTE_SSH_PORT} \
  mkdir -p "${SCRIPT_DIR}/../" ; then
  >&2 echo -e "\e[31m[ERR]\e[0m - Could not prepare remote for mailcow base directory transfer"
  exit 1
fi

# Syncing the mailcow base directory
echo -e "\033[1mSynchronizing mailcow base directory...\033[0m"
rsync --delete -aH -e "ssh -o StrictHostKeyChecking=no \
  -i \"${REMOTE_SSH_KEY}\" \
  -p ${REMOTE_SSH_PORT}" \
  "${SCRIPT_DIR}/../" root@${REMOTE_SSH_HOST}:"${SCRIPT_DIR}/../"
ec=$?
# rsync exit 24 ("vanished source files") is tolerable for live trees.
if [ ${ec} -ne 0 ] && [ ${ec} -ne 24 ]; then
  >&2 echo -e "\e[31m[ERR]\e[0m - Could not transfer mailcow base directory to remote"
  exit 1
fi

# Trigger a Redis save for a consistent Redis copy
echo -ne "\033[1mRunning redis-cli save... \033[0m"
docker exec $(docker ps -qf name=redis-mailcow) redis-cli save

# Syncing volumes related to compose project
# Same here: make sure destination exists
for vol in $(docker volume ls -qf name="${CMPS_PRJ}"); do

  mountpoint="$(docker inspect ${vol} | grep Mountpoint | cut -d '"' -f4)"

  echo -e "\033[1mCreating remote mountpoint ${mountpoint} for ${vol}...\033[0m"

  ssh -o StrictHostKeyChecking=no \
    -i "${REMOTE_SSH_KEY}" \
    ${REMOTE_SSH_HOST} \
    -p ${REMOTE_SSH_PORT} \
    mkdir -p "${mountpoint}"

  if [[ "${vol}" =~ "mysql-vol-1" ]]; then

    # Make sure a previous backup does not exist
    rm -rf "${SCRIPT_DIR}/../_tmp_mariabackup/"

    echo -e "\033[1mCreating consistent backup of MariaDB volume...\033[0m"
    if ! docker run --rm \
      --network $(docker network ls -qf name=${CMPS_PRJ}_) \
      -v $(docker volume ls -qf name=${CMPS_PRJ}_mysql-vol-1):/var/lib/mysql/:ro \
      --entrypoint= \
      -v "${SCRIPT_DIR}/../_tmp_mariabackup":/backup \
      ${SQLIMAGE} mariabackup --host mysql --user root --password ${DBROOT} --backup --target-dir=/backup 2>/dev/null ; then
      >&2 echo -e "\e[31m[ERR]\e[0m - Could not create MariaDB backup on source"
      rm -rf "${SCRIPT_DIR}/../_tmp_mariabackup/"
      exit 1
    fi

    if ! docker run --rm \
      --network $(docker network ls -qf name=${CMPS_PRJ}_) \
      --entrypoint= \
      -v "${SCRIPT_DIR}/../_tmp_mariabackup":/backup \
      ${SQLIMAGE} mariabackup --prepare --target-dir=/backup 2> /dev/null ; then
      # Fixed message: this is the local --prepare step, not the transfer.
      >&2 echo -e "\e[31m[ERR]\e[0m - Could not prepare MariaDB backup on source"
      rm -rf "${SCRIPT_DIR}/../_tmp_mariabackup/"
      exit 1
    fi

    # 999:999 is the mysql UID/GID inside the official MariaDB images.
    chown -R 999:999 "${SCRIPT_DIR}/../_tmp_mariabackup"

    echo -e "\033[1mSynchronizing MariaDB backup...\033[0m"
    rsync --delete --info=progress2 -aH -e "ssh -o StrictHostKeyChecking=no \
      -i \"${REMOTE_SSH_KEY}\" \
      -p ${REMOTE_SSH_PORT}" \
      "${SCRIPT_DIR}/../_tmp_mariabackup/" root@${REMOTE_SSH_HOST}:"${mountpoint}"
    ec=$?
    if [ ${ec} -ne 0 ] && [ ${ec} -ne 24 ]; then
      >&2 echo -e "\e[31m[ERR]\e[0m - Could not transfer MariaDB backup to remote"
      exit 1
    fi

    # Cleanup
    rm -rf "${SCRIPT_DIR}/../_tmp_mariabackup/"

  elif [[ "${vol}" =~ "rspamd-vol-1" ]]; then
    # Exclude rspamd-vol-1 if the architectures are not the same on source
    # and destination due to compatibility issues.
    if [[ $LOCAL_ARCH == $REMOTE_ARCH ]]; then
      echo -e "\033[1mSynchronizing ${vol} from local ${mountpoint}...\033[0m"
      rsync --delete --info=progress2 -aH -e "ssh -o StrictHostKeyChecking=no \
        -i \"${REMOTE_SSH_KEY}\" \
        -p ${REMOTE_SSH_PORT}" \
        "${mountpoint}/" root@${REMOTE_SSH_HOST}:"${mountpoint}"
    else
      echo -e "\e[1;31mSkipping ${vol} from local machine due to incompatibility between different architectures...\e[0m"
      sleep 2
      continue
    fi

  else
    echo -e "\033[1mSynchronizing ${vol} from local ${mountpoint}...\033[0m"
    rsync --delete --info=progress2 -aH -e "ssh -o StrictHostKeyChecking=no \
      -i \"${REMOTE_SSH_KEY}\" \
      -p ${REMOTE_SSH_PORT}" \
      "${mountpoint}/" root@${REMOTE_SSH_HOST}:"${mountpoint}"
    ec=$?
    if [ ${ec} -ne 0 ] && [ ${ec} -ne 24 ]; then
      >&2 echo -e "\e[31m[ERR]\e[0m - Could not transfer ${vol} from local ${mountpoint} to remote"
      exit 1
    fi
  fi

  echo -e "\e[32mCompleted\e[0m"

done

# Restart Dockerd on destination
echo -ne "\033[1mRestarting Docker daemon on remote to detect new volumes... \033[0m"
if ! ssh -o StrictHostKeyChecking=no \
  -i "${REMOTE_SSH_KEY}" \
  ${REMOTE_SSH_HOST} \
  -p ${REMOTE_SSH_PORT} \
  systemctl restart docker ; then
  >&2 echo -e "\e[31m[ERR]\e[0m - Could not restart Docker daemon on remote"
  exit 1
fi
echo "OK"

echo -e "\e[33mPulling images on remote...\e[0m"
echo -e "\e[33mProcess is NOT stuck! Please wait...\e[0m"

if ! ssh -o StrictHostKeyChecking=no \
  -i "${REMOTE_SSH_KEY}" \
  ${REMOTE_SSH_HOST} \
  -p ${REMOTE_SSH_PORT} \
  ${COMPOSE_COMMAND} -f "${SCRIPT_DIR}/../docker-compose.yml" pull --no-parallel --quiet 2>&1 ; then
  >&2 echo -e "\e[31m[ERR]\e[0m - Could not pull images on remote"
fi

echo -e "\033[1mExecuting update script and forcing garbage cleanup on remote...\033[0m"
if ! ssh -o StrictHostKeyChecking=no \
  -i "${REMOTE_SSH_KEY}" \
  ${REMOTE_SSH_HOST} \
  -p ${REMOTE_SSH_PORT} \
  ${SCRIPT_DIR}/../update.sh -f --gc ; then
  >&2 echo -e "\e[31m[ERR]\e[0m - Could not cleanup old images on remote"
fi

echo -e "\e[32mDone\e[0m"
63
helper-scripts/add-new-lang-keys.php
Executable file
63
helper-scripts/add-new-lang-keys.php
Executable file
|
|
@ -0,0 +1,63 @@
|
|||
<?php

/**
 * Recursive key diff: returns every key (at any depth) that exists in
 * $arr1 but is missing from $arr2. For keys present in both where both
 * values are arrays, descends and keeps only the missing sub-keys.
 *
 * @param array $arr1 reference (master) array
 * @param array $arr2 array to compare against
 * @return array nested array of keys missing from $arr2
 */
function array_diff_key_recursive (array $arr1, array $arr2) {
  // Top-level keys only present in $arr1.
  $diff = array_diff_key($arr1, $arr2);

  // Keys present in both sides: recurse into array values.
  foreach (array_intersect_key($arr1, $arr2) as $k => $v) {
    if (is_array($arr1[$k]) && is_array($arr2[$k])) {
      $d = array_diff_key_recursive($arr1[$k], $arr2[$k]);
      if ($d) {
        $diff[$k] = $d;
      }
    }
  }

  return $diff;
}
// target lang (lowercase language code, e.g. "sk")
$targetLang = isset($argv[1]) ? $argv[1] : '';

if(empty($targetLang)) {
  die('Please specify target lang as the first argument, to which you want to add missing keys from master lang (EN). Use the lowercase name,
for example `sk` for the Slovak language'."\n");
}

// load master lang
$masterLang = file_get_contents(__DIR__.'/../data/web/lang/lang.en-gb.json');
$masterLang = json_decode($masterLang, true);
// Fix: the original never checked the decode result, silently producing a
// wrong diff when the file was missing or contained invalid JSON.
if (!is_array($masterLang)) {
  die('Could not read or parse master lang file lang.en-gb.json'."\n");
}

// load target lang
$lang = file_get_contents(__DIR__.'/../data/web/lang/lang.'.$targetLang.'.json');
$lang = json_decode($lang, true);
if (!is_array($lang)) {
  die('Could not read or parse target lang file lang.'.$targetLang.'.json'."\n");
}

// compare lang keys
$result = array_diff_key_recursive($masterLang, $lang);

if(empty($result)) {
  die('No new keys were added. Looks like target lang is up to date.'."\n");
}

foreach($result as $key => $val) {
  // check if section key exists in target lang
  if(array_key_exists($key, $lang)) {
    // add only missing section keys
    foreach ($val as $k => $v) {
      $lang[$key][$k] = $v;
    }
    // sort keys
    ksort($lang[$key]);
  } else {
    // add whole section
    $lang[$key] = $val;
    ksort($lang);
  }
}

$lang = json_encode($lang, JSON_PRETTY_PRINT|JSON_UNESCAPED_UNICODE|JSON_UNESCAPED_SLASHES);
file_put_contents(__DIR__.'/../data/web/lang/lang.'.$targetLang.'.json', $lang);

echo 'Following new lang keys were added and need translation:'."\n";
print_r($result);
410
helper-scripts/backup_and_restore.sh
Executable file
410
helper-scripts/backup_and_restore.sh
Executable file
|
|
@ -0,0 +1,410 @@
|
|||
#!/usr/bin/env bash

# Backup/restore helper for mailcow volumes. First argument selects the mode
# ("backup" or "restore"); BACKUP_LOCATION (or MAILCOW_BACKUP_LOCATION) is
# the absolute destination path.
DEBIAN_DOCKER_IMAGE="mailcow/backup:latest"

if [[ ! -z ${MAILCOW_BACKUP_LOCATION} ]]; then
  BACKUP_LOCATION="${MAILCOW_BACKUP_LOCATION}"
fi

if [[ ! ${1} =~ (backup|restore) ]]; then
  echo "First parameter needs to be 'backup' or 'restore'"
  exit 1
fi

if [[ ${1} == "backup" && ! ${2} =~ (crypt|vmail|redis|rspamd|postfix|mysql|all|--delete-days) ]]; then
  echo "Second parameter needs to be 'vmail', 'crypt', 'redis', 'rspamd', 'postfix', 'mysql', 'all' or '--delete-days'"
  exit 1
fi

if [[ -z ${BACKUP_LOCATION} ]]; then
  while [[ -z ${BACKUP_LOCATION} ]]; do
    read -ep "Backup location (absolute path, starting with /): " BACKUP_LOCATION
  done
fi

if [[ ! ${BACKUP_LOCATION} =~ ^/ ]]; then
  echo "Backup directory needs to be given as absolute path (starting with /)."
  exit 1
fi

if [[ -f ${BACKUP_LOCATION} ]]; then
  echo "${BACKUP_LOCATION} is a file!"
  exit 1
fi

if [[ ! -d ${BACKUP_LOCATION} ]]; then
  echo "${BACKUP_LOCATION} is not a directory"
  read -p "Create it now? [y|N] " CREATE_BACKUP_LOCATION
  if [[ ! ${CREATE_BACKUP_LOCATION,,} =~ ^(yes|y)$ ]]; then
    exit 1
  else
    mkdir -p ${BACKUP_LOCATION}
    chmod 755 ${BACKUP_LOCATION}
  fi
else
  # The backup containers run as non-root; "others" need rwx on the target.
  if [[ ${1} == "backup" ]] && [[ -z $(stat -Lc %a ${BACKUP_LOCATION} | grep -oE '[0-9][0-9][5-7]') ]]; then
    echo "${BACKUP_LOCATION} is not write-able for others, that's required for a backup."
    exit 1
  fi
fi

# Strip a trailing slash for consistent path concatenation below.
BACKUP_LOCATION=$(echo ${BACKUP_LOCATION} | sed 's#/$##')
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
COMPOSE_FILE=${SCRIPT_DIR}/../docker-compose.yml
ENV_FILE=${SCRIPT_DIR}/../.env
THREADS=${THREADS:-1}
ARCH=$(uname -m)

# Accept 1-99 threads; anything else is rejected.
if ! [[ "${THREADS}" =~ ^[1-9][0-9]?$ ]] ; then
  echo "Thread input is not a number!"
  exit 1
else
  echo "Using ${THREADS} Thread(s) for this run."
  echo "Notice: You can set the Thread count with the THREADS Variable before you run this script."
fi

if [ ! -f ${COMPOSE_FILE} ]; then
  echo "Compose file not found"
  exit 1
fi

if [ ! -f ${ENV_FILE} ]; then
  echo "Environment file not found"
  exit 1
fi

echo "Using ${BACKUP_LOCATION} as backup/restore location."
echo

source ${SCRIPT_DIR}/../mailcow.conf

if [[ -z ${COMPOSE_PROJECT_NAME} ]]; then
  echo "Could not determine compose project name"
  exit 1
else
  echo "Found project name ${COMPOSE_PROJECT_NAME}"
  CMPS_PRJ=$(echo ${COMPOSE_PROJECT_NAME} | tr -cd "[0-9A-Za-z-_]")
fi

if grep --help 2>&1 | head -n 1 | grep -q -i "busybox"; then
  >&2 echo -e "\e[31mBusyBox grep detected on local system, please install GNU grep\e[0m"
  exit 1
fi
function backup() {
  # Create a timestamped backup directory and archive the requested
  # datasets (vmail/crypt/redis/rspamd/postfix/mysql or "all").
  # The ";;&" case terminators intentionally fall through so "all"
  # matches every arm.
  DATE=$(date +"%Y-%m-%d-%H-%M-%S")
  mkdir -p "${BACKUP_LOCATION}/mailcow-${DATE}"
  chmod 755 "${BACKUP_LOCATION}/mailcow-${DATE}"
  cp "${SCRIPT_DIR}/../mailcow.conf" "${BACKUP_LOCATION}/mailcow-${DATE}"
  # Architecture marker (hidden file) so restore can detect arch mismatches.
  touch "${BACKUP_LOCATION}/mailcow-${DATE}/.$ARCH"
  for bin in docker; do
    if [[ -z $(command -v ${bin}) ]]; then
      >&2 echo -e "\e[31mCannot find ${bin} in local PATH, exiting...\e[0m"
      exit 1
    fi
  done
  while (( "$#" )); do
    case "$1" in
    vmail|all)
      docker run --name mailcow-backup --rm \
        -v ${BACKUP_LOCATION}/mailcow-${DATE}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_vmail-vol-1$):/vmail:ro,z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --warning='no-file-ignored' --use-compress-program="pigz --rsyncable -p ${THREADS}" -Pcvpf /backup/backup_vmail.tar.gz /vmail
      ;;&
    crypt|all)
      docker run --name mailcow-backup --rm \
        -v ${BACKUP_LOCATION}/mailcow-${DATE}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_crypt-vol-1$):/crypt:ro,z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --warning='no-file-ignored' --use-compress-program="pigz --rsyncable -p ${THREADS}" -Pcvpf /backup/backup_crypt.tar.gz /crypt
      ;;&
    redis|all)
      # Force an RDB dump first for a consistent copy.
      docker exec $(docker ps -qf name=redis-mailcow) redis-cli save
      docker run --name mailcow-backup --rm \
        -v ${BACKUP_LOCATION}/mailcow-${DATE}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_redis-vol-1$):/redis:ro,z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --warning='no-file-ignored' --use-compress-program="pigz --rsyncable -p ${THREADS}" -Pcvpf /backup/backup_redis.tar.gz /redis
      ;;&
    rspamd|all)
      docker run --name mailcow-backup --rm \
        -v ${BACKUP_LOCATION}/mailcow-${DATE}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_rspamd-vol-1$):/rspamd:ro,z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --warning='no-file-ignored' --use-compress-program="pigz --rsyncable -p ${THREADS}" -Pcvpf /backup/backup_rspamd.tar.gz /rspamd
      ;;&
    postfix|all)
      docker run --name mailcow-backup --rm \
        -v ${BACKUP_LOCATION}/mailcow-${DATE}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_postfix-vol-1$):/postfix:ro,z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --warning='no-file-ignored' --use-compress-program="pigz --rsyncable -p ${THREADS}" -Pcvpf /backup/backup_postfix.tar.gz /postfix
      ;;&
    mysql|all)
      SQLIMAGE=$(grep -iEo '(mysql|mariadb)\:.+' ${COMPOSE_FILE})
      if [[ -z "${SQLIMAGE}" ]]; then
        echo "Could not determine SQL image version, skipping backup..."
        shift
        continue
      else
        echo "Using SQL image ${SQLIMAGE}, starting..."
        docker run --name mailcow-backup --rm \
          --network $(docker network ls -qf name=^${CMPS_PRJ}_mailcow-network$) \
          -v $(docker volume ls -qf name=^${CMPS_PRJ}_mysql-vol-1$):/var/lib/mysql/:ro,z \
          -t --entrypoint= \
          --sysctl net.ipv6.conf.all.disable_ipv6=1 \
          -v ${BACKUP_LOCATION}/mailcow-${DATE}:/backup:z \
          ${SQLIMAGE} /bin/sh -c "mariabackup --host mysql --user root --password ${DBROOT} --backup --rsync --target-dir=/backup_mariadb ; \
          mariabackup --prepare --target-dir=/backup_mariadb ; \
          chown -R 999:999 /backup_mariadb ; \
          /bin/tar --warning='no-file-ignored' --use-compress-program='gzip --rsyncable' -Pcvpf /backup/backup_mariadb.tar.gz /backup_mariadb ;"
      fi
      ;;&
    --delete-days)
      shift
      if [[ "${1}" =~ ^[0-9]+$ ]]; then
        # Prune backup folders older than N days (N * 60 * 24 minutes).
        find ${BACKUP_LOCATION}/mailcow-* -maxdepth 0 -mmin +$((${1}*60*24)) -exec rm -rvf {} \;
      else
        echo "Parameter of --delete-days is not a number."
      fi
      ;;
    esac
    shift
  done
}
function restore() {
  # Restore one or more datasets from ${1} (the restore point directory).
  # Remaining arguments name the datasets
  # (vmail/redis/crypt/rspamd/postfix/mysql).
  for bin in docker; do
    if [[ -z $(command -v ${bin}) ]]; then
      >&2 echo -e "\e[31mCannot find ${bin} in local PATH, exiting...\e[0m"
      exit 1
    fi
  done

  if [ "${DOCKER_COMPOSE_VERSION}" == "native" ]; then
    COMPOSE_COMMAND="docker compose"
  elif [ "${DOCKER_COMPOSE_VERSION}" == "standalone" ]; then
    COMPOSE_COMMAND="docker-compose"
  else
    echo -e "\e[31mCan not read DOCKER_COMPOSE_VERSION variable from mailcow.conf! Is your mailcow up to date? Exiting...\e[0m"
    exit 1
  fi

  echo
  echo "Stopping watchdog-mailcow..."
  docker stop $(docker ps -qf name=watchdog-mailcow)
  echo
  RESTORE_LOCATION="${1}"
  shift
  while (( "$#" )); do
    case "$1" in
    vmail)
      docker stop $(docker ps -qf name=dovecot-mailcow)
      docker run -i --name mailcow-backup --rm \
        -v ${RESTORE_LOCATION}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_vmail-vol-1$):/vmail:z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --use-compress-program="pigz -d -p ${THREADS}" -Pxvf /backup/backup_vmail.tar.gz
      docker start $(docker ps -aqf name=dovecot-mailcow)
      echo
      echo "In most cases it is not required to run a full resync, you can run the command printed below at any time after testing whether the restore process broke a mailbox:"
      echo
      echo "docker exec $(docker ps -qf name=dovecot-mailcow) doveadm force-resync -A '*'"
      echo
      read -p "Force a resync now? [y|N] " FORCE_RESYNC
      if [[ ${FORCE_RESYNC,,} =~ ^(yes|y)$ ]]; then
        docker exec $(docker ps -qf name=dovecot-mailcow) doveadm force-resync -A '*'
      else
        echo "OK, skipped."
      fi
      ;;
    redis)
      docker stop $(docker ps -qf name=redis-mailcow)
      docker run -i --name mailcow-backup --rm \
        -v ${RESTORE_LOCATION}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_redis-vol-1$):/redis:z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --use-compress-program="pigz -d -p ${THREADS}" -Pxvf /backup/backup_redis.tar.gz
      docker start $(docker ps -aqf name=redis-mailcow)
      ;;
    crypt)
      docker stop $(docker ps -qf name=dovecot-mailcow)
      docker run -i --name mailcow-backup --rm \
        -v ${RESTORE_LOCATION}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_crypt-vol-1$):/crypt:z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --use-compress-program="pigz -d -p ${THREADS}" -Pxvf /backup/backup_crypt.tar.gz
      docker start $(docker ps -aqf name=dovecot-mailcow)
      ;;
    rspamd)
      # rspamd data is architecture-dependent; the backup carries a hidden
      # ".<arch>" marker file (created by backup()) used to decide safety.
      if [[ $(find "${RESTORE_LOCATION}" \( -name '*x86*' -o -name '*aarch*' \) -exec basename {} \; | sed 's/^\.//' | sed 's/^\.//') == "" ]]; then
        echo -e "\e[33mCould not find an architecture signature of the loaded backup... Maybe the backup was done before the multiarch update?"
        sleep 2
        echo -e "Continuing anyhow. If rspamd is crashing upon boot try to remove the rspamd volume with docker volume rm ${CMPS_PRJ}_rspamd-vol-1 after you've stopped the stack.\e[0m"
        sleep 2
        docker stop $(docker ps -qf name=rspamd-mailcow)
        docker run -i --name mailcow-backup --rm \
          -v ${RESTORE_LOCATION}:/backup:z \
          -v $(docker volume ls -qf name=^${CMPS_PRJ}_rspamd-vol-1$):/rspamd:z \
          ${DEBIAN_DOCKER_IMAGE} /bin/tar --use-compress-program="pigz -d -p ${THREADS}" -Pxvf /backup/backup_rspamd.tar.gz
        docker start $(docker ps -aqf name=rspamd-mailcow)
      elif [[ $ARCH != $(find "${RESTORE_LOCATION}" \( -name '*x86*' -o -name '*aarch*' \) -exec basename {} \; | sed 's/^\.//' | sed 's/^\.//') ]]; then
        echo -e "\e[31mThe architecture of the backed up mailcow OS is different than your restoring mailcow OS..."
        sleep 2
        echo -e "Skipping rspamd due to compatibility issues!\e[0m"
      else
        docker stop $(docker ps -qf name=rspamd-mailcow)
        docker run -i --name mailcow-backup --rm \
          -v ${RESTORE_LOCATION}:/backup:z \
          -v $(docker volume ls -qf name=^${CMPS_PRJ}_rspamd-vol-1$):/rspamd:z \
          ${DEBIAN_DOCKER_IMAGE} /bin/tar --use-compress-program="pigz -d -p ${THREADS}" -Pxvf /backup/backup_rspamd.tar.gz
        docker start $(docker ps -aqf name=rspamd-mailcow)
      fi
      ;;
    postfix)
      docker stop $(docker ps -qf name=postfix-mailcow)
      docker run -i --name mailcow-backup --rm \
        -v ${RESTORE_LOCATION}:/backup:z \
        -v $(docker volume ls -qf name=^${CMPS_PRJ}_postfix-vol-1$):/postfix:z \
        ${DEBIAN_DOCKER_IMAGE} /bin/tar --use-compress-program="pigz -d -p ${THREADS}" -Pxvf /backup/backup_postfix.tar.gz
      docker start $(docker ps -aqf name=postfix-mailcow)
      ;;
    mysql|mariadb)
      SQLIMAGE=$(grep -iEo '(mysql|mariadb)\:.+' ${COMPOSE_FILE})
      if [[ -z "${SQLIMAGE}" ]]; then
        echo "Could not determine SQL image version, skipping restore..."
        shift
        continue
      elif [ ! -f "${RESTORE_LOCATION}/mailcow.conf" ]; then
        echo "Could not find the corresponding mailcow.conf in ${RESTORE_LOCATION}, skipping restore."
        echo "If you lost that file, copy the last working mailcow.conf file to ${RESTORE_LOCATION} and restart the restore process."
        shift
        continue
      else
        read -p "mailcow will be stopped and the currently active mailcow.conf will be modified to use the DB parameters found in ${RESTORE_LOCATION}/mailcow.conf - do you want to proceed? [Y|n] " MYSQL_STOP_MAILCOW
        if [[ ${MYSQL_STOP_MAILCOW,,} =~ ^(no|n)$ ]]; then
          echo "OK, skipped."
          shift
          continue
        else
          echo "Stopping mailcow..."
          ${COMPOSE_COMMAND} -f ${COMPOSE_FILE} --env-file ${ENV_FILE} down
        fi
        # Three backup layouts are supported, newest first:
        # a raw mysql/ directory, a backup_mysql.gz SQL dump, or a
        # backup_mariadb.tar.gz mariabackup archive.
        if [[ -d "${RESTORE_LOCATION}/mysql" ]]; then
          docker run --name mailcow-backup --rm \
            -v $(docker volume ls -qf name=^${CMPS_PRJ}_mysql-vol-1$):/var/lib/mysql/:rw,z \
            --entrypoint= \
            -v ${RESTORE_LOCATION}/mysql:/backup:z \
            ${SQLIMAGE} /bin/bash -c "shopt -s dotglob ; /bin/rm -rf /var/lib/mysql/* ; rsync -avh --usermap=root:mysql --groupmap=root:mysql /backup/ /var/lib/mysql/"
        elif [[ -f "${RESTORE_LOCATION}/backup_mysql.gz" ]]; then
          docker run \
            -i --name mailcow-backup --rm \
            -v $(docker volume ls -qf name=^${CMPS_PRJ}_mysql-vol-1$):/var/lib/mysql/:z \
            --entrypoint= \
            -u mysql \
            -v ${RESTORE_LOCATION}:/backup:z \
            ${SQLIMAGE} /bin/sh -c "mysqld --skip-grant-tables & \
            until mysqladmin ping; do sleep 3; done && \
            echo Restoring... && \
            gunzip < backup/backup_mysql.gz | mysql -uroot && \
            mysql -uroot -e SHUTDOWN;"
        elif [[ -f "${RESTORE_LOCATION}/backup_mariadb.tar.gz" ]]; then
          docker run --name mailcow-backup --rm \
            -v $(docker volume ls -qf name=^${CMPS_PRJ}_mysql-vol-1$):/backup_mariadb/:rw,z \
            --entrypoint= \
            -v ${RESTORE_LOCATION}:/backup:z \
            ${SQLIMAGE} /bin/bash -c "shopt -s dotglob ; \
            /bin/rm -rf /backup_mariadb/* ; \
            /bin/tar -Pxvzf /backup/backup_mariadb.tar.gz"
        fi
        echo "Modifying mailcow.conf..."
        source ${RESTORE_LOCATION}/mailcow.conf
        sed -i --follow-symlinks "/DBNAME/c\DBNAME=${DBNAME}" ${SCRIPT_DIR}/../mailcow.conf
        sed -i --follow-symlinks "/DBUSER/c\DBUSER=${DBUSER}" ${SCRIPT_DIR}/../mailcow.conf
        sed -i --follow-symlinks "/DBPASS/c\DBPASS=${DBPASS}" ${SCRIPT_DIR}/../mailcow.conf
        sed -i --follow-symlinks "/DBROOT/c\DBROOT=${DBROOT}" ${SCRIPT_DIR}/../mailcow.conf
        source ${SCRIPT_DIR}/../mailcow.conf
        echo "Starting mailcow..."
        ${COMPOSE_COMMAND} -f ${COMPOSE_FILE} --env-file ${ENV_FILE} up -d
      fi
      ;;
    esac
    shift
  done
  echo
  echo "Starting watchdog-mailcow..."
  docker start $(docker ps -aqf name=watchdog-mailcow)
}
# Dispatch: lowercase all arguments and hand off to backup(), or build the
# interactive restore-point / dataset menus and hand off to restore().
if [[ ${1} == "backup" ]]; then
  backup ${@,,}
elif [[ ${1} == "restore" ]]; then
  i=1
  declare -A FOLDER_SELECTION
  if [[ $(find ${BACKUP_LOCATION}/mailcow-* -maxdepth 1 -type d 2> /dev/null| wc -l) -lt 1 ]]; then
    echo "Selected backup location has no subfolders"
    exit 1
  fi
  for folder in $(ls -d ${BACKUP_LOCATION}/mailcow-*/); do
    echo "[ ${i} ] - ${folder}"
    FOLDER_SELECTION[${i}]="${folder}"
    ((i++))
  done
  echo
  input_sel=0
  # i has been post-incremented past the last populated index; the original
  # "-gt ${i}" accepted i itself, selecting an empty entry.
  while [[ ${input_sel} -lt 1 || ${input_sel} -gt $((i - 1)) ]]; do
    read -p "Select a restore point: " input_sel
  done
  i=1
  echo
  declare -A FILE_SELECTION
  RESTORE_POINT="${FOLDER_SELECTION[${input_sel}]}"
  if [[ -z $(find "${FOLDER_SELECTION[${input_sel}]}" -maxdepth 1 \( -type d -o -type f \) -regex ".*\(redis\|rspamd\|mariadb\|mysql\|crypt\|vmail\|postfix\).*") ]]; then
    echo "No datasets found"
    exit 1
  fi

  echo "[ 0 ] - all"
  # find all files in folder with *.gz extension, print their base names, remove backup_, remove .tar (if present), remove .gz
  FILE_SELECTION[0]=$(find "${FOLDER_SELECTION[${input_sel}]}" -maxdepth 1 \( -type d -o -type f \) \( -name '*.gz' -o -name 'mysql' \) -printf '%f\n' | sed 's/backup_*//' | sed 's/\.[^.]*$//' | sed 's/\.[^.]*$//')
  for file in $(ls -f "${FOLDER_SELECTION[${input_sel}]}"); do
    if [[ ${file} =~ vmail ]]; then
      echo "[ ${i} ] - Mail directory (/var/vmail)"
      FILE_SELECTION[${i}]="vmail"
      ((i++))
    elif [[ ${file} =~ crypt ]]; then
      echo "[ ${i} ] - Crypt data"
      FILE_SELECTION[${i}]="crypt"
      ((i++))
    elif [[ ${file} =~ redis ]]; then
      echo "[ ${i} ] - Redis DB"
      FILE_SELECTION[${i}]="redis"
      ((i++))
    elif [[ ${file} =~ rspamd ]]; then
      if [[ $(find "${FOLDER_SELECTION[${input_sel}]}" \( -name '*x86*' -o -name '*aarch*' \) -exec basename {} \; | sed 's/^\.//' | sed 's/^\.//') == "" ]]; then
        echo "[ ${i} ] - Rspamd data (unknown arch detected, restore with caution!)"
        FILE_SELECTION[${i}]="rspamd"
        ((i++))
      elif [[ $ARCH != $(find "${FOLDER_SELECTION[${input_sel}]}" \( -name '*x86*' -o -name '*aarch*' \) -exec basename {} \; | sed 's/^\.//' | sed 's/^\.//') ]]; then
        echo -e "\e[31m[ NaN ] - Rspamd data (incompatible arch, cannot restore it)\e[0m"
      else
        echo "[ ${i} ] - Rspamd data"
        FILE_SELECTION[${i}]="rspamd"
        ((i++))
      fi
    elif [[ ${file} =~ postfix ]]; then
      echo "[ ${i} ] - Postfix data"
      FILE_SELECTION[${i}]="postfix"
      ((i++))
    elif [[ ${file} =~ mysql ]] || [[ ${file} =~ mariadb ]]; then
      echo "[ ${i} ] - SQL DB"
      FILE_SELECTION[${i}]="mysql"
      ((i++))
    fi
  done
  echo
  input_sel=-1
  # Valid dataset indices are 0 (all) through i-1; same off-by-one fix.
  while [[ ${input_sel} -lt 0 || ${input_sel} -gt $((i - 1)) ]]; do
    read -p "Select a dataset to restore: " input_sel
  done
  echo "Restoring ${FILE_SELECTION[${input_sel}]} from ${RESTORE_POINT}..."
  restore "${RESTORE_POINT}" ${FILE_SELECTION[${input_sel}]}
fi
34
helper-scripts/check_translations.rb
Executable file
34
helper-scripts/check_translations.rb
Executable file
|
|
@ -0,0 +1,34 @@
|
|||
#!/usr/bin/env ruby
# Find translation keys defined in the master language file that are
# referenced neither from the PHP sources nor from the per-section JS files,
# and print ready-to-run sed commands that remove them from all lang files.

MASTER="en-gb"

# Repository root, relative to this script's directory.
DIR = "#{__dir__}/.."

# Extract every "['section']['key']" pair from the master language file and
# escape the brackets so each entry can later be used as a grep pattern.
keys = %x[sed -r 's/.*(\\['.*'\\]\\['.*'\\]).*/\\1/g' #{DIR}/data/web/lang/lang.#{MASTER}.php | grep '^\\\[' | sed 's/\\[/\\\\[/g' | sed 's/\\]/\\\\]/g'|sort | uniq]

# First pass: keep keys that no PHP source references.
# git grep exits non-zero when it finds no match.
not_used_in_php = []
keys.split("\n").each do |key|
  %x[git grep "#{key}" -- #{DIR}/data/web/*.php #{DIR}/data/web/inc #{DIR}/data/web/modals]
  if $?.exitstatus > 0
    not_used_in_php << key
  end
end

# Example entries at this point:
# \['user'\]\['username'\]
# \['user'\]\['waiting'\]
# \['warning'\]\['spam_alias_temp_error'\]

# Second pass: of those, keep keys that the section's JS file (or debug.js)
# does not reference via "lang.<key>" either.
not_used = []
not_used_in_php.each do |string|
  section = string.scan(/([a-z]+)/)[0][0]
  key = string.scan(/([a-z]+)/)[1][0]
  %x[git grep lang.#{key} -- #{DIR}/data/web/js/#{section}.js #{DIR}/data/web/js/debug.js]
  if $?.exitstatus > 0
    not_used << string
  end
end

# Emit deletion commands rather than deleting directly, so a human can review.
puts "# Remove unused translation keys:"
not_used.each do |key|
  puts "sed -i \"/\\$lang#{key}.*;/d\" data/web/lang/lang.??.php"
end
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
# docker-compose override: build every mailcow image locally from the
# Dockerfiles under ./data/Dockerfiles instead of pulling from a registry.
services:
  unbound-mailcow:
    build: ./data/Dockerfiles/unbound

  clamd-mailcow:
    build: ./data/Dockerfiles/clamd

  rspamd-mailcow:
    build: ./data/Dockerfiles/rspamd

  php-fpm-mailcow:
    build: ./data/Dockerfiles/phpfpm

  # SOGo uses the long build form with an explicit Dockerfile name.
  sogo-mailcow:
    build:
      context: ./data/Dockerfiles/sogo
      dockerfile: Dockerfile

  dovecot-mailcow:
    build: ./data/Dockerfiles/dovecot

  postfix-mailcow:
    build: ./data/Dockerfiles/postfix

  acme-mailcow:
    build: ./data/Dockerfiles/acme

  netfilter-mailcow:
    build: ./data/Dockerfiles/netfilter

  watchdog-mailcow:
    build: ./data/Dockerfiles/watchdog

  dockerapi-mailcow:
    build: ./data/Dockerfiles/dockerapi

  solr-mailcow:
    build: ./data/Dockerfiles/solr

  olefy-mailcow:
    build: ./data/Dockerfiles/olefy
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
# docker-compose override: use an external DNS resolver instead of the
# bundled unbound container. Replace "my.resolvers.ip.addr" with the real
# resolver IP. The watchdog also gets CHECK_UNBOUND=0 so it stops probing
# the (now unused) unbound service.
services:

  clamd-mailcow:
    dns:
      - my.resolvers.ip.addr

  rspamd-mailcow:
    dns:
      - my.resolvers.ip.addr

  php-fpm-mailcow:
    dns:
      - my.resolvers.ip.addr

  sogo-mailcow:
    dns:
      - my.resolvers.ip.addr

  dovecot-mailcow:
    dns:
      - my.resolvers.ip.addr

  postfix-mailcow:
    dns:
      - my.resolvers.ip.addr

  nginx-mailcow:
    dns:
      - my.resolvers.ip.addr

  acme-mailcow:
    dns:
      - my.resolvers.ip.addr

  watchdog-mailcow:
    environment:
      - CHECK_UNBOUND=0
    dns:
      - my.resolvers.ip.addr

  dockerapi-mailcow:
    dns:
      - my.resolvers.ip.addr
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
# docker-compose override: use a host-local MySQL/MariaDB server via its
# UNIX socket instead of the bundled mysql-mailcow container. The socket is
# bind-mounted into every service that talks to the database, and the
# mysql-mailcow service is replaced with a no-op alpine container.
services:

  php-fpm-mailcow:
    volumes:
      - /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock

  sogo-mailcow:
    volumes:
      - /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock

  dovecot-mailcow:
    volumes:
      - /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock

  postfix-mailcow:
    volumes:
      - /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock

  acme-mailcow:
    volumes:
      - /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock

  watchdog-mailcow:
    volumes:
      - /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock

  # Neutralize the bundled database container: /bin/true exits immediately
  # and restart "no" keeps it from being respawned.
  mysql-mailcow:
    image: alpine:3.20
    command: /bin/true
    restart: "no"
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
##
## Set haproxy_trusted_networks in Dovecots extra.conf!
##
# docker-compose override: expose the PROXY-protocol listener ports of
# Dovecot and Postfix for use behind HAProxy. Each mapping can be changed
# via the corresponding *_PORT_HAPROXY variable; the defaults bind to
# 127.0.0.1 only.

services:

  dovecot-mailcow:
    ports:
      - "${IMAP_PORT_HAPROXY:-127.0.0.1:10143}:10143"
      - "${IMAPS_PORT_HAPROXY:-127.0.0.1:10993}:10993"
      - "${POP_PORT_HAPROXY:-127.0.0.1:10110}:10110"
      - "${POPS_PORT_HAPROXY:-127.0.0.1:10995}:10995"
      - "${SIEVE_PORT_HAPROXY:-127.0.0.1:14190}:14190"

  postfix-mailcow:
    ports:
      - "${SMTP_PORT_HAPROXY:-127.0.0.1:10025}:10025"
      - "${SMTPS_PORT_HAPROXY:-127.0.0.1:10465}:10465"
      - "${SUBMISSION_PORT_HAPROXY:-127.0.0.1:10587}:10587"
|
||||
13
helper-scripts/expiry-dates.sh
Executable file
13
helper-scripts/expiry-dates.sh
Executable file
|
|
@ -0,0 +1,13 @@
|
|||
#!/usr/bin/env bash
# Print the TLS certificate expiry dates of the Postfix, Dovecot and Nginx
# endpoints of this mailcow installation.
#
# Reads MAILCOW_HOSTNAME, SMTP_PORT, IMAP_PORT and HTTPS_PORT from
# mailcow.conf, looked up in the current directory or one level up.

[[ -f mailcow.conf ]] && source mailcow.conf
[[ -f ../mailcow.conf ]] && source ../mailcow.conf

# Fail early when no configuration was found instead of silently probing
# an empty host ("":port) with openssl.
if [[ -z ${MAILCOW_HOSTNAME} ]]; then
  echo "Cannot find mailcow.conf, make sure this script is run from within the mailcow folder." >&2
  exit 1
fi

# "echo |" closes the connection right after the handshake; "x509 -enddate"
# prints "notAfter=<date>" and cut keeps only the date part.
POSTFIX=$(echo | openssl s_client -connect "${MAILCOW_HOSTNAME}:${SMTP_PORT}" -starttls smtp 2>/dev/null | openssl x509 -inform pem -noout -enddate | cut -d "=" -f 2)
DOVECOT=$(echo | openssl s_client -connect "${MAILCOW_HOSTNAME}:${IMAP_PORT}" -starttls imap 2>/dev/null | openssl x509 -inform pem -noout -enddate | cut -d "=" -f 2)
NGINX=$(echo | openssl s_client -connect "${MAILCOW_HOSTNAME}:${HTTPS_PORT}" 2>/dev/null | openssl x509 -inform pem -noout -enddate | cut -d "=" -f 2)

echo "TLS expiry dates:"
echo "Postfix: ${POSTFIX}"
echo "Dovecot: ${DOVECOT}"
echo "Nginx: ${NGINX}"
|
||||
122
helper-scripts/generate_caa_record.py
Executable file
122
helper-scripts/generate_caa_record.py
Executable file
|
|
@ -0,0 +1,122 @@
|
|||
#!/usr/bin/env python3
|
||||
# Based on github.com/diafygi/acme-tiny, original copyright:
|
||||
# Copyright Daniel Roesler, under MIT license, see LICENSE at github.com/diafygi/acme-tiny
|
||||
import argparse, subprocess, json, os, sys, base64, binascii, time, hashlib, re, copy, textwrap, logging
|
||||
try:
|
||||
from urllib.request import urlopen, Request # Python 3
|
||||
except ImportError: # pragma: no cover
|
||||
from urllib2 import urlopen, Request # Python 2
|
||||
|
||||
DEFAULT_DIRECTORY_URL = "https://acme-v02.api.letsencrypt.org/directory"
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
LOGGER.addHandler(logging.StreamHandler())
|
||||
LOGGER.setLevel(logging.INFO)
|
||||
|
||||
def get_id(account_key, log=LOGGER, directory_url=DEFAULT_DIRECTORY_URL, contact=None):
    """Register (or look up) an ACME account for the given RSA account key
    and return the account URL ("kid"), which serves as the accounturi for
    a CAA record.

    Derived from acme-tiny; signing is delegated to the openssl CLI so the
    account key never has to be loaded into Python.

    :param account_key: path to the PEM-encoded RSA account private key
    :param log: logger used for progress messages
    :param directory_url: ACME directory URL (defaults to Let's Encrypt)
    :param contact: optional list of contact URIs (e.g. "mailto:...")
    :return: the ACME account URL from the newAccount Location header
    :raises IOError: when an openssl invocation fails
    :raises ValueError: on unexpected HTTP responses from the CA
    """
    # Shared across the nested helpers via closure; assigned below once the
    # key is parsed and the directory fetched.
    directory, acct_headers, alg, jwk = None, None, None, None # global variables

    # helper functions - base64 encode for jose spec
    # (URL-safe base64 without '=' padding, per RFC 7515)
    def _b64(b):
        return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")

    # helper function - run external commands
    def _cmd(cmd_list, stdin=None, cmd_input=None, err_msg="Command Line Error"):
        proc = subprocess.Popen(cmd_list, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate(cmd_input)
        if proc.returncode != 0:
            raise IOError("{0}\n{1}".format(err_msg, err))
        return out

    # helper function - make request and automatically parse json response
    def _do_request(url, data=None, err_msg="Error", depth=0):
        try:
            resp = urlopen(Request(url, data=data, headers={"Content-Type": "application/jose+json", "User-Agent": "acme-tiny"}))
            resp_data, code, headers = resp.read().decode("utf8"), resp.getcode(), resp.headers
        except IOError as e:
            resp_data = e.read().decode("utf8") if hasattr(e, "read") else str(e)
            code, headers = getattr(e, "code", None), {}
        try:
            resp_data = json.loads(resp_data) # try to parse json results
        except ValueError:
            pass # ignore json parsing errors
        # badNonce errors are signalled via IndexError so the caller can retry.
        if depth < 100 and code == 400 and resp_data['type'] == "urn:ietf:params:acme:error:badNonce":
            raise IndexError(resp_data) # allow 100 retrys for bad nonces
        if code not in [200, 201, 204]:
            raise ValueError("{0}:\nUrl: {1}\nData: {2}\nResponse Code: {3}\nResponse: {4}".format(err_msg, url, data, code, resp_data))
        return resp_data, code, headers

    # helper function - make signed requests
    # Builds a JWS: fetches a fresh nonce, signs protected+payload with the
    # account key via openssl, and POSTs the flattened JSON serialization.
    def _send_signed_request(url, payload, err_msg, depth=0):
        payload64 = "" if payload is None else _b64(json.dumps(payload).encode('utf8'))
        new_nonce = _do_request(directory['newNonce'])[2]['Replay-Nonce']
        protected = {"url": url, "alg": alg, "nonce": new_nonce}
        # Before the account exists we identify by raw JWK; afterwards by kid.
        protected.update({"jwk": jwk} if acct_headers is None else {"kid": acct_headers['Location']})
        protected64 = _b64(json.dumps(protected).encode('utf8'))
        protected_input = "{0}.{1}".format(protected64, payload64).encode('utf8')
        out = _cmd(["openssl", "dgst", "-sha256", "-sign", account_key], stdin=subprocess.PIPE, cmd_input=protected_input, err_msg="OpenSSL Error")
        data = json.dumps({"protected": protected64, "payload": payload64, "signature": _b64(out)})
        try:
            return _do_request(url, data=data.encode('utf8'), err_msg=err_msg, depth=depth)
        except IndexError: # retry bad nonces (they raise IndexError)
            return _send_signed_request(url, payload, err_msg, depth=(depth + 1))

    # helper function - poll until complete
    def _poll_until_not(url, pending_statuses, err_msg):
        result, t0 = None, time.time()
        while result is None or result['status'] in pending_statuses:
            assert (time.time() - t0 < 3600), "Polling timeout" # 1 hour timeout
            time.sleep(0 if result is None else 2)
            result, _, _ = _send_signed_request(url, None, err_msg)
        return result

    # parse account key to get public key
    # Extracts modulus and exponent from the openssl text dump and builds the
    # RFC 7517 JWK needed for the initial (pre-kid) signed request.
    log.info("Parsing account key...")
    out = _cmd(["openssl", "rsa", "-in", account_key, "-noout", "-text"], err_msg="OpenSSL Error")
    pub_pattern = r"modulus:[\s]+?00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)"
    pub_hex, pub_exp = re.search(pub_pattern, out.decode('utf8'), re.MULTILINE|re.DOTALL).groups()
    pub_exp = "{0:x}".format(int(pub_exp))
    pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
    alg, jwk = "RS256", {
        "e": _b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
        "kty": "RSA",
        "n": _b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
    }
    accountkey_json = json.dumps(jwk, sort_keys=True, separators=(',', ':'))
    thumbprint = _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())

    # get the ACME directory of urls
    log.info("Getting directory...")
    directory, _, _ = _do_request(directory_url, err_msg="Error getting directory")
    log.info("Directory found!")

    # create account and get the global key identifier
    # newAccount is idempotent: 201 = created, otherwise the existing account
    # is returned; either way Location carries the account URL.
    log.info("Registering account...")
    reg_payload = {"termsOfServiceAgreed": True} if contact is None else {"termsOfServiceAgreed": True, "contact": contact}
    account, code, acct_headers = _send_signed_request(directory['newAccount'], reg_payload, "Error registering")
    log.info("Registered!" if code == 201 else "Already registered!")

    return acct_headers['Location']
|
||||
|
||||
def main(argv=None):
    """Parse command-line arguments, register/look up the ACME account for
    the given key, and print the CAA record (with accounturi) to publish.

    :param argv: argument list (defaults to None, i.e. sys.argv handled by
        the caller passing sys.argv[1:])
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""\
            Generate a CAA record for Mailcow.

            Example Usage: python mailcow_gencaa.py --account-key data/assets/ssl/acme/account.pem
            """)
    )
    parser.add_argument("--account-key", required=True, help="path to your Let's Encrypt account private key")
    parser.add_argument("--quiet", action="store_const", const=logging.ERROR, help="suppress output except for errors")
    parser.add_argument("--directory-url", default=DEFAULT_DIRECTORY_URL, help="certificate authority directory url, default is Let's Encrypt")
    parser.add_argument("--contact", metavar="CONTACT", default=None, nargs="*", help="Contact details (e.g. mailto:aaa@bbb.com) for your account-key")

    args = parser.parse_args(argv)
    # --quiet raises the level to ERROR; otherwise keep the current level.
    LOGGER.setLevel(args.quiet or LOGGER.level)
    # Named 'account_uri' (not 'id') to avoid shadowing the builtin id().
    account_uri = get_id(args.account_key, log=LOGGER, directory_url=args.directory_url, contact=args.contact)
    print("Use this as your CAA record:")
    print('issue 128 "letsencrypt.org;accounturi={}"'.format(account_uri))
|
||||
37
helper-scripts/mailcow-reset-admin.sh
Executable file
37
helper-scripts/mailcow-reset-admin.sh
Executable file
|
|
@ -0,0 +1,37 @@
|
|||
#!/usr/bin/env bash
# Reset the mailcow "admin" account: generate a new random password, replace
# the admin rows in the SQL database and drop any configured TFA methods.
# An optional first argument overrides the generated password length.

[[ -f mailcow.conf ]] && source mailcow.conf
[[ -f ../mailcow.conf ]] && source ../mailcow.conf

if [[ -z ${DBUSER} ]] || [[ -z ${DBPASS} ]] || [[ -z ${DBNAME} ]]; then
  echo "Cannot find mailcow.conf, make sure this script is run from within the mailcow folder."
  exit 1
fi

echo -n "Checking MySQL service... "
# Resolve the container ID once instead of on every docker exec call below.
MYSQL_ID=$(docker ps -qf name=mysql-mailcow)
if [[ -z ${MYSQL_ID} ]]; then
  echo "failed"
  echo "MySQL (mysql-mailcow) is not up and running, exiting..."
  exit 1
fi

echo "OK"
read -r -p "Are you sure you want to reset the mailcow administrator account? [y/N] " response
response=${response,,} # tolower
if [[ "$response" =~ ^(yes|y)$ ]]; then
  echo -e "\nWorking, please wait..."
  # ${1:-16}: optional script argument sets the password length (default 16).
  random=$(</dev/urandom tr -dc _A-Z-a-z-0-9 2> /dev/null | head -c${1:-16})
  # Hash via doveadm inside the dovecot container; tr strips the CR that
  # "docker exec -it" appends.
  password=$(docker exec -it $(docker ps -qf name=dovecot-mailcow) doveadm pw -s SSHA256 -p ${random} | tr -d '\r')
  docker exec -it "${MYSQL_ID}" mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "DELETE FROM admin WHERE username='admin';"
  docker exec -it "${MYSQL_ID}" mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "DELETE FROM domain_admins WHERE username='admin';"
  docker exec -it "${MYSQL_ID}" mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "INSERT INTO admin (username, password, superadmin, active) VALUES ('admin', '${password}', 1, 1);"
  docker exec -it "${MYSQL_ID}" mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "DELETE FROM tfa WHERE username='admin';"
  echo "
Reset credentials:
---
Username: admin
Password: ${random}
TFA: none
"
else
  echo "Operation canceled."
fi
|
||||
264
helper-scripts/nextcloud.sh
Executable file
264
helper-scripts/nextcloud.sh
Executable file
|
|
@ -0,0 +1,264 @@
|
|||
#!/usr/bin/env bash
|
||||
# renovate: datasource=github-releases depName=nextcloud/server versioning=semver extractVersion=^v(?<version>.*)$
|
||||
NEXTCLOUD_VERSION=28.0.6
|
||||
|
||||
#######################################
# Print the given message lines inside a red '#' box on stdout.
# Arguments: one or more message lines; each is rendered left-aligned,
#            padded to the width of the longest line.
# Outputs:   the framed message followed by an ANSI colour reset.
#######################################
display_warning() {
  local message=("$@")
  local max_length=0
  local line border   # 'line' is local so the loop variable does not leak

  # Find the widest line; the border is that width plus the "# " / " #" frame.
  for line in "${message[@]}"; do
    if (( ${#line} > max_length )); then
      max_length=${#line}
    fi
  done

  # Declaration split from assignment so a printf/tr failure is not masked
  # by 'local' always returning 0 (ShellCheck SC2155).
  border=$(printf '%*s' "$((max_length + 4))" '' | tr ' ' '#')

  echo -e "\e[31m${border}"
  for line in "${message[@]}"; do
    printf "\e[31m# %-*s #\n" "$max_length" "$line"
  done
  echo -e "\e[31m${border}"
  echo -e "\e[0m"
}
|
||||
|
||||
# Deprecation notice: this installer/maintenance helper is scheduled for
# removal; existing Nextcloud data is kept regardless of the removal.
display_warning "WARNING: This Script is deprecated and will be removed in December 2024!" \
"mailcow will drop this installation/maintenance script within December 2024..." \
"To ensure you can still use your Nextcloud Datas, please migrate to a standalone" \
"Nextcloud instance either on a new Host or this host." \
"You can either use Nextcloud in Docker or install it manually." \
" "\
"mailcow will NOT DELETE any Nextcloud Data, even when this script was removed!!"

echo -e "Waiting 5 seconds before continuing..."

# Give the user a chance to abort (Ctrl-C) before anything happens.
sleep 5
||||
|
||||
echo -ne "Checking prerequisites..."
sleep 1
# curl/tar/bzip2 fetch and unpack the Nextcloud release archive; dirmngr is
# required for key handling. 'command -v' is the POSIX replacement for the
# deprecated 'which'.
for bin in curl dirmngr tar bzip2; do
  if [[ -z $(command -v "${bin}") ]]; then echo -ne "\r\033[31mCannot find ${bin}, exiting...\033[0m\n"; exit 1; fi
done
echo -ne "\r\033[32mFound all prerequisites! Continuing...\033[0m\n"
|
||||
|
||||
# No arguments -> show usage.
[[ -z ${1} ]] && NC_HELP=y

# Exactly one mode flag (-p/-i/-u/-r/-h) is accepted per invocation; each
# sets its NC_* marker consumed by the dispatch below.
while [ "$1" != '' ]; do
  if [[ $# -ne 1 ]]; then
    echo -e "\033[31mPlease use only one parameter at the same time!\033[0m" >&2
    exit 2
  fi
  case "${1}" in
    -p|--purge) NC_PURGE=y && shift;;
    -i|--install) NC_INSTALL=y && shift;;
    -u|--update) NC_UPDATE=y && shift;;
    -r|--resetpw) NC_RESETPW=y && shift;;
    -h|--help) NC_HELP=y && shift;;
    *) echo "Unknown parameter: ${1}" && shift;;
  esac
done

if [[ ${NC_HELP} == "y" ]]; then
  printf 'Usage:\n\n'
  printf ' -p|--purge\n Purge Nextcloud\n'
  printf ' -i|--install\n Install Nextcloud\n'
  printf ' -u|--update\n Update Nextcloud\n'
  printf ' -r|--resetpw\n Reset password\n\n'
  exit 0
fi

# Always operate from the mailcow root (one level above this script) and
# load the instance configuration.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd ${SCRIPT_DIR}/../
source mailcow.conf
|
||||
|
||||
# Mode dispatch: exactly one of purge / update / install / resetpw runs,
# selected by the NC_* markers set during argument parsing.
if [[ ${NC_PURGE} == "y" ]]; then
  read -r -p "Are you sure you want to purge Nextcloud? [y/N] " response
  response=${response,,}
  if [[ ! "$response" =~ ^(yes|y)$ ]]; then
    echo "OK, aborting."
    exit 1
  fi

  # Newer installs use a dedicated 'nextcloud' database; older ones stored
  # oc_/nc_ prefixed tables inside the mailcow database.
  echo -e "\033[33mDetecting Database information...\033[0m"
  if [[ $(docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e "Show databases" | grep "nextcloud") ]]; then
    echo -e "\033[32mFound seperate Nextcloud database (newer scheme)!\033[0m"
    echo -e "\033[31mPurging...\033[0m"
    docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e "DROP DATABASE nextcloud;" > /dev/null
    docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e "DROP USER 'nextcloud'@'%';" > /dev/null
  elif [[ $(docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} mailcow -e "SHOW TABLES LIKE 'oc_%'") && $? -eq 0 ]]; then
    echo -e "\033[32mFound Nextcloud (oc) tables inside of mailcow database (old scheme)!\033[0m"
    echo -e "\033[31mPurging...\033[0m"
    # The inner query builds one combined "DROP TABLE db.tbl;..." statement
    # covering every oc_ table in ${DBNAME}.
    docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e \
    "$(docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e "SELECT IFNULL(GROUP_CONCAT('DROP TABLE ', TABLE_SCHEMA, '.', TABLE_NAME SEPARATOR ';'),'SELECT NULL;') FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME LIKE 'oc_%' AND TABLE_SCHEMA = '${DBNAME}';" -BN)" > /dev/null
  elif [[ $(docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} mailcow -e "SHOW TABLES LIKE 'nc_%'") && $? -eq 0 ]]; then
    echo -e "\033[32mFound Nextcloud (nc) tables inside of mailcow database (old scheme)!\033[0m"
    echo -e "\033[31mPurging...\033[0m"
    docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e \
    "$(docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e "SELECT IFNULL(GROUP_CONCAT('DROP TABLE ', TABLE_SCHEMA, '.', TABLE_NAME SEPARATOR ';'),'SELECT NULL;') FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME LIKE 'nc_%' AND TABLE_SCHEMA = '${DBNAME}';" -BN)" > /dev/null
  else
    echo -e "\033[31mError: No Nextcloud databases/tables found!"
    echo -e "\033[33mNot purging anything...\033[0m"
    exit 1
  fi
  # Flush Nextcloud's Redis cache (DB index 10).
  docker exec -it $(docker ps -f name=redis-mailcow -q) /bin/sh -c ' cat <<EOF | redis-cli
SELECT 10
FLUSHDB
EOF
'
  # Keep a timestamped backup of the config folder before removing the webroot.
  if [ -d ./data/web/nextcloud/config ]; then
    mv ./data/web/nextcloud/config/ ./data/conf/nextcloud-config-folder-$(date +%s).bak
  fi
  [[ -d ./data/web/nextcloud ]] && rm -rf ./data/web/nextcloud

  # Back up (rather than delete) the nginx site configs, then reload nginx.
  [[ -f ./data/conf/nginx/site.nextcloud.custom ]] && mv ./data/conf/nginx/site.nextcloud.custom ./data/conf/nginx/site.nextcloud.custom-$(date +%s).bak
  [[ -f ./data/conf/nginx/nextcloud.conf ]] && mv ./data/conf/nginx/nextcloud.conf ./data/conf/nginx/nextcloud.conf-$(date +%s).bak

  docker restart $(docker ps -aqf name=nginx-mailcow)

  echo -e "\033[32mNextcloud has been uninstalled sucessfully!\033[0m"

elif [[ ${NC_UPDATE} == "y" ]]; then
  read -r -p "Are you sure you want to update Nextcloud (with Nextclouds own updater)? [y/N] " response
  response=${response,,}
  if [[ ! "$response" =~ ^(yes|y)$ ]]; then
    echo "OK, aborting."
    exit 1
  fi

  if [ ! -f data/web/nextcloud/occ ]; then
    echo -e "\033[31mError: Nextcloud occ not found. Is Nextcloud installed?\033[0m"
    exit 1
  fi
  # If occ reports a PHP incompatibility, replace versioncheck.php with the
  # v26 one so the updater can run; the user must then re-run the update.
  if grep -Pq 'This version of Nextcloud is not compatible with (?:PHP)?(?>=?)(?:PHP)?(?>.+)' <<<$(docker exec -it -u www-data $(docker ps -f name=php-fpm-mailcow -q) bash -c "/web/nextcloud/occ --no-warnings status"); then
    echo -e "\033[31mError: This version of Nextcloud is not compatible with the current PHP version of php-fpm-mailcow, we'll fix it\033[0m"
    wget -q https://raw.githubusercontent.com/nextcloud/server/v26.0.0/lib/versioncheck.php -O ./data/web/nextcloud/lib/versioncheck.php
    echo -e "\e[33mPlease restart the update again.\e[0m"
  elif ! grep -q 'installed: true' <<<$(docker exec -it -u www-data $(docker ps -f name=php-fpm-mailcow -q) bash -c "/web/nextcloud/occ --no-warnings status"); then
    echo -e "\033[31mError: Nextcloud seems not to be installed.\033[0m"
    exit 1
  else
    # Run Nextcloud's own updater, then regenerate the nginx site config
    # from the template using the configured overwritehost.
    docker exec -it -u www-data $(docker ps -f name=php-fpm-mailcow -q) bash -c "php /web/nextcloud/updater/updater.phar"
    NC_SUBD=$(docker exec -i -u www-data $(docker ps -f name=php-fpm-mailcow -q) /web/nextcloud/occ config:system:get overwritehost)
    mv ./data/conf/nginx/nextcloud.conf ./data/conf/nginx/nextcloud.conf-$(date +%s).bak
    cp ./data/assets/nextcloud/nextcloud.conf ./data/conf/nginx/
    sed -i "s/NC_SUBD/${NC_SUBD}/g" ./data/conf/nginx/nextcloud.conf
  fi

elif [[ ${NC_INSTALL} == "y" ]]; then
  NC_SUBD=
  while [[ -z ${NC_SUBD} ]]; do
    read -p "Subdomain to run Nextcloud from [format: nextcloud.domain.tld]: " NC_SUBD
  done
  # A failed ping is only a warning; DNS may be valid without ICMP replies.
  if ! ping -q -c2 ${NC_SUBD} > /dev/null 2>&1 ; then
    read -p "Cannot ping subdomain, continue anyway? [y|N] " NC_CONT_FAIL
    [[ ! ${NC_CONT_FAIL,,} =~ ^(yes|y)$ ]] && { echo "Ok, exiting..."; exit 1; }
  fi

  echo -e "\033[33mDownloading \033[34mNextcloud ${NEXTCLOUD_VERSION}\033[33m...\033[0m"
  curl -L# -o nextcloud.tar.bz2 "https://download.nextcloud.com/server/releases/nextcloud-$NEXTCLOUD_VERSION.tar.bz2" || { echo "Failed to download Nextcloud archive."; exit 1; } \
  && tar -xjf nextcloud.tar.bz2 -C ./data/web/ \
  && rm nextcloud.tar.bz2 \
  && mkdir -p ./data/web/nextcloud/data \
  && chmod +x ./data/web/nextcloud/occ

  echo -e "\033[33mCreating 'nextcloud' database...\033[0m"
  NC_DBPASS=$(</dev/urandom tr -dc A-Za-z0-9 2> /dev/null | head -c 28)
  NC_DBUSER=nextcloud
  NC_DBNAME=nextcloud

  echo -ne "[1/3] Creating 'nextcloud' database"
  docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e "CREATE DATABASE ${NC_DBNAME};"
  sleep 2
  echo -ne "\r[2/3] Creating 'nextcloud' database user"
  docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e "CREATE USER '${NC_DBUSER}'@'%' IDENTIFIED BY '${NC_DBPASS}';"
  sleep 2
  echo -ne "\r[3/3] Granting 'nextcloud' user all permissions on database 'nextcloud'"
  docker exec -it $(docker ps -f name=mysql-mailcow -q) mysql -uroot -p${DBROOT} -e "GRANT ALL PRIVILEGES ON ${NC_DBNAME}.* TO '${NC_DBUSER}'@'%';"
  sleep 2

  echo ""
  echo -e "\033[33mInstalling Nextcloud...\033[0m"
  ADMIN_NC_PASS=$(</dev/urandom tr -dc A-Za-z0-9 2> /dev/null | head -c 28)

  echo -ne "[1/4] Setting correct permissions for www-data"
  docker exec -it $(docker ps -f name=php-fpm-mailcow -q) /bin/bash -c "chown -R www-data:www-data /web/nextcloud"
  sleep 2
  echo -ne "\r[2/4] Running occ maintenance:install to install Nextcloud"
  docker exec -it -u www-data $(docker ps -f name=php-fpm-mailcow -q) /web/nextcloud/occ --no-warnings maintenance:install \
    --database mysql \
    --database-host mysql \
    --database-name ${NC_DBNAME} \
    --database-user ${NC_DBUSER} \
    --database-pass ${NC_DBPASS} \
    --admin-user admin \
    --admin-pass ${ADMIN_NC_PASS} \
    --data-dir /web/nextcloud/data > /dev/null 2>&1

  # Wire Nextcloud to the mailcow services: Redis cache (DB 10), trusted
  # proxies/domains, reverse-proxy overwrites and SMTP via postfix:588.
  echo -ne "\r[3/4] Setting custom parameters inside the Nextcloud config file"
  echo ""
  docker exec -it -u www-data $(docker ps -f name=php-fpm-mailcow -q) bash -c "/web/nextcloud/occ --no-warnings config:system:set redis host --value=redis --type=string; \
/web/nextcloud/occ --no-warnings config:system:set redis port --value=6379 --type=integer; \
/web/nextcloud/occ --no-warnings config:system:set redis timeout --value=0.0 --type=integer; \
/web/nextcloud/occ --no-warnings config:system:set redis dbindex --value=10 --type=integer; \
/web/nextcloud/occ --no-warnings config:system:set memcache.locking --value='\OC\Memcache\Redis' --type=string; \
/web/nextcloud/occ --no-warnings config:system:set memcache.local --value='\OC\Memcache\Redis' --type=string; \
/web/nextcloud/occ --no-warnings config:system:set trusted_domains 1 --value=${NC_SUBD}; \
/web/nextcloud/occ --no-warnings config:system:set trusted_proxies 0 --value=${IPV6_NETWORK}; \
/web/nextcloud/occ --no-warnings config:system:set trusted_proxies 1 --value=${IPV4_NETWORK}.0/24; \
/web/nextcloud/occ --no-warnings config:system:set overwritehost --value=${NC_SUBD}; \
/web/nextcloud/occ --no-warnings config:system:set overwriteprotocol --value=https; \
/web/nextcloud/occ --no-warnings config:system:set overwritewebroot --value=/; \
/web/nextcloud/occ --no-warnings config:system:set mail_smtpmode --value=smtp; \
/web/nextcloud/occ --no-warnings config:system:set mail_smtpauthtype --value=LOGIN; \
/web/nextcloud/occ --no-warnings config:system:set mail_from_address --value=nextcloud; \
/web/nextcloud/occ --no-warnings config:system:set mail_domain --value=${MAILCOW_HOSTNAME}; \
/web/nextcloud/occ --no-warnings config:system:set mail_smtphost --value=postfix; \
/web/nextcloud/occ --no-warnings config:system:set mail_smtpport --value=588; \
/web/nextcloud/occ --no-warnings config:system:set mail_smtpstreamoptions ssl verify_peer --value=false --type=boolean
/web/nextcloud/occ --no-warnings config:system:set mail_smtpstreamoptions ssl verify_peer_name --value=false --type=boolean
/web/nextcloud/occ --no-warnings db:convert-filecache-bigint -n"

  # Not installing by default, broke too often
  #/web/nextcloud/occ --no-warnings app:install user_external; \
  #/web/nextcloud/occ --no-warnings config:system:set user_backends 0 arguments 0 --value={dovecot:143/imap/tls/novalidate-cert}; \
  #/web/nextcloud/occ --no-warnings config:system:set user_backends 0 class --value=OC_User_IMAP; \

  echo -e "\r[4/4] Enabling Nginx Configuration"
  cp ./data/assets/nextcloud/nextcloud.conf ./data/conf/nginx/
  sed -i "s/NC_SUBD/${NC_SUBD}/g" ./data/conf/nginx/nextcloud.conf
  sleep 2

  echo ""
  echo -e "\033[33mFinalizing installation...\033[0m"
  docker restart $(docker ps -aqf name=nginx-mailcow)

  echo ""
  echo "******************************************"
  echo "* SAVE THESE CREDENTIALS *"
  echo "* INSTALL DATE: $(date +%Y-%m-%d_%H-%M-%S) *"
  echo "******************************************"
  echo ""
  echo -e "\033[36mDatabase name: ${NC_DBNAME}\033[0m"
  echo -e "\033[36mDatabase user: ${NC_DBUSER}\033[0m"
  echo -e "\033[36mDatabase password: ${NC_DBPASS}\033[0m"
  echo ""
  echo -e "\033[31mUI admin password: ${ADMIN_NC_PASS}\033[0m"
  echo ""

elif [[ ${NC_RESETPW} == "y" ]]; then
  printf 'You are about to set a new password for a Nextcloud user.\n\nDo not use this option if your Nextcloud is configured to use mailcow for authentication.\nSet a new password for the corresponding mailbox in mailcow, instead.\n\n'
  read -r -p "Continue? [y/N] " response
  response=${response,,}
  if [[ ! "$response" =~ ^(yes|y)$ ]]; then
    echo "OK, aborting."
    exit 1
  fi

  NC_USER=
  while [[ -z ${NC_USER} ]]; do
    read -p "Enter the username: " NC_USER
  done
  # occ prompts interactively for the new password inside the container.
  docker exec -it -u www-data $(docker ps -f name=php-fpm-mailcow -q) /web/nextcloud/occ user:resetpassword ${NC_USER}
fi
|
||||
30
helper-scripts/reset-learns.sh
Executable file
30
helper-scripts/reset-learns.sh
Executable file
|
|
@ -0,0 +1,30 @@
|
|||
#!/usr/bin/env bash
# Reset all spam-learning state Rspamd keeps in Redis: fuzzy hashes (RS*,
# fuzzy*), Bayes statistics (BAYES*, learned*) and neural network data
# (tRFANN*). Rspamd is stopped while the keys are deleted and started again
# afterwards.

read -r -p "Are you sure you want to reset learned hashes from Rspamd (fuzzy, bayes, neural)? [y/N] " response
response=${response,,} # tolower
if [[ "$response" =~ ^(yes|y)$ ]]; then
  echo "Working, please wait..."
  REDIS_ID=$(docker ps -qf name=redis-mailcow)
  RSPAMD_ID=$(docker ps -qf name=rspamd-mailcow)

  # Quoted: unquoted [ -z $VAR ] breaks when the variable is empty or when
  # docker ps matches more than one container (multi-word expansion).
  if [ -z "${REDIS_ID}" ] || [ -z "${RSPAMD_ID}" ]; then
    echo "Cannot determine Redis or Rspamd container ID"
    exit 1
  else
    echo "Stopping Rspamd container"
    docker stop "${RSPAMD_ID}"
    echo "LUA will return nil when it succeeds or print a warning/error when it fails."
    echo "Deleting all RS* keys - if any"
    docker exec -it "${REDIS_ID}" redis-cli EVAL "for _,k in ipairs(redis.call('keys', ARGV[1])) do redis.call('del', k) end" 0 'RS*'
    echo "Deleting all BAYES* keys - if any"
    docker exec -it "${REDIS_ID}" redis-cli EVAL "for _,k in ipairs(redis.call('keys', ARGV[1])) do redis.call('del', k) end" 0 'BAYES*'
    echo "Deleting all learned* keys - if any"
    docker exec -it "${REDIS_ID}" redis-cli EVAL "for _,k in ipairs(redis.call('keys', ARGV[1])) do redis.call('del', k) end" 0 'learned*'
    echo "Deleting all fuzzy* keys - if any"
    docker exec -it "${REDIS_ID}" redis-cli EVAL "for _,k in ipairs(redis.call('keys', ARGV[1])) do redis.call('del', k) end" 0 'fuzzy*'
    echo "Deleting all tRFANN* keys - if any"
    docker exec -it "${REDIS_ID}" redis-cli EVAL "for _,k in ipairs(redis.call('keys', ARGV[1])) do redis.call('del', k) end" 0 'tRFANN*'
    echo "Starting Rspamd container"
    docker start "${RSPAMD_ID}"
  fi
fi
|
||||
72
helper-scripts/update_compose.sh
Executable file
72
helper-scripts/update_compose.sh
Executable file
|
|
@ -0,0 +1,72 @@
|
|||
#!/bin/bash
# update_compose.sh — update a *standalone* docker-compose binary in place.
# Reads DOCKER_COMPOSE_VERSION from mailcow.conf:
#   "standalone" — fetch and install the latest release from GitHub
#   "native"     — refuse; the compose plugin is managed by the package manager

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Quoted so a mailcow checkout path containing spaces still works.
source "${SCRIPT_DIR}/../mailcow.conf"

if [ "${DOCKER_COMPOSE_VERSION}" == "standalone" ]; then
  LATEST_COMPOSE=$(curl -Ls -w %{url_effective} -o /dev/null https://github.com/docker/compose/releases/latest) # redirect to latest release
  LATEST_COMPOSE=${LATEST_COMPOSE##*/v} #get the latest version from the redirect, excluding the "v" prefix
  COMPOSE_VERSION=$(docker-compose version --short)
  if [[ "$LATEST_COMPOSE" != "$COMPOSE_VERSION" ]]; then
    echo -e "\e[33mA new docker-compose Version is available: $LATEST_COMPOSE\e[0m"
    echo -e "\e[33mYour Version is: $COMPOSE_VERSION\e[0m"
  else
    echo -e "\e[32mYour docker-compose Version is up to date! Not updating it...\e[0m"
    exit 0
  fi
  read -r -p "Do you want to update your docker-compose Version? It will automatic upgrade your docker-compose installation (recommended)? [y/N] " updatecomposeresponse
  if [[ ! "${updatecomposeresponse}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
    echo "OK, not updating docker-compose."
    exit 0
  fi
  echo -e "\e[32mFetching new docker-compose (standalone) version...\e[0m"
  echo -e "\e[32mTrying to determine GLIBC version...\e[0m"
  # Suppress stderr too, so systems without ldd (musl) don't print noise.
  if ldd --version > /dev/null 2>&1; then
    # Take the trailing "x.y" of the version banner and keep the minor part.
    GLIBC_V=$(ldd --version | grep -E '(GLIBC|GNU libc)' | rev | cut -d ' ' -f1 | rev | cut -d '.' -f2)
    if [ -n "${GLIBC_V}" ] && [ "${GLIBC_V}" -gt 27 ]; then
      DC_DL_SUFFIX=
    else
      DC_DL_SUFFIX=legacy
    fi
  else
    DC_DL_SUFFIX=legacy
  fi
  # NOTE(review): DC_DL_SUFFIX is computed above but never used in the
  # download URL below — presumably the "-legacy" asset suffix should be
  # appended for old-GLIBC hosts; confirm against upstream release assets.
  sleep 1
  # Refuse to touch pip-managed docker-compose installations (capped at 1.29.2).
  if [[ $(command -v pip 2>&1) && $(pip list --local 2>&1 | grep -v DEPRECATION | grep -c docker-compose) == 1 || $(command -v pip3 2>&1) && $(pip3 list --local 2>&1 | grep -v DEPRECATION | grep -c docker-compose) == 1 ]]; then
    echo -e "\e[33mFound a docker-compose Version installed with pip!\e[0m"
    echo -e "\e[31mPlease uninstall the pip Version of docker-compose since it doesn't support Versions higher than 1.29.2.\e[0m"
    sleep 2
    echo -e "\e[33mExiting...\e[0m"
    exit 1
    #prevent breaking a working docker-compose installed with pip
  elif [[ $(curl -sL -w "%{http_code}" https://github.com/docker/compose/releases/latest -o /dev/null) == "200" ]]; then
    LATEST_COMPOSE=$(curl -Ls -w %{url_effective} -o /dev/null https://github.com/docker/compose/releases/latest) # redirect to latest release
    LATEST_COMPOSE=${LATEST_COMPOSE##*/} #get the latest version from the redirect, including the "v" prefix
    COMPOSE_VERSION=$(docker-compose version --short)
    # NOTE(review): this re-parse keeps the "v" prefix while
    # `docker-compose version --short` may not include it, so the versions
    # can compare unequal even when current — confirm intended behavior.
    if [[ "$LATEST_COMPOSE" != "$COMPOSE_VERSION" ]]; then
      COMPOSE_PATH=$(command -v docker-compose)
      if [[ -w "${COMPOSE_PATH}" ]]; then
        # Overwrite the existing binary in place and keep it executable.
        curl -#L "https://github.com/docker/compose/releases/download/${LATEST_COMPOSE}/docker-compose-$(uname -s)-$(uname -m)" > "$COMPOSE_PATH"
        chmod +x "$COMPOSE_PATH"
        echo -e "\e[32mYour Docker Compose (standalone) has been updated to: $LATEST_COMPOSE\e[0m"
        exit 0
      else
        echo -e "\e[33mWARNING: $COMPOSE_PATH is not writable, but new version $LATEST_COMPOSE is available (installed: $COMPOSE_VERSION)\e[0m"
        exit 1
      fi
    fi
  else
    echo -e "\e[33mCannot determine latest docker-compose version, skipping...\e[0m"
    exit 1
  fi

elif [ "${DOCKER_COMPOSE_VERSION}" == "native" ]; then
  echo -e "\e[31mYou are using the native Docker Compose Plugin. This Script is for the standalone Docker Compose Version only.\e[0m"
  sleep 2
  echo -e "\e[33mNotice: You'll have to update this Compose Version via your Package Manager manually!\e[0m"
  exit 1

else
  echo -e "\e[31mCan not read DOCKER_COMPOSE_VERSION variable from mailcow.conf! Is your mailcow up to date? Exiting...\e[0m"
  exit 1
fi
|
||||
41
helper-scripts/update_postscreen_whitelist.sh
Executable file
41
helper-scripts/update_postscreen_whitelist.sh
Executable file
|
|
@ -0,0 +1,41 @@
|
|||
#!/usr/bin/env bash
# update_postscreen_whitelist.sh — regenerate the Postfix postscreen CIDR
# whitelist via stevejenkins/postwhite (plus spf-tools), then append a set of
# static hosts. The generated list lands in
# data/conf/postfix/postscreen_access.cidr. Requires sudo for sed -i.

SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"
WORKING_DIR=${SCRIPT_DIR}/postwhite_tmp
SPFTOOLS_DIR=${WORKING_DIR}/spf-tools
POSTWHITE_DIR=${WORKING_DIR}/postwhite
POSTWHITE_CONF=${POSTWHITE_DIR}/postwhite.conf

# Extra sender domains to whitelist; inner quotes are intentional — the value
# is substituted verbatim into postwhite.conf, which expects a quoted list.
CUSTOM_HOSTS='"web.de gmx.net mail.de freenet.de arcor.de unity-mail.de"'
STATIC_HOSTS=(
  "194.25.134.0/24 permit # t-online.de"
)

# Use the WORKING_DIR variable consistently (was a duplicated literal path),
# and stop early if cloning either tool fails — later steps depend on them.
mkdir "${WORKING_DIR}" || exit 1
git clone https://github.com/spf-tools/spf-tools.git "${SPFTOOLS_DIR}" || exit 1
git clone https://github.com/stevejenkins/postwhite.git "${POSTWHITE_DIR}" || exit 1

# set_config KEY VALUE — rewrite "KEY = VALUE" lines in postwhite.conf.
function set_config() {
  sudo sed -i "s@^\($1\s*=\s*\).*\$@\1$2@" "${POSTWHITE_CONF}"
}

set_config custom_hosts "${CUSTOM_HOSTS}"
set_config reload_postfix no
set_config postfixpath /.
set_config spftoolspath "${WORKING_DIR}/spf-tools"
# NOTE(review): leading "." makes the whitelist path relative to postwhite's
# working directory despite ${SCRIPT_DIR} being absolute — confirm intended.
set_config whitelist ".${SCRIPT_DIR}/../data/conf/postfix/postscreen_access.cidr"
set_config yahoo_static_hosts "${POSTWHITE_DIR}/yahoo_static_hosts.txt"

#Fix URL for Yahoo!: https://github.com/stevejenkins/postwhite/issues/59
sudo sed -i \
  -e 's#yahoo_url="https://help.yahoo.com/kb/SLN23997.html"#yahoo_url="https://senders.yahooinc.com/outbound-mail-servers/"#' \
  -e 's#echo "ipv6:$line";#echo "ipv6:$line" | grep -v "ipv6:::";#' \
  -e 's#`command -v wget`#`command -v skip-wget`#' \
  "${POSTWHITE_DIR}/scrape_yahoo"

# Guard the cd: without it, a failed clone would run postwhite (and the final
# rm -r) from the wrong directory.
cd "${POSTWHITE_DIR}" || exit 1
./postwhite "${POSTWHITE_CONF}"

# Append the static hosts, one per line, to the generated CIDR list.
( IFS=$'\n'; echo "${STATIC_HOSTS[*]}" >> "${SCRIPT_DIR}/../data/conf/postfix/postscreen_access.cidr")

rm -r "${WORKING_DIR}"
|
||||
Loading…
Add table
Add a link
Reference in a new issue