Closed — andrasbacsai closed this issue 2 weeks ago.
I stumbled on this issue on my first use of Coolify. I implemented a temporary backup solution until this is implemented within Coolify. Sharing my workaround below in case it's useful for anyone else.
Any use is at your own risk.
# Dockerfile for backup service
# Runs Debian cron alongside the postgres client tools; the actual backup
# logic lives in backup.sh, scheduled by the crontab file.
FROM postgres:16.3

# Install cron, the AWS CLI and helpers in one layer; skip recommended
# packages and drop the apt lists so they don't bloat the image.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        awscli \
        cron \
        curl \
        jq && \
    rm -rf /var/lib/apt/lists/*

# Copy the crontab file to the cron.d directory.
# Files in /etc/cron.d use the system format (with a user field) and must
# be 0644 and root-owned; Debian cron picks them up automatically, so no
# `crontab` invocation is needed (installing this file as a *user* crontab
# would misparse the user field as part of the command).
COPY crontab /etc/cron.d/backup-cron

# Copy the backup script to the root directory in the container
COPY backup.sh /backup.sh

# Fix permissions and pre-create the log file so `tail -f` works
# immediately on startup — all in a single layer.
RUN chmod 0644 /etc/cron.d/backup-cron && \
    chmod +x /backup.sh && \
    touch /var/log/cron.log

# On container startup, export the container's environment to
# /etc/environment (cron jobs do not inherit docker's env otherwise),
# start cron, and keep the container alive by tailing the cron log.
CMD ["/bin/sh", "-c", "printenv | grep -v \"no_proxy\" >> /etc/environment && cron && tail -f /var/log/cron.log"]
#!/bin/bash
# Nightly PostgreSQL backup: dump the database, upload the dump to S3,
# prune old local dumps, and report the outcome to Discord webhooks.
#
# Required environment variables (exported to cron via /etc/environment):
#   POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_DB
#   S3_BUCKET_NAME, S3_ENDPOINT_URL (plus AWS credentials for `aws s3`)
#   BACKUP_IDENTIFIER
#   DISCORD_BACKUP_SUCCESS_WEBHOOK_URL, DISCORD_BACKUP_FAILURE_WEBHOOK_URL

# Fail on first error
set -e

# Define webhook URLs
SUCCESS_WEBHOOK_URL="$DISCORD_BACKUP_SUCCESS_WEBHOOK_URL"
FAILURE_WEBHOOK_URL="$DISCORD_BACKUP_FAILURE_WEBHOOK_URL"

# Send a message to a Discord webhook. Best-effort: a webhook hiccup must
# not change the reported backup status, hence the trailing `|| true`.
send_discord_message() {
    local webhook_url="$1"
    local message="$2"
    local payload
    # jq properly escapes the message content for JSON
    payload=$(jq -n --arg content "$message" '{content: $content}')
    curl -sS -H "Content-Type: application/json" -X POST -d "$payload" "$webhook_url" || true
}

# Report a failure to Discord and abort with a non-zero status.
error_handler() {
    local error_message="$BACKUP_IDENTIFIER: Backup failed at $(date +%Y-%m-%d_%H:%M:%S)"
    echo "$error_message"
    send_discord_message "$FAILURE_WEBHOOK_URL" "$error_message"
    exit 1
}

# Trap any error and call the error_handler
trap 'error_handler' ERR

# Create the backup (custom-format dump, restorable with pg_restore)
backup_file="/backup/$(date +%Y-%m-%d-%H-%M-%S).dump"
PGPASSWORD="$POSTGRES_PASSWORD" pg_dump -h db -U "$POSTGRES_USER" -Fc "$POSTGRES_DB" > "$backup_file"
echo "Backup done at $(date +%Y-%m-%d_%H:%M:%S)"

# Upload the backup to S3
s3_path="s3://$S3_BUCKET_NAME/backup/$BACKUP_IDENTIFIER/$(basename "$backup_file")"
aws s3 cp "$backup_file" "$s3_path" --endpoint-url "$S3_ENDPOINT_URL"
echo "Backup uploaded to S3 at $(date +%Y-%m-%d_%H:%M:%S)"

# Construct the URL of the uploaded file
file_url="$S3_ENDPOINT_URL/$S3_BUCKET_NAME/backup/$BACKUP_IDENTIFIER/$(basename "$backup_file")"

# Keep only the two most recent local dumps. `find` (unlike a bare `ls`
# glob) does not error when no dumps exist — a bare `ls` failure here
# would trip the ERR trap and falsely report the backup as failed — and
# `xargs -r` skips `rm` entirely on empty input.
find /backup -maxdepth 1 -name '*.dump' | sort | head -n -2 | xargs -r rm -f

# Send success message to Discord. Use a real newline (not a literal
# "\n", which jq would escape and Discord would display verbatim).
success_message="$BACKUP_IDENTIFIER: Backup succeeded at $(date +%Y-%m-%d_%H:%M:%S).
File URL: $file_url"
echo "$success_message"
send_discord_message "$SUCCESS_WEBHOOK_URL" "$success_message"
# Crontab file for backup service
# System-crontab (/etc/cron.d) format: the sixth field ("root") is the
# user the job runs as — required here, do not remove it.
# Run the backup script at midnight every day
0 0 * * * root /backup.sh >> /var/log/cron.log 2>&1
version: "3.8"

services:
  # The PostgreSQL database being backed up.
  app-database:
    container_name: app-database
    image: postgres:16.3
    restart: always
    volumes:
      - database-data:/var/lib/postgresql/data/
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
    healthcheck:
      # $$ escapes the dollar sign so the variables are expanded inside
      # the container at check time, not by docker compose at parse time.
      test:
        - CMD-SHELL
        - "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"
      interval: 5s
      timeout: 20s
      retries: 10

  # Cron-driven backup sidecar (see ./backup/Dockerfile and backup.sh).
  app-backup:
    container_name: app-backup
    restart: always
    build:
      context: ./backup
      dockerfile: Dockerfile
    depends_on:
      app-database:
        condition: service_healthy
    volumes:
      # Bind mount so dumps are also visible on the host.
      - ./backup:/backup
    # Legacy `links` kept because the backup script connects to the
    # database as hostname "db" (`pg_dump -h db`); a network alias is the
    # modern equivalent.
    links:
      - app-database:db
    environment:
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: ${POSTGRES_DB}
      AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
      AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
      S3_BUCKET_NAME: ${S3_BUCKET_NAME}
      S3_ENDPOINT_URL: ${S3_ENDPOINT_URL}
      DISCORD_BACKUP_SUCCESS_WEBHOOK_URL: ${DISCORD_BACKUP_SUCCESS_WEBHOOK_URL}
      DISCORD_BACKUP_FAILURE_WEBHOOK_URL: ${DISCORD_BACKUP_FAILURE_WEBHOOK_URL}
      BACKUP_IDENTIFIER: ${BACKUP_IDENTIFIER} # some identifier to segment backups in S3

volumes:
  database-data:
  # NOTE(review): a "backup" named volume was previously declared here but
  # never referenced (app-backup bind-mounts ./backup instead); removed to
  # avoid confusion.
Ideally, Coolify would detect such a database automatically and provide a
backup view for setting up scheduled backups.