show the status of backups in the control panel
commit 3853e8dd93 (parent 4ec6692f21)
@@ -9,111 +9,168 @@
# backup/secret_key.txt) to STORAGE_ROOT/backup/encrypted.
# 5) STORAGE_ROOT/backup/after-backup is executed if it exists.

import os, os.path, shutil, glob, re, datetime
import dateutil.parser, dateutil.relativedelta, dateutil.tz

from utils import exclusive_process, load_environment, shell

# settings
keep_backups_for = "31D" # destroy backups older than 31 days except the most recent full backup

def backup_status(env):
	# What is the current status of backups?
	# Loop through all of the files in STORAGE_ROOT/backup/duplicity to
	# get a list of all of the backups taken and sum up file sizes to
	# see how large the storage is.

	now = datetime.datetime.now(dateutil.tz.tzlocal())
	def reldate(date):
		rd = dateutil.relativedelta.relativedelta(now, date)
		if rd.days >= 7: return "%d days" % rd.days
		if rd.days > 1: return "%d days, %d hours" % (rd.days, rd.hours)
		if rd.days == 1: return "%d day, %d hours" % (rd.days, rd.hours)
		return "%d hours, %d minutes" % (rd.hours, rd.minutes)

	backups = { }
	basedir = os.path.join(env['STORAGE_ROOT'], 'backup/duplicity/')
	encdir = os.path.join(env['STORAGE_ROOT'], 'backup/encrypted/')
	for fn in os.listdir(basedir):
		m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn)
		if not m: raise ValueError(fn)
		key = m.group("date")
		if key not in backups:
			date = dateutil.parser.parse(m.group("date"))
			backups[key] = {
				"date": m.group("date"),
				"date_str": date.strftime("%x %X"),
				"date_delta": reldate(date),
				"full": m.group("incbase") is None,
				"previous": m.group("incbase"),
				"size": 0,
				"encsize": 0,
			}
		backups[key]["size"] += os.path.getsize(os.path.join(basedir, fn))

		# Also check encrypted size.
		encfn = os.path.join(encdir, fn + ".enc")
		if os.path.exists(encfn):
			backups[key]["encsize"] += os.path.getsize(encfn)

	backups = sorted(backups.values(), key = lambda b : b["date"], reverse=True)

	return {
		"directory": basedir,
		"encpwfile": os.path.join(env['STORAGE_ROOT'], 'backup/secret_key.txt'),
		"encdirectory": encdir,
		"tz": now.tzname(),
		"backups": backups,
	}
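For context while reading backup_status (this is not part of the commit): duplicity names its archive volumes in the pattern the regex above parses. A minimal sketch of how the named groups come out for the four kinds of files, using invented timestamps:

	import re

	pat = re.compile(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.")

	# Illustrative file names only; the timestamps are made up.
	samples = [
		"duplicity-full.20140816T023000Z.vol1.difftar.gz",
		"duplicity-full-signatures.20140816T023000Z.sigtar.gz",
		"duplicity-inc.20140816T023000Z.to.20140817T023000Z.vol1.difftar.gz",
		"duplicity-new-signatures.20140816T023000Z.to.20140817T023000Z.sigtar.gz",
	]
	for fn in samples:
		m = pat.match(fn)
		# "date" is the backup's own timestamp; "incbase" is the backup it
		# increments from, or None for full backups.
		print(fn, "->", m.group("date"), m.group("incbase"))

All files sharing a "date" timestamp are grouped into one entry, which is how the function sums the per-backup sizes.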

def perform_backup(full_backup):
	env = load_environment()

	exclusive_process("backup")

	# Ensure the backup directory exists.
	backup_dir = os.path.join(env["STORAGE_ROOT"], 'backup')
	backup_duplicity_dir = os.path.join(backup_dir, 'duplicity')
	os.makedirs(backup_duplicity_dir, exist_ok=True)

	# On the first run, always do a full backup. Incremental
	# will fail.
	if len(os.listdir(backup_duplicity_dir)) == 0:
		full_backup = True
	else:
		# When the size of incremental backups exceeds the size of existing
		# full backups, take a new full backup. We want to avoid full backups
		# because they are costly to synchronize off-site.
		full_sz = sum(os.path.getsize(f) for f in glob.glob(backup_duplicity_dir + '/*-full.*'))
		inc_sz = sum(os.path.getsize(f) for f in glob.glob(backup_duplicity_dir + '/*-inc.*'))
		# (n.b. not counting size of new-signatures files because they are relatively small)
		if inc_sz > full_sz * 1.5:
			full_backup = True

	# Stop services.
	shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
	shell('check_call', ["/usr/sbin/service", "postfix", "stop"])

	# Update the backup mirror directory which mirrors the current
	# STORAGE_ROOT (but excluding the backups themselves!).
	try:
		shell('check_call', [
			"/usr/bin/duplicity",
			"full" if full_backup else "incr",
			"--no-encryption",
			"--archive-dir", "/tmp/duplicity-archive-dir",
			"--name", "mailinabox",
			"--exclude", backup_dir,
			"--volsize", "100",
			"--verbosity", "warning",
			env["STORAGE_ROOT"],
			"file://" + backup_duplicity_dir
			])
	finally:
		# Start services again.
		shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
		shell('check_call', ["/usr/sbin/service", "postfix", "start"])

	# Remove old backups. This deletes all backup data no longer needed
	# from more than 31 days ago. Must do this before destroying the
	# cache directory or else this command will re-create it.
	shell('check_call', [
		"/usr/bin/duplicity",
		"remove-older-than",
		keep_backups_for,
		"--archive-dir", "/tmp/duplicity-archive-dir",
		"--name", "mailinabox",
		"--force",
		"--verbosity", "warning",
		"file://" + backup_duplicity_dir
		])

	# Remove duplicity's cache directory because it's redundant with our backup directory.
	shutil.rmtree("/tmp/duplicity-archive-dir")

	# Encrypt all of the new files.
	backup_encrypted_dir = os.path.join(backup_dir, 'encrypted')
	os.makedirs(backup_encrypted_dir, exist_ok=True)
	for fn in os.listdir(backup_duplicity_dir):
		fn2 = os.path.join(backup_encrypted_dir, fn) + ".enc"
		if os.path.exists(fn2): continue

		# Encrypt the backup using the backup private key.
		shell('check_call', [
			"/usr/bin/openssl",
			"enc",
			"-aes-256-cbc",
			"-a",
			"-salt",
			"-in", os.path.join(backup_duplicity_dir, fn),
			"-out", fn2,
			"-pass", "file:%s" % os.path.join(backup_dir, "secret_key.txt"),
			])

		# The backup can be decrypted with:
		# openssl enc -d -aes-256-cbc -a -in latest.tgz.enc -out /dev/stdout -pass file:secret_key.txt | tar -z
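To make the decryption comment concrete, here is a minimal sketch that inverts the openssl invocation above for a single copied-off volume. The file names are placeholders, not values from the diff; it assumes the .enc file and secret_key.txt sit in the current directory:

	import subprocess

	subprocess.check_call([
		"/usr/bin/openssl", "enc", "-d",
		"-aes-256-cbc", "-a",
		"-in", "duplicity-full.20140816T023000Z.vol1.difftar.gz.enc",   # placeholder name
		"-out", "duplicity-full.20140816T023000Z.vol1.difftar.gz",
		"-pass", "file:secret_key.txt",
	])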

	# Remove encrypted backups that are no longer needed.
	for fn in os.listdir(backup_encrypted_dir):
		fn2 = os.path.join(backup_duplicity_dir, fn.replace(".enc", ""))
		if os.path.exists(fn2): continue
		os.unlink(os.path.join(backup_encrypted_dir, fn))

	# Execute a post-backup script that does the copying to a remote server.
	# Run as the STORAGE_USER user, not as root. Pass our settings in
	# environment variables so the script has access to STORAGE_ROOT.
	post_script = os.path.join(backup_dir, 'after-backup')
	if os.path.exists(post_script):
		shell('check_call',
			['su', env['STORAGE_USER'], '-c', post_script],
			env=env)

if __name__ == "__main__":
	import sys
	full_backup = "--full" in sys.argv
	perform_backup(full_backup)
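The after-backup hook is the intended extension point for getting the encrypted files off the box. A minimal sketch of such a hook, assuming it is saved as STORAGE_ROOT/backup/after-backup and made executable; the rsync destination is a placeholder, and the only thing taken from the commit is that STORAGE_ROOT arrives via the environment:

	#!/usr/bin/env python3
	# Hypothetical after-backup hook: push the encrypted backups to another machine.
	# perform_backup passes its settings through the environment, so STORAGE_ROOT
	# is available here.
	import os, subprocess

	encdir = os.path.join(os.environ["STORAGE_ROOT"], "backup/encrypted/")

	# Placeholder destination; replace with a host you control.
	subprocess.check_call(["rsync", "-a", encdir, "backup@example.com:mailinabox-backups/"])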
@@ -250,6 +250,12 @@ def do_updates():
		"DEBIAN_FRONTEND": "noninteractive"
		})

@app.route('/system/backup/status')
@authorized_personnel_only
def backup_status():
	from backup import backup_status
	return json_response(backup_status(env))
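The route just serializes whatever backup_status returns, so the quickest way to see what the control panel receives is to call the function directly on the box. A sketch, assuming it is run from the management directory where backup.py and utils.py live; the values in the comment are invented:

	import json
	from backup import backup_status
	from utils import load_environment

	print(json.dumps(backup_status(load_environment()), indent=2, default=str))
	# Roughly:
	# {
	#   "directory": ".../backup/duplicity/",
	#   "encdirectory": ".../backup/encrypted/",
	#   "encpwfile": ".../backup/secret_key.txt",
	#   "tz": "UTC",
	#   "backups": [ {"date_str": "...", "date_delta": "...", "full": true, "size": 0, "encsize": 0}, ... ]
	# }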

# APP

if __name__ == '__main__':
@@ -85,7 +85,10 @@
<a href="#" class="dropdown-toggle" data-toggle="dropdown">System <b class="caret"></b></a>
<ul class="dropdown-menu">
  <li><a href="#system_status" onclick="return show_panel(this);">Status Checks</a></li>
  <li><a href="#system_dns" onclick="return show_panel(this);">DNS (Advanced)</a></li>
  <li><a href="#system_backup" onclick="return show_panel(this);">Backup Status</a></li>
  <li class="divider"></li>
  <li class="dropdown-header">Super Advanced Options</li>
  <li><a href="#system_dns" onclick="return show_panel(this);">DNS (Custom/External)</a></li>
</ul>
</li>
<li class="dropdown active">

@@ -115,6 +118,10 @@
  {% include "system-status.html" %}
</div>

<div id="panel_system_backup" class="container panel">
  {% include "system-backup.html" %}
</div>

<div id="panel_system_dns" class="container panel">
  {% include "system-dns.html" %}
</div>
management/templates/system-backup.html (new file, 70 lines)
@@ -0,0 +1,70 @@
<style>
#backup-status tr.full-backup td { font-weight: bold; }
</style>

<h2>Backup Status</h2>

<p>The box makes an incremental backup each night. The backup is stored on the machine itself. You are responsible for copying the backup files off of the machine. Many cloud providers make this easy by allowing you to take snapshots of the machine's disk.</p>

<h3>Copying Backup Files</h3>

<p>Use SFTP (FTP over SSH) to copy files from <tt id="backup-location"></tt>. These files are encrypted, so they are safe to store anywhere. Copy the encryption password from <tt id="backup-encpassword-file"></tt> as well, but keep it in a safe location.</p>

<h3>Current Backups</h3>

<p>The backup directory currently contains the backups listed below. The total size on disk of the backups is <span id="backup-total-size"></span>.</p>
<table id="backup-status" class="table" style="width: auto">
<thead>
	<th colspan="2">When</th>
	<th>Type</th>
	<th>Size</th>
</thead>
<tbody>
</tbody>
</table>

<p><small>The size column in the table indicates the size of the encrypted backup, but the total size on disk shown above includes storage for unencrypted intermediate files.</small></p>
<script>
function nice_size(bytes) {
	var powers = ['bytes', 'KB', 'MB', 'GB', 'TB'];
	while (true) {
		if (powers.length == 1) break;
		if (bytes < 1000) break;
		bytes /= 1024;
		powers.shift();
	}
	return (Math.round(bytes*10)/10) + " " + powers[0];
}

function show_system_backup() {
	$('#backup-status tbody').html("<tr><td colspan='2' class='text-muted'>Loading...</td></tr>")
	api(
		"/system/backup/status",
		"GET",
		{ },
		function(r) {
			$('#backup-location').text(r.encdirectory);
			$('#backup-encpassword-file').text(r.encpwfile);

			$('#backup-status tbody').html("");
			var total_disk_size = 0;
			for (var i = 0; i < r.backups.length; i++) {
				var b = r.backups[i];
				var tr = $('<tr/>');
				if (b.full) tr.addClass("full-backup");
				tr.append( $('<td/>').text(b.date_str + " " + r.tz) );
				tr.append( $('<td/>').text(b.date_delta + " ago") );
				tr.append( $('<td/>').text(b.full ? "full" : "incremental") );
				tr.append( $('<td style="text-align: right"/>').text( nice_size(b.encsize)) );
				$('#backup-status tbody').append(tr);

				total_disk_size += b.size;
				total_disk_size += b.encsize;
			}

			$('#backup-total-size').text(nice_size(total_disk_size));
		})
}
</script>
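A side note on the formatter, not part of the commit: nice_size stops dividing once the value drops below 1000 but divides by 1024 each step, so the displayed units are approximate. A small Python equivalent, handy for sanity-checking what the panel will show for a given byte count:

	def nice_size(num_bytes):
		# Mirrors the JavaScript above: break below 1000, but divide by 1024.
		powers = ['bytes', 'KB', 'MB', 'GB', 'TB']
		while len(powers) > 1 and num_bytes >= 1000:
			num_bytes /= 1024
			powers.pop(0)
		return "%s %s" % (round(num_bytes * 10) / 10, powers[0])

	print(nice_size(123456789))  # e.g. "117.7 MB"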
@@ -2,7 +2,7 @@

source setup/functions.sh

apt_install python3-flask links duplicity libyaml-dev python3-dnspython
apt_install python3-flask links duplicity libyaml-dev python3-dnspython python3-dateutil
hide_output pip3 install rtyaml

# Create a backup directory and a random key for encrypting backups.