mirror of https://github.com/mail-in-a-box/mailinabox.git synced 2024-11-22 02:17:26 +00:00

show the status of backups in the control panel

Joshua Tauberer 2014-09-01 13:06:38 +00:00
parent 4ec6692f21
commit 3853e8dd93
5 changed files with 224 additions and 84 deletions

View File

@@ -9,111 +9,168 @@
# backup/secret_key.txt) to STORAGE_ROOT/backup/encrypted.
# 5) STORAGE_ROOT/backup/after-backup is executed if it exists.

import os, os.path, shutil, glob, re, datetime
import dateutil.parser, dateutil.relativedelta, dateutil.tz

from utils import exclusive_process, load_environment, shell

# settings
keep_backups_for = "31D" # destroy backups older than 31 days except the most recent full backup

def backup_status(env):
    # What is the current status of backups?
    # Loop through all of the files in STORAGE_ROOT/backup/duplicity to
    # get a list of all of the backups taken and sum up file sizes to
    # see how large the storage is.

    now = datetime.datetime.now(dateutil.tz.tzlocal())
    def reldate(date):
        rd = dateutil.relativedelta.relativedelta(now, date)
        if rd.days >= 7: return "%d days" % rd.days
        if rd.days > 1: return "%d days, %d hours" % (rd.days, rd.hours)
        if rd.days == 1: return "%d day, %d hours" % (rd.days, rd.hours)
        return "%d hours, %d minutes" % (rd.hours, rd.minutes)

    backups = { }
    basedir = os.path.join(env['STORAGE_ROOT'], 'backup/duplicity/')
    encdir = os.path.join(env['STORAGE_ROOT'], 'backup/encrypted/')
    for fn in os.listdir(basedir):
        m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn)
        if not m: raise ValueError(fn)

        key = m.group("date")
        if key not in backups:
            date = dateutil.parser.parse(m.group("date"))
            backups[key] = {
                "date": m.group("date"),
                "date_str": date.strftime("%x %X"),
                "date_delta": reldate(date),
                "full": m.group("incbase") is None,
                "previous": m.group("incbase") is None,
                "size": 0,
                "encsize": 0,
            }

        backups[key]["size"] += os.path.getsize(os.path.join(basedir, fn))

        # Also check encrypted size.
        encfn = os.path.join(encdir, fn + ".enc")
        if os.path.exists(encfn):
            backups[key]["encsize"] += os.path.getsize(encfn)

    backups = sorted(backups.values(), key = lambda b : b["date"], reverse=True)

    return {
        "directory": basedir,
        "encpwfile": os.path.join(env['STORAGE_ROOT'], 'backup/secret_key.txt'),
        "encdirectory": encdir,
        "tz": now.tzname(),
        "backups": backups,
    }

def perform_backup(full_backup):
    env = load_environment()

    exclusive_process("backup")

    # Ensure the backup directory exists.
    backup_dir = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_duplicity_dir = os.path.join(backup_dir, 'duplicity')
    os.makedirs(backup_duplicity_dir, exist_ok=True)

    # On the first run, always do a full backup. An incremental
    # backup would fail.
    if len(os.listdir(backup_duplicity_dir)) == 0:
        full_backup = True
    else:
        # When the size of incremental backups exceeds the size of existing
        # full backups, take a new full backup. We want to avoid full backups
        # because they are costly to synchronize off-site.
        full_sz = sum(os.path.getsize(f) for f in glob.glob(backup_duplicity_dir + '/*-full.*'))
        inc_sz = sum(os.path.getsize(f) for f in glob.glob(backup_duplicity_dir + '/*-inc.*'))
        # (n.b. not counting size of new-signatures files because they are relatively small)
        if inc_sz > full_sz * 1.5:
            full_backup = True

    # Stop services.
    shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
    shell('check_call', ["/usr/sbin/service", "postfix", "stop"])

    # Update the backup mirror directory which mirrors the current
    # STORAGE_ROOT (but excluding the backups themselves!).
    try:
        shell('check_call', [
            "/usr/bin/duplicity",
            "full" if full_backup else "incr",
            "--no-encryption",
            "--archive-dir", "/tmp/duplicity-archive-dir",
            "--name", "mailinabox",
            "--exclude", backup_dir,
            "--volsize", "100",
            "--verbosity", "warning",
            env["STORAGE_ROOT"],
            "file://" + backup_duplicity_dir
            ])
    finally:
        # Start services again.
        shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
        shell('check_call', ["/usr/sbin/service", "postfix", "start"])

    # Remove old backups. This deletes all backup data no longer needed
    # from more than 31 days ago. Must do this before destroying the
    # cache directory or else this command will re-create it.
    shell('check_call', [
        "/usr/bin/duplicity",
        "remove-older-than",
        keep_backups_for,
        "--archive-dir", "/tmp/duplicity-archive-dir",
        "--name", "mailinabox",
        "--force",
        "--verbosity", "warning",
        "file://" + backup_duplicity_dir
        ])

    # Remove duplicity's cache directory because it's redundant with our backup directory.
    shutil.rmtree("/tmp/duplicity-archive-dir")

    # Encrypt all of the new files.
    backup_encrypted_dir = os.path.join(backup_dir, 'encrypted')
    os.makedirs(backup_encrypted_dir, exist_ok=True)
    for fn in os.listdir(backup_duplicity_dir):
        fn2 = os.path.join(backup_encrypted_dir, fn) + ".enc"
        if os.path.exists(fn2): continue

        # Encrypt the backup using the backup private key.
        shell('check_call', [
            "/usr/bin/openssl",
            "enc",
            "-aes-256-cbc",
            "-a",
            "-salt",
            "-in", os.path.join(backup_duplicity_dir, fn),
            "-out", fn2,
            "-pass", "file:%s" % os.path.join(backup_dir, "secret_key.txt"),
            ])

        # The backup can be decrypted with:
        # openssl enc -d -aes-256-cbc -a -in latest.tgz.enc -out /dev/stdout -pass file:secret_key.txt | tar -z

    # Remove encrypted backups that are no longer needed.
    for fn in os.listdir(backup_encrypted_dir):
        fn2 = os.path.join(backup_duplicity_dir, fn.replace(".enc", ""))
        if os.path.exists(fn2): continue
        os.unlink(os.path.join(backup_encrypted_dir, fn))

    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_dir, 'after-backup')
    if os.path.exists(post_script):
        shell('check_call',
            ['su', env['STORAGE_USER'], '-c', post_script],
            env=env)

if __name__ == "__main__":
    import sys
    full_backup = "--full" in sys.argv
    perform_backup(full_backup)
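
To make the new API concrete, here is a minimal usage sketch (not part of the commit) that prints roughly what the control panel will show. It assumes it is run from the management directory so that backup.py and utils.py are importable, and that at least one backup already exists under STORAGE_ROOT/backup/duplicity.

    # Hypothetical helper script, for illustration only.
    from backup import backup_status
    from utils import load_environment

    env = load_environment()
    status = backup_status(env)

    print("Backups stored in:", status["directory"])
    for b in status["backups"]:
        kind = "full" if b["full"] else "incremental"
        print("%s %s  (%s ago)  %-11s  %d bytes, %d bytes encrypted" % (
            b["date_str"], status["tz"], b["date_delta"], kind, b["size"], b["encsize"]))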

View File

@@ -250,6 +250,12 @@ def do_updates():
        "DEBIAN_FRONTEND": "noninteractive"
        })

@app.route('/system/backup/status')
@authorized_personnel_only
def backup_status():
    from backup import backup_status
    return json_response(backup_status(env))

# APP

if __name__ == '__main__':
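
For reference, the JSON this endpoint returns is just the dict built by backup_status() passed through json_response(). An abridged illustration, written as a Python literal with made-up values (paths assume the default STORAGE_ROOT), might look like this:

    # Illustrative only -- values are invented; keys mirror backup_status() above.
    example_response = {
        "directory": "/home/user-data/backup/duplicity/",
        "encdirectory": "/home/user-data/backup/encrypted/",
        "encpwfile": "/home/user-data/backup/secret_key.txt",
        "tz": "UTC",
        "backups": [
            {
                "date": "20140901T130638Z",
                "date_str": "09/01/14 13:06:38",
                "date_delta": "3 hours, 2 minutes",
                "full": True,
                "size": 104857600,    # raw duplicity volume bytes for this backup
                "encsize": 139810133  # bytes of the corresponding .enc copies
            }
        ]
    }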

View File

@@ -85,7 +85,10 @@
            <a href="#" class="dropdown-toggle" data-toggle="dropdown">System <b class="caret"></b></a>
            <ul class="dropdown-menu">
              <li><a href="#system_status" onclick="return show_panel(this);">Status Checks</a></li>
              <li><a href="#system_backup" onclick="return show_panel(this);">Backup Status</a></li>
              <li class="divider"></li>
              <li class="dropdown-header">Super Advanced Options</li>
              <li><a href="#system_dns" onclick="return show_panel(this);">DNS (Custom/External)</a></li>
            </ul>
          </li>
          <li class="dropdown active">
@@ -115,6 +118,10 @@
      {% include "system-status.html" %}
    </div>

    <div id="panel_system_backup" class="container panel">
      {% include "system-backup.html" %}
    </div>

    <div id="panel_system_dns" class="container panel">
      {% include "system-dns.html" %}
    </div>

View File

@@ -0,0 +1,70 @@
<style>
#backup-status tr.full-backup td { font-weight: bold; }
</style>
<h2>Backup Status</h2>
<p>The box makes an incremental backup each night. The backup is stored on the machine itself. You are responsible for copying the backup files off of the machine. Many cloud providers make this easy by allowing you to take snapshots of the machine's disk.</p>
<h3>Copying Backup Files</h3>
<p>Use SFTP (FTP over SSH) to copy files from <tt id="backup-location"></tt>. These files are encrypted, so they are safe to store anywhere. Also copy the encryption password from <tt id="backup-encpassword-file"></tt>, but keep it in a safe location.</p>
<h3>Current Backups</h3>
<p>The backup directory currently contains the backups listed below. The total size on disk of the backups is <span id="backup-total-size"></span>.</p>
<table id="backup-status" class="table" style="width: auto">
<thead>
<th colspan="2">When</th>
<th>Type</th>
<th>Size</th>
</thead>
<tbody>
</tbody>
</table>
<p><small>The size column in the table indicates the size of the encrypted backup, but the total size on disk shown above includes storage for unencrypted intermediate files.</small></p>
<script>
function nice_size(bytes) {
var powers = ['bytes', 'KB', 'MB', 'GB', 'TB'];
while (true) {
if (powers.length == 1) break;
if (bytes < 1000) break;
bytes /= 1024;
powers.shift();
}
return (Math.round(bytes*10)/10) + " " + powers[0];
}
function show_system_backup() {
$('#backup-status tbody').html("<tr><td colspan='2' class='text-muted'>Loading...</td></tr>")
api(
"/system/backup/status",
"GET",
{ },
function(r) {
$('#backup-location').text(r.encdirectory);
$('#backup-encpassword-file').text(r.encpwfile);
$('#backup-status tbody').html("");
var total_disk_size = 0;
for (var i = 0; i < r.backups.length; i++) {
var b = r.backups[i];
var tr = $('<tr/>');
if (b.full) tr.addClass("full-backup");
tr.append( $('<td/>').text(b.date_str + " " + r.tz) );
tr.append( $('<td/>').text(b.date_delta + " ago") );
tr.append( $('<td/>').text(b.full ? "full" : "incremental") );
tr.append( $('<td style="text-align: right"/>').text( nice_size(b.encsize)) );
$('#backup-status tbody').append(tr);
total_disk_size += b.size;
total_disk_size += b.encsize;
}
$('#backup-total-size').text(nice_size(total_disk_size));
})
}
</script>
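
The page above asks the administrator to copy the .enc files and the secret key off the box; the matching decrypt command is documented in a comment in the backup script. A minimal sketch (not part of the commit; the volume filename is hypothetical) of driving that same openssl invocation from Python:

    # Decrypt one copied backup volume with the documented openssl command.
    import subprocess

    volume = "duplicity-full.20140901T130638Z.vol1.difftar.gz"  # hypothetical filename
    subprocess.check_call([
        "openssl", "enc", "-d", "-aes-256-cbc", "-a",
        "-in", volume + ".enc",
        "-out", volume,
        "-pass", "file:secret_key.txt",  # the copied STORAGE_ROOT/backup/secret_key.txt
    ])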

View File

@@ -2,7 +2,7 @@
source setup/functions.sh

apt_install python3-flask links duplicity libyaml-dev python3-dnspython python3-dateutil

hide_output pip3 install rtyaml

# Create a backup directory and a random key for encrypting backups.
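
python3-dateutil is the new dependency here; backup_status() uses it to parse duplicity's timestamps and build the "x days ago" strings shown in the panel. A small sketch (not part of the commit) of that usage:

    # Parse a duplicity-style timestamp and compute a relative age,
    # mirroring the reldate() helper in backup_status().
    import datetime
    import dateutil.parser, dateutil.relativedelta, dateutil.tz

    date = dateutil.parser.parse("20140901T130638Z")    # timezone-aware (UTC)
    now = datetime.datetime.now(dateutil.tz.tzlocal())  # timezone-aware local time
    rd = dateutil.relativedelta.relativedelta(now, date)
    print(date.strftime("%x %X"), "->", "%d days, %d hours ago" % (rd.days, rd.hours))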