diff --git a/management/auth.py b/management/auth.py
index 1ae46d1e..66b58cfc 100644
--- a/management/auth.py
+++ b/management/auth.py
@@ -1,129 +1,134 @@
-import base64, os, os.path, hmac
+import base64
+import os
+import os.path
+import hmac
 
 from flask import make_response
 
 import utils
 from mailconfig import get_mail_password, get_mail_user_privileges
 
-DEFAULT_KEY_PATH   = '/var/lib/mailinabox/api.key'
+DEFAULT_KEY_PATH = '/var/lib/mailinabox/api.key'
 DEFAULT_AUTH_REALM = 'Mail-in-a-Box Management Server'
 
+
 class KeyAuthService:
-	"""Generate an API key for authenticating clients
+    """Generate an API key for authenticating clients
 
-	Clients must read the key from the key file and send the key with all HTTP
-	requests. The key is passed as the username field in the standard HTTP
-	Basic Auth header.
-	"""
-	def __init__(self):
-		self.auth_realm = DEFAULT_AUTH_REALM
-		self.key = self._generate_key()
-		self.key_path = DEFAULT_KEY_PATH
+    Clients must read the key from the key file and send the key with all HTTP
+    requests. The key is passed as the username field in the standard HTTP
+    Basic Auth header.
+    """
+    def __init__(self):
+        self.auth_realm = DEFAULT_AUTH_REALM
+        self.key = self._generate_key()
+        self.key_path = DEFAULT_KEY_PATH
 
-	def write_key(self):
-		"""Write key to file so authorized clients can get the key
+    def write_key(self):
+        """Write key to file so authorized clients can get the key
 
-		The key file is created with mode 0640 so that additional users can be
-		authorized to access the API by granting group/ACL read permissions on
-		the key file.
-		"""
-		def create_file_with_mode(path, mode):
-			# Based on answer by A-B-B: http://stackoverflow.com/a/15015748
-			old_umask = os.umask(0)
-			try:
-				return os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, mode), 'w')
-			finally:
-				os.umask(old_umask)
+        The key file is created with mode 0640 so that additional users can be
+        authorized to access the API by granting group/ACL read permissions on
+        the key file.
+        """
+        def create_file_with_mode(path, mode):
+            # Based on answer by A-B-B: http://stackoverflow.com/a/15015748
+            old_umask = os.umask(0)
+            try:
+                return os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, mode), 'w')
+            finally:
+                os.umask(old_umask)
 
-		os.makedirs(os.path.dirname(self.key_path), exist_ok=True)
+        os.makedirs(os.path.dirname(self.key_path), exist_ok=True)
 
-		with create_file_with_mode(self.key_path, 0o640) as key_file:
-			key_file.write(self.key + '\n')
+        with create_file_with_mode(self.key_path, 0o640) as key_file:
+            key_file.write(self.key + '\n')
 
-	def authenticate(self, request, env):
-		"""Test if the client key passed in HTTP Authorization header matches the service key
-		or if the or username/password passed in the header matches an administrator user.
-		Returns a tuple of the user's email address and list of user privileges (e.g.
-		('my@email', []) or ('my@email', ['admin']); raises a ValueError on login failure.
-		If the user used an API key, the user's email is returned as None."""
+    def authenticate(self, request, env):
+        """Test if the client key passed in HTTP Authorization header matches the service key
+        or if the username/password passed in the header matches an administrator user.
+        Returns a tuple of the user's email address and list of user privileges (e.g.
+        ('my@email', []) or ('my@email', ['admin']); raises a ValueError on login failure.
+        If the user used an API key, the user's email is returned as None."""
 
-		def decode(s):
-			return base64.b64decode(s.encode('ascii')).decode('ascii')
+        def decode(s):
+            return base64.b64decode(s.encode('ascii')).decode('ascii')
 
-		def parse_basic_auth(header):
-			if " " not in header:
-				return None, None
-			scheme, credentials = header.split(maxsplit=1)
-			if scheme != 'Basic':
-				return None, None
+        def parse_basic_auth(header):
+            if " " not in header:
+                return None, None
+            scheme, credentials = header.split(maxsplit=1)
+            if scheme != 'Basic':
+                return None, None
 
-			credentials = decode(credentials)
-			if ":" not in credentials:
-				return None, None
-			username, password = credentials.split(':', maxsplit=1)
-			return username, password
+            credentials = decode(credentials)
+            if ":" not in credentials:
+                return None, None
+            username, password = credentials.split(':', maxsplit=1)
+            return username, password
 
-		header = request.headers.get('Authorization')
-		if not header:
-			raise ValueError("No authorization header provided.")
+        header = request.headers.get('Authorization')
+        if not header:
+            raise ValueError("No authorization header provided.")
 
-		username, password = parse_basic_auth(header)
+        username, password = parse_basic_auth(header)
 
-		if username in (None, ""):
-			raise ValueError("Authorization header invalid.")
-		elif username == self.key:
-			# The user passed the API key which grants administrative privs.
-			return (None, ["admin"])
-		else:
-			# The user is trying to log in with a username and user-specific
-			# API key or password. Raises or returns privs.
-			return (username, self.get_user_credentials(username, password, env))
+        if username in (None, ""):
+            raise ValueError("Authorization header invalid.")
+        elif username == self.key:
+            # The user passed the API key which grants administrative privs.
+            return (None, ["admin"])
+        else:
+            # The user is trying to log in with a username and user-specific
+            # API key or password. Raises or returns privs.
+            return (username, self.get_user_credentials(username, password, env))
 
-	def get_user_credentials(self, email, pw, env):
-		# Validate a user's credentials. On success returns a list of
-		# privileges (e.g. [] or ['admin']). On failure raises a ValueError
-		# with a login error message. 
+    def get_user_credentials(self, email, pw, env):
+        # Validate a user's credentials. On success returns a list of
+        # privileges (e.g. [] or ['admin']). On failure raises a ValueError
+        # with a login error message.
 
-		# Sanity check.
-		if email == "" or pw == "":
-			raise ValueError("Enter an email address and password.")
+        # Sanity check.
+        if email == "" or pw == "":
+            raise ValueError("Enter an email address and password.")
 
-		# The password might be a user-specific API key.
-		if hmac.compare_digest(self.create_user_key(email), pw):
-			# OK.
-			pass
-		else:
-			# Get the hashed password of the user. Raise a ValueError if the
-			# email address does not correspond to a user.
-			pw_hash = get_mail_password(email, env)
+        # The password might be a user-specific API key.
+        if hmac.compare_digest(self.create_user_key(email), pw):
+            # OK.
+            pass
+        else:
+            # Get the hashed password of the user. Raise a ValueError if the
+            # email address does not correspond to a user.
+            pw_hash = get_mail_password(email, env)
 
-			# Authenticate.
-			try:
-				# Use 'doveadm pw' to check credentials. doveadm will return
-				# a non-zero exit status if the credentials are no good,
-				# and check_call will raise an exception in that case.
-				utils.shell('check_call', [
-					"/usr/bin/doveadm", "pw",
-					"-p", pw,
-					"-t", pw_hash,
-					])
-			except:
-				# Login failed.
-				raise ValueError("Invalid password.")
+            # Authenticate.
+            try:
+                # Use 'doveadm pw' to check credentials. doveadm will return
+                # a non-zero exit status if the credentials are no good,
+                # and check_call will raise an exception in that case.
+                utils.shell('check_call', [
+                    "/usr/bin/doveadm", "pw",
+                    "-p", pw,
+                    "-t", pw_hash,
+                    ])
+            except:
+                # Login failed.
+                raise ValueError("Invalid password.")
 
-		# Get privileges for authorization.
+        # Get privileges for authorization.
 
-		# (This call should never fail on a valid user. But if it did fail, it would
-		# return a tuple of an error message and an HTTP status code.)
-		privs = get_mail_user_privileges(email, env)
-		if isinstance(privs, tuple): raise Exception("Error getting privileges.")
+        # (This call should never fail on a valid user. But if it did fail, it would
+        # return a tuple of an error message and an HTTP status code.)
+        privs = get_mail_user_privileges(email, env)
+        if isinstance(privs, tuple):
+            raise Exception("Error getting privileges.")
 
-		# Return a list of privileges.
-		return privs
+        # Return a list of privileges.
+        return privs
 
-	def create_user_key(self, email):
-		return hmac.new(self.key.encode('ascii'), b"AUTH:" + email.encode("utf8"), digestmod="sha1").hexdigest()
+    def create_user_key(self, email):
+        return hmac.new(self.key.encode('ascii'), b"AUTH:" + email.encode("utf8"), digestmod="sha1").hexdigest()
 
-	def _generate_key(self):
-		raw_key = os.urandom(32)
-		return base64.b64encode(raw_key).decode('ascii')
+    def _generate_key(self):
+        raw_key = os.urandom(32)
+        return base64.b64encode(raw_key).decode('ascii')
diff --git a/management/backup.py b/management/backup.py
index d161719d..d337d55c 100755
--- a/management/backup.py
+++ b/management/backup.py
@@ -9,8 +9,15 @@
 #    backup/secret_key.txt) to STORAGE_ROOT/backup/encrypted.
 # 5) STORAGE_ROOT/backup/after-backup is executd if it exists.
 
-import os, os.path, shutil, glob, re, datetime
-import dateutil.parser, dateutil.relativedelta, dateutil.tz
+import os
+import os.path
+import shutil
+import glob
+import re
+import datetime
+import dateutil.parser
+import dateutil.relativedelta
+import dateutil.tz
 
 from utils import exclusive_process, load_environment, shell
 
@@ -18,195 +25,209 @@ from utils import exclusive_process, load_environment, shell
 # that depends on it is this many days old.
 keep_backups_for_days = 3
 
+
 def backup_status(env):
-	# What is the current status of backups?
-	# Loop through all of the files in STORAGE_ROOT/backup/duplicity to
-	# get a list of all of the backups taken and sum up file sizes to
-	# see how large the storage is.
+    # What is the current status of backups?
+    # Loop through all of the files in STORAGE_ROOT/backup/duplicity to
+    # get a list of all of the backups taken and sum up file sizes to
+    # see how large the storage is.
 
-	now = datetime.datetime.now(dateutil.tz.tzlocal())
-	def reldate(date, ref, clip):
-		if ref < date: return clip
-		rd = dateutil.relativedelta.relativedelta(ref, date)
-		if rd.months > 1: return "%d months, %d days" % (rd.months, rd.days)
-		if rd.months == 1: return "%d month, %d days" % (rd.months, rd.days)
-		if rd.days >= 7: return "%d days" % rd.days
-		if rd.days > 1: return "%d days, %d hours" % (rd.days, rd.hours)
-		if rd.days == 1: return "%d day, %d hours" % (rd.days, rd.hours)
-		return "%d hours, %d minutes" % (rd.hours, rd.minutes)
+    now = datetime.datetime.now(dateutil.tz.tzlocal())
 
-	backups = { }
-	basedir = os.path.join(env['STORAGE_ROOT'], 'backup/duplicity/')
-	encdir = os.path.join(env['STORAGE_ROOT'], 'backup/encrypted/')
-	os.makedirs(basedir, exist_ok=True) # os.listdir fails if directory does not exist
-	for fn in os.listdir(basedir):
-		m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn)
-		if not m: raise ValueError(fn)
+    def reldate(date, ref, clip):
+        if ref < date:
+            return clip
+        rd = dateutil.relativedelta.relativedelta(ref, date)
+        if rd.months > 1:
+            return "%d months, %d days" % (rd.months, rd.days)
+        if rd.months == 1:
+            return "%d month, %d days" % (rd.months, rd.days)
+        if rd.days >= 7:
+            return "%d days" % rd.days
+        if rd.days > 1:
+            return "%d days, %d hours" % (rd.days, rd.hours)
+        if rd.days == 1:
+            return "%d day, %d hours" % (rd.days, rd.hours)
+        return "%d hours, %d minutes" % (rd.hours, rd.minutes)
 
-		key = m.group("date")
-		if key not in backups:
-			date = dateutil.parser.parse(m.group("date"))
-			backups[key] = {
-				"date": m.group("date"),
-				"date_str": date.strftime("%x %X"),
-				"date_delta": reldate(date, now, "the future?"),
-				"full": m.group("incbase") is None,
-				"previous": m.group("incbase"),
-				"size": 0,
-				"encsize": 0,
-			}
+    backups = {}
+    basedir = os.path.join(env['STORAGE_ROOT'], 'backup/duplicity/')
+    encdir = os.path.join(env['STORAGE_ROOT'], 'backup/encrypted/')
+    # os.listdir fails if directory does not exist
+    os.makedirs(basedir, exist_ok=True)
+    for fn in os.listdir(basedir):
+        m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn)
+        if not m:
+            raise ValueError(fn)
 
-		backups[key]["size"] += os.path.getsize(os.path.join(basedir, fn))
+        key = m.group("date")
+        if key not in backups:
+            date = dateutil.parser.parse(m.group("date"))
+            backups[key] = {
+                "date": m.group("date"),
+                "date_str": date.strftime("%x %X"),
+                "date_delta": reldate(date, now, "the future?"),
+                "full": m.group("incbase") is None,
+                "previous": m.group("incbase"),
+                "size": 0,
+                "encsize": 0,
+            }
 
-		# Also check encrypted size.
-		encfn = os.path.join(encdir, fn + ".enc")
-		if os.path.exists(encfn):
-			backups[key]["encsize"] += os.path.getsize(encfn)
+        backups[key]["size"] += os.path.getsize(os.path.join(basedir, fn))
 
-	# Ensure the rows are sorted reverse chronologically.
-	# This is relied on by should_force_full() and the next step.
-	backups = sorted(backups.values(), key = lambda b : b["date"], reverse=True)
+        # Also check encrypted size.
+        encfn = os.path.join(encdir, fn + ".enc")
+        if os.path.exists(encfn):
+            backups[key]["encsize"] += os.path.getsize(encfn)
 
-	# When will a backup be deleted?
-	saw_full = False
-	deleted_in = None
-	days_ago = now - datetime.timedelta(days=keep_backups_for_days)
-	for bak in backups:
-		if deleted_in:
-			# Subsequent backups are deleted when the most recent increment
-			# in the chain would be deleted.
-			bak["deleted_in"] = deleted_in
-		if bak["full"]:
-			# Reset when we get to a full backup. A new chain start next.
-			saw_full = True
-			deleted_in = None
-		elif saw_full and not deleted_in:
-			# Mark deleted_in only on the first increment after a full backup.
-			deleted_in = reldate(days_ago, dateutil.parser.parse(bak["date"]), "on next daily backup")
-			bak["deleted_in"] = deleted_in
+    # Ensure the rows are sorted reverse chronologically.
+    # This is relied on by should_force_full() and the next step.
+    backups = sorted(backups.values(), key=lambda b: b["date"], reverse=True)
+
+    # When will a backup be deleted?
+    saw_full = False
+    deleted_in = None
+    days_ago = now - datetime.timedelta(days=keep_backups_for_days)
+    for bak in backups:
+        if deleted_in:
+            # Subsequent backups are deleted when the most recent increment
+            # in the chain would be deleted.
+            bak["deleted_in"] = deleted_in
+        if bak["full"]:
+            # Reset when we get to a full backup. A new chain starts next.
+            saw_full = True
+            deleted_in = None
+        elif saw_full and not deleted_in:
+            # Mark deleted_in only on the first increment after a full backup.
+            deleted_in = reldate(days_ago, dateutil.parser.parse(bak["date"]), "on next daily backup")
+            bak["deleted_in"] = deleted_in
+
+    return {
+        "directory": basedir,
+        "encpwfile": os.path.join(env['STORAGE_ROOT'], 'backup/secret_key.txt'),
+        "encdirectory": encdir,
+        "tz": now.tzname(),
+        "backups": backups,
+    }
 
-	return {
-		"directory": basedir,
-		"encpwfile": os.path.join(env['STORAGE_ROOT'], 'backup/secret_key.txt'),
-		"encdirectory": encdir,
-		"tz": now.tzname(),
-		"backups": backups,
-	}
 
 def should_force_full(env):
-	# Force a full backup when the total size of the increments
-	# since the last full backup is greater than half the size
-	# of that full backup.
-	inc_size = 0
-	for bak in backup_status(env)["backups"]:
-		if not bak["full"]:
-			# Scan through the incremental backups cumulating
-			# size...
-			inc_size += bak["size"]
-		else:
-			# ...until we reach the most recent full backup.
-			# Return if we should to a full backup.
-			return inc_size > .5*bak["size"]
-	else:
-		# If we got here there are no (full) backups, so make one.
-		# (I love for/else blocks. Here it's just to show off.)
-		return True
+    # Force a full backup when the total size of the increments
+    # since the last full backup is greater than half the size
+    # of that full backup.
+    inc_size = 0
+    for bak in backup_status(env)["backups"]:
+        if not bak["full"]:
+            # Scan through the incremental backups, accumulating
+            # size...
+            inc_size += bak["size"]
+        else:
+            # ...until we reach the most recent full backup.
+            # Return whether we should do a full backup.
+            return inc_size > .5*bak["size"]
+    else:
+        # If we got here there are no (full) backups, so make one.
+        # (I love for/else blocks. Here it's just to show off.)
+        return True
+
 
 def perform_backup(full_backup):
-	env = load_environment()
+    env = load_environment()
 
-	exclusive_process("backup")
+    exclusive_process("backup")
 
-	# Ensure the backup directory exists.
-	backup_dir = os.path.join(env["STORAGE_ROOT"], 'backup')
-	backup_duplicity_dir = os.path.join(backup_dir, 'duplicity')
-	os.makedirs(backup_duplicity_dir, exist_ok=True)
+    # Ensure the backup directory exists.
+    backup_dir = os.path.join(env["STORAGE_ROOT"], 'backup')
+    backup_duplicity_dir = os.path.join(backup_dir, 'duplicity')
+    os.makedirs(backup_duplicity_dir, exist_ok=True)
 
-	# On the first run, always do a full backup. Incremental
-	# will fail. Otherwise do a full backup when the size of
-	# the increments since the most recent full backup are
-	# large.
-	full_backup = full_backup or should_force_full(env)
+    # On the first run, always do a full backup. Incremental
+    # will fail. Otherwise do a full backup when the size of
+    # the increments since the most recent full backup are
+    # large.
+    full_backup = full_backup or should_force_full(env)
 
-	# Stop services.
-	shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
-	shell('check_call', ["/usr/sbin/service", "postfix", "stop"])
+    # Stop services.
+    shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
+    shell('check_call', ["/usr/sbin/service", "postfix", "stop"])
 
-	# Update the backup mirror directory which mirrors the current
-	# STORAGE_ROOT (but excluding the backups themselves!).
-	try:
-		shell('check_call', [
-			"/usr/bin/duplicity",
-			"full" if full_backup else "incr",
-			"--no-encryption",
-			"--archive-dir", "/tmp/duplicity-archive-dir",
-			"--name", "mailinabox",
-			"--exclude", backup_dir,
-			"--volsize", "100",
-			"--verbosity", "warning",
-			env["STORAGE_ROOT"],
-			"file://" + backup_duplicity_dir
-			])
-	finally:
-		# Start services again.
-		shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
-		shell('check_call', ["/usr/sbin/service", "postfix", "start"])
+    # Update the backup mirror directory which mirrors the current
+    # STORAGE_ROOT (but excluding the backups themselves!).
+    try:
+        shell('check_call', [
+            "/usr/bin/duplicity",
+            "full" if full_backup else "incr",
+            "--no-encryption",
+            "--archive-dir", "/tmp/duplicity-archive-dir",
+            "--name", "mailinabox",
+            "--exclude", backup_dir,
+            "--volsize", "100",
+            "--verbosity", "warning",
+            env["STORAGE_ROOT"],
+            "file://" + backup_duplicity_dir
+            ])
+    finally:
+        # Start services again.
+        shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
+        shell('check_call', ["/usr/sbin/service", "postfix", "start"])
 
-	# Remove old backups. This deletes all backup data no longer needed
-	# from more than 31 days ago. Must do this before destroying the
-	# cache directory or else this command will re-create it.
-	shell('check_call', [
-		"/usr/bin/duplicity",
-		"remove-older-than",
-		"%dD" % keep_backups_for_days,
-		"--archive-dir", "/tmp/duplicity-archive-dir",
-		"--name", "mailinabox",
-		"--force",
-		"--verbosity", "warning",
-		"file://" + backup_duplicity_dir
-		])
+    # Remove old backups. This deletes all backup data no longer needed
+    # from more than 31 days ago. Must do this before destroying the
+    # cache directory or else this command will re-create it.
+    shell('check_call', [
+        "/usr/bin/duplicity",
+        "remove-older-than",
+        "%dD" % keep_backups_for_days,
+        "--archive-dir", "/tmp/duplicity-archive-dir",
+        "--name", "mailinabox",
+        "--force",
+        "--verbosity", "warning",
+        "file://" + backup_duplicity_dir
+        ])
 
-	# Remove duplicity's cache directory because it's redundant with our backup directory.
-	shutil.rmtree("/tmp/duplicity-archive-dir")
+    # Remove duplicity's cache directory because it's redundant with our backup directory.
+    shutil.rmtree("/tmp/duplicity-archive-dir")
 
-	# Encrypt all of the new files.
-	backup_encrypted_dir = os.path.join(backup_dir, 'encrypted')
-	os.makedirs(backup_encrypted_dir, exist_ok=True)
-	for fn in os.listdir(backup_duplicity_dir):
-		fn2 = os.path.join(backup_encrypted_dir, fn) + ".enc"
-		if os.path.exists(fn2): continue
+    # Encrypt all of the new files.
+    backup_encrypted_dir = os.path.join(backup_dir, 'encrypted')
+    os.makedirs(backup_encrypted_dir, exist_ok=True)
+    for fn in os.listdir(backup_duplicity_dir):
+        fn2 = os.path.join(backup_encrypted_dir, fn) + ".enc"
+        if os.path.exists(fn2):
+            continue
 
-		# Encrypt the backup using the backup private key.
-		shell('check_call', [
-			"/usr/bin/openssl",
-			"enc",
-			"-aes-256-cbc",
-			"-a",
-			"-salt",
-			"-in", os.path.join(backup_duplicity_dir, fn),
-			"-out", fn2,
-			"-pass", "file:%s" % os.path.join(backup_dir, "secret_key.txt"),
-			])
+        # Encrypt the backup using the backup private key.
+        shell('check_call', [
+            "/usr/bin/openssl",
+            "enc",
+            "-aes-256-cbc",
+            "-a",
+            "-salt",
+            "-in", os.path.join(backup_duplicity_dir, fn),
+            "-out", fn2,
+            "-pass", "file:%s" % os.path.join(backup_dir, "secret_key.txt"),
+            ])
 
-		# The backup can be decrypted with:
-		# openssl enc -d -aes-256-cbc -a -in latest.tgz.enc -out /dev/stdout -pass file:secret_key.txt | tar -z
+        # The backup can be decrypted with:
+        # openssl enc -d -aes-256-cbc -a -in latest.tgz.enc -out /dev/stdout -pass file:secret_key.txt | tar -z
 
-	# Remove encrypted backups that are no longer needed.
-	for fn in os.listdir(backup_encrypted_dir):
-		fn2 = os.path.join(backup_duplicity_dir, fn.replace(".enc", ""))
-		if os.path.exists(fn2): continue
-		os.unlink(os.path.join(backup_encrypted_dir, fn))
+    # Remove encrypted backups that are no longer needed.
+    for fn in os.listdir(backup_encrypted_dir):
+        fn2 = os.path.join(backup_duplicity_dir, fn.replace(".enc", ""))
+        if os.path.exists(fn2):
+            continue
+        os.unlink(os.path.join(backup_encrypted_dir, fn))
 
-	# Execute a post-backup script that does the copying to a remote server.
-	# Run as the STORAGE_USER user, not as root. Pass our settings in
-	# environment variables so the script has access to STORAGE_ROOT.
-	post_script = os.path.join(backup_dir, 'after-backup')
-	if os.path.exists(post_script):
-		shell('check_call',
-			['su', env['STORAGE_USER'], '-c', post_script],
-			env=env)
+    # Execute a post-backup script that does the copying to a remote server.
+    # Run as the STORAGE_USER user, not as root. Pass our settings in
+    # environment variables so the script has access to STORAGE_ROOT.
+    post_script = os.path.join(backup_dir, 'after-backup')
+    if os.path.exists(post_script):
+        shell('check_call',
+              ['su', env['STORAGE_USER'], '-c', post_script],
+              env=env)
 
 if __name__ == "__main__":
-	import sys
-	full_backup = "--full" in sys.argv
-	perform_backup(full_backup)
+    import sys
+    full_backup = "--full" in sys.argv
+    perform_backup(full_backup)
diff --git a/management/daemon.py b/management/daemon.py
index 2480eada..78f8ffd6 100755
--- a/management/daemon.py
+++ b/management/daemon.py
@@ -1,12 +1,16 @@
 #!/usr/bin/python3
 
-import os, os.path, re, json
+import os
+import os.path
+import re
+import json
 
 from functools import wraps
 
 from flask import Flask, request, render_template, abort, Response
 
-import auth, utils
+import auth
+import utils
 from mailconfig import get_mail_users, get_mail_users_ex, get_admins, add_mail_user, set_mail_password, remove_mail_user
 from mailconfig import get_mail_user_privileges, add_remove_mail_user_privilege
 from mailconfig import get_mail_aliases, get_mail_aliases_ex, get_mail_domains, add_mail_alias, remove_mail_alias
@@ -24,175 +28,192 @@ auth_service = auth.KeyAuthService()
 # We may deploy via a symbolic link, which confuses flask's template finding.
 me = __file__
 try:
-	me = os.readlink(__file__)
+    me = os.readlink(__file__)
 except OSError:
-	pass
+    pass
 
 app = Flask(__name__, template_folder=os.path.abspath(os.path.join(os.path.dirname(me), "templates")))
 
+
 # Decorator to protect views that require a user with 'admin' privileges.
 def authorized_personnel_only(viewfunc):
-	@wraps(viewfunc)
-	def newview(*args, **kwargs):
-		# Authenticate the passed credentials, which is either the API key or a username:password pair.
-		error = None
-		try:
-			email, privs = auth_service.authenticate(request, env)
-		except ValueError as e:
-			# Authentication failed.
-			privs = []
-			error = str(e)
+    @wraps(viewfunc)
+    def newview(*args, **kwargs):
+        # Authenticate the passed credentials, which is either the API key or a username:password pair.
+        error = None
+        try:
+            email, privs = auth_service.authenticate(request, env)
+        except ValueError as e:
+            # Authentication failed.
+            privs = []
+            error = str(e)
 
-		# Authorized to access an API view?
-		if "admin" in privs:
-			# Call view func.	
-			return viewfunc(*args, **kwargs)
-		elif not error:
-			error = "You are not an administrator."
+        # Authorized to access an API view?
+        if "admin" in privs:
+            # Call view func.
+            return viewfunc(*args, **kwargs)
+        elif not error:
+            error = "You are not an administrator."
 
-		# Not authorized. Return a 401 (send auth) and a prompt to authorize by default.
-		status = 401
-		headers = {
-			'WWW-Authenticate': 'Basic realm="{0}"'.format(auth_service.auth_realm),
-			'X-Reason': error,
-		}
+        # Not authorized. Return a 401 (send auth) and a prompt to authorize by default.
+        status = 401
+        headers = {
+            'WWW-Authenticate': 'Basic realm="{0}"'.format(auth_service.auth_realm),
+            'X-Reason': error,
+        }
 
-		if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
-			# Don't issue a 401 to an AJAX request because the user will
-			# be prompted for credentials, which is not helpful.
-			status = 403
-			headers = None
+        if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
+            # Don't issue a 401 to an AJAX request because the user will
+            # be prompted for credentials, which is not helpful.
+            status = 403
+            headers = None
 
-		if request.headers.get('Accept') in (None, "", "*/*"):
-			# Return plain text output.
-			return Response(error+"\n", status=status, mimetype='text/plain', headers=headers)
-		else:
-			# Return JSON output.
-			return Response(json.dumps({
-				"status": "error",
-				"reason": error,
-				})+"\n", status=status, mimetype='application/json', headers=headers)
+        if request.headers.get('Accept') in (None, "", "*/*"):
+            # Return plain text output.
+            return Response(error+"\n", status=status, mimetype='text/plain', headers=headers)
+        else:
+            # Return JSON output.
+            return Response(json.dumps({
+                "status": "error",
+                "reason": error,
+                })+"\n", status=status, mimetype='application/json', headers=headers)
+
+    return newview
 
-	return newview
 
 @app.errorhandler(401)
 def unauthorized(error):
-	return auth_service.make_unauthorized_response()
+    return auth_service.make_unauthorized_response()
+
 
 def json_response(data):
-	return Response(json.dumps(data), status=200, mimetype='application/json')
+    return Response(json.dumps(data), status=200, mimetype='application/json')
 
 ###################################
 
 # Control Panel (unauthenticated views)
 
+
 @app.route('/')
 def index():
-	# Render the control panel. This route does not require user authentication
-	# so it must be safe!
-	no_admins_exist = (len(get_admins(env)) == 0)
-	return render_template('index.html',
-		hostname=env['PRIMARY_HOSTNAME'],
-		storage_root=env['STORAGE_ROOT'],
-		no_admins_exist=no_admins_exist,
-	)
+    # Render the control panel. This route does not require user authentication
+    # so it must be safe!
+    no_admins_exist = (len(get_admins(env)) == 0)
+    return render_template(
+        'index.html',
+        hostname=env['PRIMARY_HOSTNAME'],
+        storage_root=env['STORAGE_ROOT'],
+        no_admins_exist=no_admins_exist,
+    )
+
 
 @app.route('/me')
 def me():
-	# Is the caller authorized?
-	try:
-		email, privs = auth_service.authenticate(request, env)
-	except ValueError as e:
-		return json_response({
-			"status": "invalid",
-			"reason": str(e),
-			})
+    # Is the caller authorized?
+    try:
+        email, privs = auth_service.authenticate(request, env)
+    except ValueError as e:
+        return json_response({
+            "status": "invalid",
+            "reason": str(e),
+            })
 
-	resp = {
-		"status": "ok",
-		"email": email,
-		"privileges": privs,
-	}
+    resp = {
+        "status": "ok",
+        "email": email,
+        "privileges": privs,
+    }
 
-	# Is authorized as admin? Return an API key for future use.
-	if "admin" in privs:
-		resp["api_key"] = auth_service.create_user_key(email)
+    # Is authorized as admin? Return an API key for future use.
+    if "admin" in privs:
+        resp["api_key"] = auth_service.create_user_key(email)
 
-	# Return.
-	return json_response(resp)
+    # Return.
+    return json_response(resp)
 
 # MAIL
 
+
 @app.route('/mail/users')
 @authorized_personnel_only
 def mail_users():
-	if request.args.get("format", "") == "json":
-		return json_response(get_mail_users_ex(env, with_archived=True, with_slow_info=True))
-	else:
-		return "".join(x+"\n" for x in get_mail_users(env))
+    if request.args.get("format", "") == "json":
+        return json_response(get_mail_users_ex(env, with_archived=True, with_slow_info=True))
+    else:
+        return "".join(x+"\n" for x in get_mail_users(env))
+
 
 @app.route('/mail/users/add', methods=['POST'])
 @authorized_personnel_only
 def mail_users_add():
-	try:
-		return add_mail_user(request.form.get('email', ''), request.form.get('password', ''), request.form.get('privileges', ''), env)
-	except ValueError as e:
-		return (str(e), 400)
+    try:
+        return add_mail_user(request.form.get('email', ''), request.form.get('password', ''), request.form.get('privileges', ''), env)
+    except ValueError as e:
+        return (str(e), 400)
+
 
 @app.route('/mail/users/password', methods=['POST'])
 @authorized_personnel_only
 def mail_users_password():
-	try:
-		return set_mail_password(request.form.get('email', ''), request.form.get('password', ''), env)
-	except ValueError as e:
-		return (str(e), 400)
+    try:
+        return set_mail_password(request.form.get('email', ''), request.form.get('password', ''), env)
+    except ValueError as e:
+        return (str(e), 400)
+
 
 @app.route('/mail/users/remove', methods=['POST'])
 @authorized_personnel_only
 def mail_users_remove():
-	return remove_mail_user(request.form.get('email', ''), env)
+    return remove_mail_user(request.form.get('email', ''), env)
 
 
 @app.route('/mail/users/privileges')
 @authorized_personnel_only
 def mail_user_privs():
-	privs = get_mail_user_privileges(request.args.get('email', ''), env)
-	if isinstance(privs, tuple): return privs # error
-	return "\n".join(privs)
+    privs = get_mail_user_privileges(request.args.get('email', ''), env)
+    # error
+    if isinstance(privs, tuple):
+        return privs
+    return "\n".join(privs)
+
 
 @app.route('/mail/users/privileges/add', methods=['POST'])
 @authorized_personnel_only
 def mail_user_privs_add():
-	return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "add", env)
+    return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "add", env)
+
 
 @app.route('/mail/users/privileges/remove', methods=['POST'])
 @authorized_personnel_only
 def mail_user_privs_remove():
-	return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "remove", env)
+    return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "remove", env)
 
 
 @app.route('/mail/aliases')
 @authorized_personnel_only
 def mail_aliases():
-	if request.args.get("format", "") == "json":
-		return json_response(get_mail_aliases_ex(env))
-	else:
-		return "".join(x+"\t"+y+"\n" for x, y in get_mail_aliases(env))
+    if request.args.get("format", "") == "json":
+        return json_response(get_mail_aliases_ex(env))
+    else:
+        return "".join(x+"\t"+y+"\n" for x, y in get_mail_aliases(env))
+
 
 @app.route('/mail/aliases/add', methods=['POST'])
 @authorized_personnel_only
 def mail_aliases_add():
-	return add_mail_alias(
-		request.form.get('source', ''),
-		request.form.get('destination', ''),
-		env,
-		update_if_exists=(request.form.get('update_if_exists', '') == '1')
-		)
+    return add_mail_alias(
+        request.form.get('source', ''),
+        request.form.get('destination', ''),
+        env,
+        update_if_exists=(request.form.get('update_if_exists', '') == '1')
+        )
+
 
 @app.route('/mail/aliases/remove', methods=['POST'])
 @authorized_personnel_only
 def mail_aliases_remove():
-	return remove_mail_alias(request.form.get('source', ''), env)
+    return remove_mail_alias(request.form.get('source', ''), env)
+
 
 @app.route('/mail/domains')
 @authorized_personnel_only
@@ -201,172 +222,196 @@ def mail_domains():
 
 # DNS
 
+
 @app.route('/dns/zones')
 @authorized_personnel_only
 def dns_zones():
-	from dns_update import get_dns_zones
-	return json_response([z[0] for z in get_dns_zones(env)])
+    from dns_update import get_dns_zones
+    return json_response([z[0] for z in get_dns_zones(env)])
+
 
 @app.route('/dns/update', methods=['POST'])
 @authorized_personnel_only
 def dns_update():
-	from dns_update import do_dns_update
-	try:
-		return do_dns_update(env, force=request.form.get('force', '') == '1')
-	except Exception as e:
-		return (str(e), 500)
+    from dns_update import do_dns_update
+    try:
+        return do_dns_update(env, force=request.form.get('force', '') == '1')
+    except Exception as e:
+        return (str(e), 500)
+
 
 @app.route('/dns/secondary-nameserver')
 @authorized_personnel_only
 def dns_get_secondary_nameserver():
-	from dns_update import get_custom_dns_config
-	return json_response({ "hostname": get_custom_dns_config(env).get("_secondary_nameserver") })
+    from dns_update import get_custom_dns_config
+    return json_response({"hostname": get_custom_dns_config(env).get("_secondary_nameserver")})
+
 
 @app.route('/dns/secondary-nameserver', methods=['POST'])
 @authorized_personnel_only
 def dns_set_secondary_nameserver():
-	from dns_update import set_secondary_dns
-	try:
-		return set_secondary_dns(request.form.get('hostname'), env)
-	except ValueError as e:
-		return (str(e), 400)
+    from dns_update import set_secondary_dns
+    try:
+        return set_secondary_dns(request.form.get('hostname'), env)
+    except ValueError as e:
+        return (str(e), 400)
+
 
 @app.route('/dns/set')
 @authorized_personnel_only
 def dns_get_records():
-	from dns_update import get_custom_dns_config, get_custom_records
-	additional_records = get_custom_dns_config(env)
-	records = get_custom_records(None, additional_records, env)
-	return json_response([{
-		"qname": r[0],
-		"rtype": r[1],
-		"value": r[2],
-		} for r in records])
+    from dns_update import get_custom_dns_config, get_custom_records
+    additional_records = get_custom_dns_config(env)
+    records = get_custom_records(None, additional_records, env)
+    return json_response([{
+        "qname": r[0],
+        "rtype": r[1],
+        "value": r[2],
+        } for r in records])
+
 
 @app.route('/dns/set/<qname>', methods=['POST'])
 @app.route('/dns/set/<qname>/<rtype>', methods=['POST'])
 @app.route('/dns/set/<qname>/<rtype>/<value>', methods=['POST'])
 @authorized_personnel_only
 def dns_set_record(qname, rtype="A", value=None):
-	from dns_update import do_dns_update, set_custom_dns_record
-	try:
-		# Get the value from the URL, then the POST parameters, or if it is not set then
-		# use the remote IP address of the request --- makes dynamic DNS easy. To clear a
-		# value, '' must be explicitly passed.
-		if value is None:
-			value = request.form.get("value")
-		if value is None:
-			value = request.environ.get("HTTP_X_FORWARDED_FOR") # normally REMOTE_ADDR but we're behind nginx as a reverse proxy
-		if value == '' or value == '__delete__':
-			# request deletion
-			value = None
-		if set_custom_dns_record(qname, rtype, value, env):
-			return do_dns_update(env)
-		return "OK"
-	except ValueError as e:
-		return (str(e), 400)
+    from dns_update import do_dns_update, set_custom_dns_record
+    try:
+        # Get the value from the URL, then the POST parameters, or if it is not set then
+        # use the remote IP address of the request --- makes dynamic DNS easy. To clear a
+        # value, '' must be explicitly passed.
+        if value is None:
+            value = request.form.get("value")
+        if value is None:
+            # normally REMOTE_ADDR but we're behind nginx as a reverse proxy
+            value = request.environ.get("HTTP_X_FORWARDED_FOR")
+        if value == '' or value == '__delete__':
+            # request deletion
+            value = None
+        if set_custom_dns_record(qname, rtype, value, env):
+            return do_dns_update(env)
+        return "OK"
+    except ValueError as e:
+        return (str(e), 400)
+
 
 @app.route('/dns/dump')
 @authorized_personnel_only
 def dns_get_dump():
-	from dns_update import build_recommended_dns
-	return json_response(build_recommended_dns(env))
+    from dns_update import build_recommended_dns
+    return json_response(build_recommended_dns(env))
 
 # SSL
 
+
 @app.route('/ssl/csr/<domain>', methods=['POST'])
 @authorized_personnel_only
 def ssl_get_csr(domain):
-	from web_update import get_domain_ssl_files, create_csr
-	ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
-	return create_csr(domain, ssl_key, env)
+    from web_update import get_domain_ssl_files, create_csr
+    ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
+    return create_csr(domain, ssl_key, env)
+
 
 @app.route('/ssl/install', methods=['POST'])
 @authorized_personnel_only
 def ssl_install_cert():
-	from web_update import install_cert
-	domain = request.form.get('domain')
-	ssl_cert = request.form.get('cert')
-	ssl_chain = request.form.get('chain')
-	return install_cert(domain, ssl_cert, ssl_chain, env)
+    from web_update import install_cert
+    domain = request.form.get('domain')
+    ssl_cert = request.form.get('cert')
+    ssl_chain = request.form.get('chain')
+    return install_cert(domain, ssl_cert, ssl_chain, env)
 
 # WEB
 
+
 @app.route('/web/domains')
 @authorized_personnel_only
 def web_get_domains():
-	from web_update import get_web_domains_info
-	return json_response(get_web_domains_info(env))
+    from web_update import get_web_domains_info
+    return json_response(get_web_domains_info(env))
+
 
 @app.route('/web/update', methods=['POST'])
 @authorized_personnel_only
 def web_update():
-	from web_update import do_web_update
-	return do_web_update(env)
+    from web_update import do_web_update
+    return do_web_update(env)
 
 # System
 
+
 @app.route('/system/status', methods=["POST"])
 @authorized_personnel_only
 def system_status():
-	from status_checks import run_checks
-	class WebOutput:
-		def __init__(self):
-			self.items = []
-		def add_heading(self, heading):
-			self.items.append({ "type": "heading", "text": heading, "extra": [] })
-		def print_ok(self, message):
-			self.items.append({ "type": "ok", "text": message, "extra": [] })
-		def print_error(self, message):
-			self.items.append({ "type": "error", "text": message, "extra": [] })
-		def print_warning(self, message):
-			self.items.append({ "type": "warning", "text": message, "extra": [] })
-		def print_line(self, message, monospace=False):
-			self.items[-1]["extra"].append({ "text": message, "monospace": monospace })
-	output = WebOutput()
-	run_checks(env, output, pool)
-	return json_response(output.items)
+    from status_checks import run_checks
+
+    class WebOutput:
+        def __init__(self):
+            self.items = []
+
+        def add_heading(self, heading):
+            self.items.append({"type": "heading", "text": heading, "extra": []})
+
+        def print_ok(self, message):
+            self.items.append({"type": "ok", "text": message, "extra": []})
+
+        def print_error(self, message):
+            self.items.append({"type": "error", "text": message, "extra": []})
+
+        def print_warning(self, message):
+            self.items.append({"type": "warning", "text": message, "extra": []})
+
+        def print_line(self, message, monospace=False):
+            self.items[-1]["extra"].append({"text": message, "monospace": monospace})
+
+    output = WebOutput()
+    run_checks(env, output, pool)
+    return json_response(output.items)
+
 
 @app.route('/system/updates')
 @authorized_personnel_only
 def show_updates():
-	from status_checks import list_apt_updates
-	return "".join(
-		"%s (%s)\n"
-		% (p["package"], p["version"])
-		for p in list_apt_updates())
+    from status_checks import list_apt_updates
+    return "".join(
+        "%s (%s)\n"
+        % (p["package"], p["version"])
+        for p in list_apt_updates())
+
 
 @app.route('/system/update-packages', methods=["POST"])
 @authorized_personnel_only
 def do_updates():
-	utils.shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
-	return utils.shell("check_output", ["/usr/bin/apt-get", "-y", "upgrade"], env={
-		"DEBIAN_FRONTEND": "noninteractive"
-	})
+    utils.shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
+    return utils.shell("check_output", ["/usr/bin/apt-get", "-y", "upgrade"], env={
+        "DEBIAN_FRONTEND": "noninteractive"
+    })
+
 
 @app.route('/system/backup/status')
 @authorized_personnel_only
 def backup_status():
-	from backup import backup_status
-	return json_response(backup_status(env))
+    from backup import backup_status
+    return json_response(backup_status(env))
 
 # APP
 
 if __name__ == '__main__':
-	if "DEBUG" in os.environ: app.debug = True
-	if "APIKEY" in os.environ: auth_service.key = os.environ["APIKEY"]
+    if "DEBUG" in os.environ:
+        app.debug = True
+    if "APIKEY" in os.environ:
+        auth_service.key = os.environ["APIKEY"]
 
-	if not app.debug:
-		app.logger.addHandler(utils.create_syslog_handler())
+    if not app.debug:
+        app.logger.addHandler(utils.create_syslog_handler())
 
-	# For testing on the command line, you can use `curl` like so:
-	#    curl --user $(</var/lib/mailinabox/api.key): http://localhost:10222/mail/users
-	auth_service.write_key()
+    # For testing on the command line, you can use `curl` like so:
+    #    curl --user $(</var/lib/mailinabox/api.key): http://localhost:10222/mail/users
+    auth_service.write_key()
 
-	# For testing in the browser, you can copy the API key that's output to the
-	# debug console and enter that as the username
-	app.logger.info('API key: ' + auth_service.key)
-
-	# Start the application server. Listens on 127.0.0.1 (IPv4 only).
-	app.run(port=10222)
+    # For testing in the browser, you can copy the API key that's output to the
+    # debug console and enter that as the username
+    app.logger.info('API key: ' + auth_service.key)
 
+    # Start the application server. Listens on 127.0.0.1 (IPv4 only).
+    app.run(port=10222)
diff --git a/management/dns_update.py b/management/dns_update.py
index fb00854f..a340e268 100755
--- a/management/dns_update.py
+++ b/management/dns_update.py
@@ -4,7 +4,13 @@
 # and mail aliases and restarts nsd.
 ########################################################################
 
-import os, os.path, urllib.parse, datetime, re, hashlib, base64
+import os
+import os.path
+import urllib.parse
+import datetime
+import re
+import hashlib
+import base64
 import ipaddress
 import rtyaml
 import dns.resolver
@@ -12,371 +18,389 @@ import dns.resolver
 from mailconfig import get_mail_domains
 from utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains
 
+
 def get_dns_domains(env):
-	# Add all domain names in use by email users and mail aliases and ensure
-	# PRIMARY_HOSTNAME is in the list.
-	domains = set()
-	domains |= get_mail_domains(env)
-	domains.add(env['PRIMARY_HOSTNAME'])
-	return domains
+    # Add all domain names in use by email users and mail aliases and ensure
+    # PRIMARY_HOSTNAME is in the list.
+    domains = set()
+    domains |= get_mail_domains(env)
+    domains.add(env['PRIMARY_HOSTNAME'])
+    return domains
+
 
 def get_dns_zones(env):
-	# What domains should we create DNS zones for? Never create a zone for
-	# a domain & a subdomain of that domain.
-	domains = get_dns_domains(env)
-	
-	# Exclude domains that are subdomains of other domains we know. Proceed
-	# by looking at shorter domains first.
-	zone_domains = set()
-	for domain in sorted(domains, key=lambda d : len(d)):
-		for d in zone_domains:
-			if domain.endswith("." + d):
-				# We found a parent domain already in the list.
-				break
-		else:
-			# 'break' did not occur: there is no parent domain.
-			zone_domains.add(domain)
+    # What domains should we create DNS zones for? Never create a zone for
+    # a domain & a subdomain of that domain.
+    domains = get_dns_domains(env)
 
-	# Make a nice and safe filename for each domain.
-	zonefiles = []
-	for domain in zone_domains:
-		zonefiles.append([domain, safe_domain_name(domain) + ".txt"])
+    # Exclude domains that are subdomains of other domains we know. Proceed
+    # by looking at shorter domains first.
+    zone_domains = set()
+    for domain in sorted(domains, key=lambda d: len(d)):
+        for d in zone_domains:
+            if domain.endswith("." + d):
+                # We found a parent domain already in the list.
+                break
+        else:
+            # 'break' did not occur: there is no parent domain.
+            zone_domains.add(domain)
+
+    # Make a nice and safe filename for each domain.
+    zonefiles = []
+    for domain in zone_domains:
+        zonefiles.append([domain, safe_domain_name(domain) + ".txt"])
+
+    # Sort the list so that the order is nice and so that nsd.conf has a
+    # stable order so we don't rewrite the file & restart the service
+    # meaninglessly.
+    zone_order = sort_domains([zone[0] for zone in zonefiles], env)
+    zonefiles.sort(key=lambda zone: zone_order.index(zone[0]))
+
+    return zonefiles
 
-	# Sort the list so that the order is nice and so that nsd.conf has a
-	# stable order so we don't rewrite the file & restart the service
-	# meaninglessly.
-	zone_order = sort_domains([ zone[0] for zone in zonefiles ], env)
-	zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
 
-	return zonefiles
-	
 def get_custom_dns_config(env):
-	try:
-		return rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
-	except:
-		return { }
+    try:
+        return rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
+    except:
+        return {}
+
 
 def write_custom_dns_config(config, env):
-	config_yaml = rtyaml.dump(config)
-	with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
-		f.write(config_yaml)
+    config_yaml = rtyaml.dump(config)
+    with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
+        f.write(config_yaml)
+
 
 def do_dns_update(env, force=False):
-	# What domains (and their zone filenames) should we build?
-	domains = get_dns_domains(env)
-	zonefiles = get_dns_zones(env)
+    # What domains (and their zone filenames) should we build?
+    domains = get_dns_domains(env)
+    zonefiles = get_dns_zones(env)
 
-	# Custom records to add to zones.
-	additional_records = get_custom_dns_config(env)
+    # Custom records to add to zones.
+    additional_records = get_custom_dns_config(env)
 
-	# Write zone files.
-	os.makedirs('/etc/nsd/zones', exist_ok=True)
-	updated_domains = []
-	for i, (domain, zonefile) in enumerate(zonefiles):
-		# Build the records to put in the zone.
-		records = build_zone(domain, domains, additional_records, env)
+    # Write zone files.
+    os.makedirs('/etc/nsd/zones', exist_ok=True)
+    updated_domains = []
+    for i, (domain, zonefile) in enumerate(zonefiles):
+        # Build the records to put in the zone.
+        records = build_zone(domain, domains, additional_records, env)
 
-		# See if the zone has changed, and if so update the serial number
-		# and write the zone file.
-		if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force):
-			# Zone was not updated. There were no changes.
-			continue
+        # See if the zone has changed, and if so update the serial number
+        # and write the zone file.
+        if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force):
+            # Zone was not updated. There were no changes.
+            continue
 
-		# If this is a .justtesting.email domain, then post the update.
-		try:
-			justtestingdotemail(domain, records)
-		except:
-			# Hmm. Might be a network issue. If we stop now, will we end
-			# up in an inconsistent state? Let's just continue.
-			pass
+        # If this is a .justtesting.email domain, then post the update.
+        try:
+            justtestingdotemail(domain, records)
+        except:
+            # Hmm. Might be a network issue. If we stop now, will we end
+            # up in an inconsistent state? Let's just continue.
+            pass
 
-		# Mark that we just updated this domain.
-		updated_domains.append(domain)
+        # Mark that we just updated this domain.
+        updated_domains.append(domain)
 
-		# Sign the zone.
-		#
-		# Every time we sign the zone we get a new result, which means
-		# we can't sign a zone without bumping the zone's serial number.
-		# Thus we only sign a zone if write_nsd_zone returned True
-		# indicating the zone changed, and thus it got a new serial number.
-		# write_nsd_zone is smart enough to check if a zone's signature
-		# is nearing expiration and if so it'll bump the serial number
-		# and return True so we get a chance to re-sign it.
-		sign_zone(domain, zonefile, env)
+        # Sign the zone.
+        #
+        # Every time we sign the zone we get a new result, which means
+        # we can't sign a zone without bumping the zone's serial number.
+        # Thus we only sign a zone if write_nsd_zone returned True
+        # indicating the zone changed, and thus it got a new serial number.
+        # write_nsd_zone is smart enough to check if a zone's signature
+        # is nearing expiration and if so it'll bump the serial number
+        # and return True so we get a chance to re-sign it.
+        sign_zone(domain, zonefile, env)
 
-	# Now that all zones are signed (some might not have changed and so didn't
-	# just get signed now, but were before) update the zone filename so nsd.conf
-	# uses the signed file.
-	for i in range(len(zonefiles)):
-		zonefiles[i][1] += ".signed"
+    # Now that all zones are signed (some might not have changed and so didn't
+    # just get signed now, but were before) update the zone filename so nsd.conf
+    # uses the signed file.
+    for i in range(len(zonefiles)):
+        zonefiles[i][1] += ".signed"
 
-	# Write the main nsd.conf file.
-	if write_nsd_conf(zonefiles, additional_records, env):
-		# Make sure updated_domains contains *something* if we wrote an updated
-		# nsd.conf so that we know to restart nsd.
-		if len(updated_domains) == 0:
-			updated_domains.append("DNS configuration")
+    # Write the main nsd.conf file.
+    if write_nsd_conf(zonefiles, additional_records, env):
+        # Make sure updated_domains contains *something* if we wrote an updated
+        # nsd.conf so that we know to restart nsd.
+        if len(updated_domains) == 0:
+            updated_domains.append("DNS configuration")
 
-	# Kick nsd if anything changed.
-	if len(updated_domains) > 0:
-		shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
+    # Kick nsd if anything changed.
+    if len(updated_domains) > 0:
+        shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
 
-	# Write the OpenDKIM configuration tables.
-	if write_opendkim_tables(domains, env):
-		# Settings changed. Kick opendkim.
-		shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
-		if len(updated_domains) == 0:
-			# If this is the only thing that changed?
-			updated_domains.append("OpenDKIM configuration")
+    # Write the OpenDKIM configuration tables.
+    if write_opendkim_tables(domains, env):
+        # Settings changed. Kick opendkim.
+        shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
+        if len(updated_domains) == 0:
+            # OpenDKIM config may be the only thing that changed; record it for the status message.
+            updated_domains.append("OpenDKIM configuration")
 
-	if len(updated_domains) == 0:
-		# if nothing was updated (except maybe OpenDKIM's files), don't show any output
-		return ""
-	else:
-		return "updated DNS: " + ",".join(updated_domains) + "\n"
+    if len(updated_domains) == 0:
+        # if nothing was updated (except maybe OpenDKIM's files), don't show any output
+        return ""
+    else:
+        return "updated DNS: " + ",".join(updated_domains) + "\n"
 
 ########################################################################
 
+
 def build_zone(domain, all_domains, additional_records, env, is_zone=True):
-	records = []
+    records = []
 
-	# For top-level zones, define the authoritative name servers.
-	#
-	# Normally we are our own nameservers. Some TLDs require two distinct IP addresses,
-	# so we allow the user to override the second nameserver definition so that
-	# secondary DNS can be set up elsewhere.
-	#
-	# 'False' in the tuple indicates these records would not be used if the zone
-	# is managed outside of the box.
-	if is_zone:
-		# Obligatory definition of ns1.PRIMARY_HOSTNAME.
-		records.append((None,  "NS",  "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
+    # For top-level zones, define the authoritative name servers.
+    #
+    # Normally we are our own nameservers. Some TLDs require two distinct IP addresses,
+    # so we allow the user to override the second nameserver definition so that
+    # secondary DNS can be set up elsewhere.
+    #
+    # 'False' in the tuple indicates these records would not be used if the zone
+    # is managed outside of the box.
+    if is_zone:
+        # Obligatory definition of ns1.PRIMARY_HOSTNAME.
+        records.append((None,  "NS",  "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
 
-		# Define ns2.PRIMARY_HOSTNAME or whatever the user overrides.
-		secondary_ns = additional_records.get("_secondary_nameserver", "ns2." + env["PRIMARY_HOSTNAME"])
-		records.append((None,  "NS", secondary_ns+'.', False))
+        # Define ns2.PRIMARY_HOSTNAME or whatever the user overrides.
+        secondary_ns = additional_records.get("_secondary_nameserver", "ns2." + env["PRIMARY_HOSTNAME"])
+        records.append((None,  "NS", secondary_ns+'.', False))
 
+    # In PRIMARY_HOSTNAME...
+    if domain == env["PRIMARY_HOSTNAME"]:
+        # Define ns1 and ns2.
+        # 'False' in the tuple indicates these records would not be used if the zone
+        # is managed outside of the box.
+        records.append(("ns1", "A", env["PUBLIC_IP"], False))
+        records.append(("ns2", "A", env["PUBLIC_IP"], False))
+        if env.get('PUBLIC_IPV6'):
+            records.append(("ns1", "AAAA", env["PUBLIC_IPV6"], False))
+            records.append(("ns2", "AAAA", env["PUBLIC_IPV6"], False))
 
-	# In PRIMARY_HOSTNAME...
-	if domain == env["PRIMARY_HOSTNAME"]:
-		# Define ns1 and ns2.
-		# 'False' in the tuple indicates these records would not be used if the zone
-		# is managed outside of the box.
-		records.append(("ns1", "A", env["PUBLIC_IP"], False))
-		records.append(("ns2", "A", env["PUBLIC_IP"], False))
-		if env.get('PUBLIC_IPV6'):
-			records.append(("ns1", "AAAA", env["PUBLIC_IPV6"], False))
-			records.append(("ns2", "AAAA", env["PUBLIC_IPV6"], False))
+        # Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them
+        # and we can provide different explanatory text.
+        records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box."))
+        if env.get("PUBLIC_IPV6"):
+            records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box."))
 
-		# Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them
-		# and we can provide different explanatory text.
-		records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box."))
-		if env.get("PUBLIC_IPV6"): records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box."))
+        # Add a DANE TLSA record for SMTP.
+        records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used."))
 
-		# Add a DANE TLSA record for SMTP.
-		records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used."))
+        # Add SSHFP records to help SSH key validation. One per available SSH key on this system.
+        for value in build_sshfp_records():
+            records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh."))
 
-		# Add a SSHFP records to help SSH key validation. One per available SSH key on this system.
-		for value in build_sshfp_records():
-			records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh."))
+    # The MX record says where email for the domain should be delivered: Here!
+    records.append((None,  "MX",  "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain))
 
-	# The MX record says where email for the domain should be delivered: Here!
-	records.append((None,  "MX",  "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain))
+    # SPF record: Permit the box ('mx', see above) to send mail on behalf of
+    # the domain, and no one else.
+    records.append((None,  "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain))
 
-	# SPF record: Permit the box ('mx', see above) to send mail on behalf of
-	# the domain, and no one else.
-	records.append((None,  "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain))
+    # Add DNS records for any subdomains of this domain. We should not have a zone for
+    # both a domain and one of its subdomains.
+    subdomains = [d for d in all_domains if d.endswith("." + domain)]
+    for subdomain in subdomains:
+        subdomain_qname = subdomain[0:-len("." + domain)]
+        subzone = build_zone(subdomain, [], additional_records, env, is_zone=False)
+        for child_qname, child_rtype, child_value, child_explanation in subzone:
+            if child_qname is None:
+                child_qname = subdomain_qname
+            else:
+                child_qname += "." + subdomain_qname
+            records.append((child_qname, child_rtype, child_value, child_explanation))
 
-	# Add DNS records for any subdomains of this domain. We should not have a zone for
-	# both a domain and one of its subdomains.
-	subdomains = [d for d in all_domains if d.endswith("." + domain)]
-	for subdomain in subdomains:
-		subdomain_qname = subdomain[0:-len("." + domain)]
-		subzone = build_zone(subdomain, [], additional_records, env, is_zone=False)
-		for child_qname, child_rtype, child_value, child_explanation in subzone:
-			if child_qname == None:
-				child_qname = subdomain_qname
-			else:
-				child_qname += "." + subdomain_qname
-			records.append((child_qname, child_rtype, child_value, child_explanation))
+    def has_rec(qname, rtype, prefix=None):
+        for rec in records:
+            if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
+                return True
+        return False
 
-	def has_rec(qname, rtype, prefix=None):
-		for rec in records:
-			if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
-				return True
-		return False
+    # The user may set other records that don't conflict with our settings.
+    for qname, rtype, value in get_custom_records(domain, additional_records, env):
+        if has_rec(qname, rtype):
+            continue
+        records.append((qname, rtype, value, "(Set by user.)"))
 
-	# The user may set other records that don't conflict with our settings.
-	for qname, rtype, value in get_custom_records(domain, additional_records, env):
-		if has_rec(qname, rtype): continue
-		records.append((qname, rtype, value, "(Set by user.)"))
+    # Add defaults if not overridden by the user's custom settings (and not otherwise configured).
+    # Any "CNAME" record on the qname overrides A and AAAA.
+    defaults = [
+        (None,  "A",    env["PUBLIC_IP"],       "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain),
+        ("www", "A",    env["PUBLIC_IP"],       "Optional. Sets the IP address that www.%s resolves to, e.g. for web hosting." % domain),
+        (None,  "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
+        ("www", "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that www.%s resolves to, e.g. for web hosting." % domain),
+    ]
+    for qname, rtype, value, explanation in defaults:
+        # skip IPV6 if not set
+        if value is None or value.strip() == "":
+            continue
+        # don't create any default 'www' subdomains on what are themselves subdomains
+        if not is_zone and qname == "www":
+            continue
+        # Set the default record, but only if:
+        # (1) there is no user-set record of the same type already
+        # (2) there is no CNAME record already, since you can't set both and who knows what takes precedence
+        # (3) there is no A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record)
+        if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"):
+            records.append((qname, rtype, value, explanation))
 
-	# Add defaults if not overridden by the user's custom settings (and not otherwise configured).
-	# Any "CNAME" record on the qname overrides A and AAAA.
-	defaults = [
-		(None,  "A",    env["PUBLIC_IP"],       "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain),
-		("www", "A",    env["PUBLIC_IP"],       "Optional. Sets the IP address that www.%s resolves to, e.g. for web hosting." % domain),
-		(None,  "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
-		("www", "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that www.%s resolves to, e.g. for web hosting." % domain),
-	]
-	for qname, rtype, value, explanation in defaults:
-		if value is None or value.strip() == "": continue # skip IPV6 if not set
-		if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains
-		# Set the default record, but not if:
-		# (1) there is not a user-set record of the same type already
-		# (2) there is not a CNAME record already, since you can't set both and who knows what takes precedence
-		# (2) there is not an A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record)
-		if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"):
-			records.append((qname, rtype, value, explanation))
+    # Append the DKIM TXT record to the zone as generated by OpenDKIM.
+    opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
+    with open(opendkim_record_file) as orf:
+        m = re.match(r'(\S+)\s+IN\s+TXT\s+\( "([^"]+)"\s+"([^"]+)"\s*\)', orf.read(), re.S)
+        val = m.group(2) + m.group(3)
+        records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain))
 
-	# Append the DKIM TXT record to the zone as generated by OpenDKIM.
-	opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
-	with open(opendkim_record_file) as orf:
-		m = re.match(r'(\S+)\s+IN\s+TXT\s+\( "([^"]+)"\s+"([^"]+)"\s*\)', orf.read(), re.S)
-		val = m.group(2) + m.group(3)
-		records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain))
+    # Append a DMARC record.
+    records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine', "Optional. Specifies that mail that does not originate from the box but claims to be from @%s is suspect and should be quarantined by the recipient's mail system." % domain))
 
-	# Append a DMARC record.
-	records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine', "Optional. Specifies that mail that does not originate from the box but claims to be from @%s is suspect and should be quarantined by the recipient's mail system." % domain))
+    # For any subdomain with an A record but no SPF or DMARC record, add strict policy records.
+    all_resolvable_qnames = set(r[0] for r in records if r[1] in ("A", "AAAA"))
+    for qname in all_resolvable_qnames:
+        if not has_rec(qname, "TXT", prefix="v=spf1 "):
+            records.append((qname,  "TXT", 'v=spf1 a mx -all', "Prevents unauthorized use of this domain name for outbound mail by requiring outbound mail to originate from the indicated host(s)."))
+        dmarc_qname = "_dmarc" + ("" if qname is None else "." + qname)
+        if not has_rec(dmarc_qname, "TXT", prefix="v=DMARC1; "):
+            records.append((dmarc_qname, "TXT", 'v=DMARC1; p=reject', "Prevents unauthorized use of this domain name for outbound mail by requiring a valid DKIM signature."))
 
-	# For any subdomain with an A record but no SPF or DMARC record, add strict policy records.
-	all_resolvable_qnames = set(r[0] for r in records if r[1] in ("A", "AAAA"))
-	for qname in all_resolvable_qnames:
-		if not has_rec(qname, "TXT", prefix="v=spf1 "):
-			records.append((qname,  "TXT", 'v=spf1 a mx -all', "Prevents unauthorized use of this domain name for outbound mail by requiring outbound mail to originate from the indicated host(s)."))
-		dmarc_qname = "_dmarc" + ("" if qname is None else "." + qname)
-		if not has_rec(dmarc_qname, "TXT", prefix="v=DMARC1; "):
-			records.append((dmarc_qname, "TXT", 'v=DMARC1; p=reject', "Prevents unauthorized use of this domain name for outbound mail by requiring a valid DKIM signature."))
-		
+    # Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
+    records.sort(key=lambda rec: list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
 
-	# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
-	records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
-
-	return records
+    return records
 
 ########################################################################
 
+
 def get_custom_records(domain, additional_records, env):
-	for qname, value in additional_records.items():
-		# We don't count the secondary nameserver config (if present) as a record - that would just be
-		# confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.
-		if qname == "_secondary_nameserver": continue
+    for qname, value in additional_records.items():
+        # We don't count the secondary nameserver config (if present) as a record - that would just be
+        # confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.
+        if qname == "_secondary_nameserver":
+                continue
 
-		# Is this record for the domain or one of its subdomains?
-		# If `domain` is None, return records for all domains.
-		if domain is not None and qname != domain and not qname.endswith("." + domain): continue
+        # Is this record for the domain or one of its subdomains?
+        # If `domain` is None, return records for all domains.
+        if domain is not None and qname != domain and not qname.endswith("." + domain):
+            continue
 
-		# Turn the fully qualified domain name in the YAML file into
-		# our short form (None => domain, or a relative QNAME) if
-		# domain is not None.
-		if domain is not None:
-			if qname == domain:
-				qname = None
-			else:
-				qname = qname[0:len(qname)-len("." + domain)]
+        # Turn the fully qualified domain name in the YAML file into
+        # our short form (None => domain, or a relative QNAME) if
+        # domain is not None.
+        if domain is not None:
+            if qname == domain:
+                qname = None
+            else:
+                qname = qname[0:len(qname)-len("." + domain)]
 
-		# Short form. Mapping a domain name to a string is short-hand
-		# for creating A records.
-		if isinstance(value, str):
-			values = [("A", value)]
-			if value == "local" and env.get("PUBLIC_IPV6"):
-				values.append( ("AAAA", value) )
+        # Short form. Mapping a domain name to a string is short-hand
+        # for creating A records.
+        if isinstance(value, str):
+            values = [("A", value)]
+            if value == "local" and env.get("PUBLIC_IPV6"):
+                values.append(("AAAA", value))
 
-		# A mapping creates multiple records.
-		elif isinstance(value, dict):
-			values = value.items()
+        # A mapping creates multiple records.
+        elif isinstance(value, dict):
+            values = value.items()
 
-		# No other type of data is allowed.
-		else:
-			raise ValueError()
+        # No other type of data is allowed.
+        else:
+            raise ValueError()
 
-		for rtype, value2 in values:
-			# The "local" keyword on A/AAAA records are short-hand for our own IP.
-			# This also flags for web configuration that the user wants a website here.
-			if rtype == "A" and value2 == "local":
-				value2 = env["PUBLIC_IP"]
-			if rtype == "AAAA" and value2 == "local":
-				if "PUBLIC_IPV6" not in env: continue # no IPv6 address is available so don't set anything
-				value2 = env["PUBLIC_IPV6"]
-			yield (qname, rtype, value2)
+        for rtype, value2 in values:
+            # The "local" keyword on A/AAAA records are short-hand for our own IP.
+            # This also flags for web configuration that the user wants a website here.
+            if rtype == "A" and value2 == "local":
+                value2 = env["PUBLIC_IP"]
+            if rtype == "AAAA" and value2 == "local":
+                # no IPv6 address is available so don't set anything
+                if "PUBLIC_IPV6" not in env:
+                    continue
+                value2 = env["PUBLIC_IPV6"]
+            yield (qname, rtype, value2)
 
 ########################################################################
 
+
 def build_tlsa_record(env):
-	# A DANE TLSA record in DNS specifies that connections on a port
-	# must use TLS and the certificate must match a particular certificate.
-	#
-	# Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html
-	# for explaining all of this!
+    # A DANE TLSA record in DNS specifies that connections on a port
+    # must use TLS and the certificate must match a particular certificate.
+    #
+    # Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html
+    # for explaining all of this!
 
-	# Get the hex SHA256 of the DER-encoded server certificate:
-	certder = shell("check_output", [
-		"/usr/bin/openssl",
-		"x509",
-		"-in", os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem"),
-		"-outform", "DER"
-		],
-		return_bytes=True)
-	certhash = hashlib.sha256(certder).hexdigest()
+    # Get the hex SHA256 of the DER-encoded server certificate:
+    certder = shell("check_output", [
+        "/usr/bin/openssl",
+        "x509",
+        "-in", os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem"),
+        "-outform", "DER"
+        ],
+        return_bytes=True)
+    certhash = hashlib.sha256(certder).hexdigest()
+
+    # Specify the TLSA parameters:
+    # 3: This is the certificate that the client should trust. No CA is needed.
+    # 0: The whole certificate is matched.
+    # 1: The certificate is SHA256'd here.
+    return "3 0 1 " + certhash
 
-	# Specify the TLSA parameters:
-	# 3: This is the certificate that the client should trust. No CA is needed.
-	# 0: The whole certificate is matched.
-	# 1: The certificate is SHA256'd here.
-	return "3 0 1 " + certhash
 
 def build_sshfp_records():
-	# The SSHFP record is a way for us to embed this server's SSH public
-	# key fingerprint into the DNS so that remote hosts have an out-of-band
-	# method to confirm the fingerprint. See RFC 4255 and RFC 6594. This
-	# depends on DNSSEC.
-	#
-	# On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to
-	# include this info in the key verification prompt or 'yes' to trust
-	# the SSHFP record.
-	#
-	# See https://github.com/xelerance/sshfp for inspiriation.
+    # The SSHFP record is a way for us to embed this server's SSH public
+    # key fingerprint into the DNS so that remote hosts have an out-of-band
+    # method to confirm the fingerprint. See RFC 4255 and RFC 6594. This
+    # depends on DNSSEC.
+    #
+    # On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to
+    # include this info in the key verification prompt or 'yes' to trust
+    # the SSHFP record.
+    #
+    # See https://github.com/xelerance/sshfp for inspiration.
 
-	algorithm_number = {
-		"ssh-rsa": 1,
-		"ssh-dss": 2,
-		"ecdsa-sha2-nistp256": 3,
-	}
+    algorithm_number = {
+        "ssh-rsa": 1,
+        "ssh-dss": 2,
+        "ecdsa-sha2-nistp256": 3,
+    }
+
+    # Get our local fingerprints by running ssh-keyscan. The output looks
+    # like the known_hosts file: hostname, keytype, fingerprint. The order
+    # of the output is arbitrary, so sort it to prevent spurious updates
+    # to the zone file (that trigger bumping the serial number).
+    keys = shell("check_output", ["ssh-keyscan", "localhost"])
+    for key in sorted(keys.split("\n")):
+        if key.strip() == "" or key[0] == "#":
+            continue
+        try:
+            host, keytype, pubkey = key.split(" ")
+            yield "%d %d ( %s )" % (
+                algorithm_number[keytype],
+                2,  # specifies we are using SHA-256 on next line
+                hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),
+                )
+        except:
+            # Lots of things can go wrong. Don't let it disturb the DNS
+            # zone.
+            pass
 
-	# Get our local fingerprints by running ssh-keyscan. The output looks
-	# like the known_hosts file: hostname, keytype, fingerprint. The order
-	# of the output is arbitrary, so sort it to prevent spurrious updates
-	# to the zone file (that trigger bumping the serial number).
-	keys = shell("check_output", ["ssh-keyscan", "localhost"])
-	for key in sorted(keys.split("\n")):
-		if key.strip() == "" or key[0] == "#": continue
-		try:
-			host, keytype, pubkey = key.split(" ")
-			yield "%d %d ( %s )" % (
-				algorithm_number[keytype],
-				2, # specifies we are using SHA-256 on next line
-				hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),
-				)
-		except:
-			# Lots of things can go wrong. Don't let it disturb the DNS
-			# zone.
-			pass
-	
 ########################################################################
 
+
 def write_nsd_zone(domain, zonefile, records, env, force):
-	# On the $ORIGIN line, there's typically a ';' comment at the end explaining
-	# what the $ORIGIN line does. Any further data after the domain confuses
-	# ldns-signzone, however. It used to say '; default zone domain'.
+    # On the $ORIGIN line, there's typically a ';' comment at the end explaining
+    # what the $ORIGIN line does. Any further data after the domain confuses
+    # ldns-signzone, however. It used to say '; default zone domain'.
 
-	# The SOA contact address for all of the domains on this system is hostmaster
-	# @ the PRIMARY_HOSTNAME. Hopefully that's legit.
+    # The SOA contact address for all of the domains on this system is hostmaster
+    # @ the PRIMARY_HOSTNAME. Hopefully that's legit.
 
-	# For the refresh through TTL fields, a good reference is:
-	# http://www.peerwisdom.org/2013/05/15/dns-understanding-the-soa-record/
+    # For the refresh through TTL fields, a good reference is:
+    # http://www.peerwisdom.org/2013/05/15/dns-understanding-the-soa-record/
 
-
-	zone = """
+    zone = """
 $ORIGIN {domain}.
 $TTL 1800           ; default time to live
 
@@ -389,96 +413,98 @@ $TTL 1800           ; default time to live
            )
 """
 
-	# Replace replacement strings.
-	zone = zone.format(domain=domain.encode("idna").decode("ascii"), primary_domain=env["PRIMARY_HOSTNAME"].encode("idna").decode("ascii"))
+    # Replace replacement strings.
+    zone = zone.format(domain=domain.encode("idna").decode("ascii"), primary_domain=env["PRIMARY_HOSTNAME"].encode("idna").decode("ascii"))
 
-	# Add records.
-	for subdomain, querytype, value, explanation in records:
-		if subdomain:
-			zone += subdomain.encode("idna").decode("ascii")
-		zone += "\tIN\t" + querytype + "\t"
-		if querytype == "TXT":
-			# Quote and escape.
-			value = value.replace('\\', '\\\\') # escape backslashes
-			value = value.replace('"', '\\"') # escape quotes
-			value = '"' + value + '"' # wrap in quotes
-		elif querytype in ("NS", "CNAME"):
-			# These records must be IDNA-encoded.
-			value = value.encode("idna").decode("ascii")
-		elif querytype == "MX":
-			# Also IDNA-encoded, but must parse first.
-			priority, host = value.split(" ", 1)
-			host = host.encode("idna").decode("ascii")
-			value = priority + " " + host
-		zone += value + "\n"
+    # Add records.
+    for subdomain, querytype, value, explanation in records:
+        if subdomain:
+            zone += subdomain.encode("idna").decode("ascii")
+        zone += "\tIN\t" + querytype + "\t"
+        if querytype == "TXT":
+            # Quote and escape.
+            value = value.replace('\\', '\\\\')  # escape backslashes
+            value = value.replace('"', '\\"')  # escape quotes
+            value = '"' + value + '"'  # wrap in quotes
+        elif querytype in ("NS", "CNAME"):
+            # These records must be IDNA-encoded.
+            value = value.encode("idna").decode("ascii")
+        elif querytype == "MX":
+            # Also IDNA-encoded, but must parse first.
+            priority, host = value.split(" ", 1)
+            host = host.encode("idna").decode("ascii")
+            value = priority + " " + host
+        zone += value + "\n"
 
-	# DNSSEC requires re-signing a zone periodically. That requires
-	# bumping the serial number even if no other records have changed.
-	# We don't see the DNSSEC records yet, so we have to figure out
-	# if a re-signing is necessary so we can prematurely bump the
-	# serial number.
-	force_bump = False
-	if not os.path.exists(zonefile + ".signed"):
-		# No signed file yet. Shouldn't normally happen unless a box
-		# is going from not using DNSSEC to using DNSSEC.
-		force_bump = True
-	else:
-		# We've signed the domain. Check if we are close to the expiration
-		# time of the signature. If so, we'll force a bump of the serial
-		# number so we can re-sign it.
-		with open(zonefile + ".signed") as f:
-			signed_zone = f.read()
-		expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone)
-		if len(expiration_times) == 0:
-			# weird
-			force_bump = True
-		else:
-			# All of the times should be the same, but if not choose the soonest.
-			expiration_time = min(expiration_times)
-			expiration_time = datetime.datetime.strptime(expiration_time, "%Y%m%d%H%M%S")
-			if expiration_time - datetime.datetime.now() < datetime.timedelta(days=3):
-				# We're within three days of the expiration, so bump serial & resign.
-				force_bump = True
+    # DNSSEC requires re-signing a zone periodically. That requires
+    # bumping the serial number even if no other records have changed.
+    # We don't see the DNSSEC records yet, so we have to figure out
+    # if a re-signing is necessary so we can prematurely bump the
+    # serial number.
+    force_bump = False
+    if not os.path.exists(zonefile + ".signed"):
+        # No signed file yet. Shouldn't normally happen unless a box
+        # is going from not using DNSSEC to using DNSSEC.
+        force_bump = True
+    else:
+        # We've signed the domain. Check if we are close to the expiration
+        # time of the signature. If so, we'll force a bump of the serial
+        # number so we can re-sign it.
+        with open(zonefile + ".signed") as f:
+            signed_zone = f.read()
+        expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone)
+        if len(expiration_times) == 0:
+            # weird
+            force_bump = True
+        else:
+            # All of the times should be the same, but if not choose the soonest.
+            expiration_time = min(expiration_times)
+            expiration_time = datetime.datetime.strptime(expiration_time, "%Y%m%d%H%M%S")
+            if expiration_time - datetime.datetime.now() < datetime.timedelta(days=3):
+                # We're within three days of the expiration, so bump serial & resign.
+                force_bump = True
 
-	# Set the serial number.
-	serial = datetime.datetime.now().strftime("%Y%m%d00")
-	if os.path.exists(zonefile):
-		# If the zone already exists, is different, and has a later serial number,
-		# increment the number.
-		with open(zonefile) as f:
-			existing_zone = f.read()
-			m = re.search(r"(\d+)\s*;\s*serial number", existing_zone)
-			if m:
-				# Clear out the serial number in the existing zone file for the
-				# purposes of seeing if anything *else* in the zone has changed.
-				existing_serial = m.group(1)
-				existing_zone = existing_zone.replace(m.group(0), "__SERIAL__     ; serial number")
+    # Set the serial number.
+    serial = datetime.datetime.now().strftime("%Y%m%d00")
+    if os.path.exists(zonefile):
+        # If the zone already exists, is different, and has a later serial number,
+        # increment the number.
+        with open(zonefile) as f:
+            existing_zone = f.read()
+            m = re.search(r"(\d+)\s*;\s*serial number", existing_zone)
+            if m:
+                # Clear out the serial number in the existing zone file for the
+                # purposes of seeing if anything *else* in the zone has changed.
+                existing_serial = m.group(1)
+                existing_zone = existing_zone.replace(m.group(0), "__SERIAL__     ; serial number")
 
-				# If the existing zone is the same as the new zone (modulo the serial number),
-				# there is no need to update the file. Unless we're forcing a bump.
-				if zone == existing_zone and not force_bump and not force:
-					return False
+                # If the existing zone is the same as the new zone (modulo the serial number),
+                # there is no need to update the file. Unless we're forcing a bump.
+                if zone == existing_zone and not force_bump and not force:
+                    return False
 
-				# If the existing serial is not less than a serial number
-				# based on the current date plus 00, increment it. Otherwise,
-				# the serial number is less than our desired new serial number
-				# so we'll use the desired new number.
-				if existing_serial >= serial:
-					serial = str(int(existing_serial) + 1)
+                # If the existing serial is not less than a serial number
+                # based on the current date plus 00, increment it. Otherwise,
+                # the serial number is less than our desired new serial number
+                # so we'll use the desired new number.
+                if existing_serial >= serial:
+                    serial = str(int(existing_serial) + 1)
 
-	zone = zone.replace("__SERIAL__", serial)
+    zone = zone.replace("__SERIAL__", serial)
 
-	# Write the zone file.
-	with open(zonefile, "w") as f:
-		f.write(zone)
+    # Write the zone file.
+    with open(zonefile, "w") as f:
+        f.write(zone)
 
-	return True # file is updated
+    # file is updated
+    return True
 
 ########################################################################
 
+
 def write_nsd_conf(zonefiles, additional_records, env):
-	# Basic header.
-	nsdconf = """
+    # Basic header.
+    nsdconf = """
 server:
   hide-version: yes
 
@@ -488,376 +514,394 @@ server:
   # The directory for zonefile: files.
   zonesdir: "/etc/nsd/zones"
 """
-	
-	# Since we have bind9 listening on localhost for locally-generated
-	# DNS queries that require a recursive nameserver, and the system
-	# might have other network interfaces for e.g. tunnelling, we have
-	# to be specific about the network interfaces that nsd binds to.
-	for ipaddr in (env.get("PRIVATE_IP", "") + " " + env.get("PRIVATE_IPV6", "")).split(" "):
-		if ipaddr == "": continue
-		nsdconf += "  ip-address: %s\n" % ipaddr
 
-	# Append the zones.
-	for domain, zonefile in zonefiles:
-		nsdconf += """
+    # Since we have bind9 listening on localhost for locally-generated
+    # DNS queries that require a recursive nameserver, and the system
+    # might have other network interfaces for e.g. tunnelling, we have
+    # to be specific about the network interfaces that nsd binds to.
+    for ipaddr in (env.get("PRIVATE_IP", "") + " " + env.get("PRIVATE_IPV6", "")).split(" "):
+        if ipaddr == "":
+            continue
+        nsdconf += "  ip-address: %s\n" % ipaddr
+
+    # Append the zones.
+    for domain, zonefile in zonefiles:
+        nsdconf += """
 zone:
-	name: %s
-	zonefile: %s
+    name: %s
+    zonefile: %s
 """ % (domain.encode("idna").decode("ascii"), zonefile)
 
-		# If a custom secondary nameserver has been set, allow zone transfers
-		# and notifies to that nameserver.
-		if additional_records.get("_secondary_nameserver"):
-			# Get the IP address of the nameserver by resolving it.
-			hostname = additional_records.get("_secondary_nameserver")
-			resolver = dns.resolver.get_default_resolver()
-			response = dns.resolver.query(hostname+'.', "A")
-			ipaddr = str(response[0])
-			nsdconf += """\tnotify: %s NOKEY
-	provide-xfr: %s NOKEY
+        # If a custom secondary nameserver has been set, allow zone transfers
+        # and notifies to that nameserver.
+        if additional_records.get("_secondary_nameserver"):
+            # Get the IP address of the nameserver by resolving it.
+            hostname = additional_records.get("_secondary_nameserver")
+            resolver = dns.resolver.get_default_resolver()
+            response = dns.resolver.query(hostname+'.', "A")
+            ipaddr = str(response[0])
+            nsdconf += """\tnotify: %s NOKEY
+    provide-xfr: %s NOKEY
 """ % (ipaddr, ipaddr)
 
+    # Check if the nsd.conf is changing. If it isn't changing,
+    # return False to flag that no change was made.
+    with open("/etc/nsd/nsd.conf") as f:
+        if f.read() == nsdconf:
+            return False
 
-	# Check if the nsd.conf is changing. If it isn't changing,
-	# return False to flag that no change was made.
-	with open("/etc/nsd/nsd.conf") as f:
-		if f.read() == nsdconf:
-			return False
+    with open("/etc/nsd/nsd.conf", "w") as f:
+        f.write(nsdconf)
 
-	with open("/etc/nsd/nsd.conf", "w") as f:
-		f.write(nsdconf)
-
-	return True
+    return True
 
 ########################################################################
 
+
 def dnssec_choose_algo(domain, env):
-	if '.' in domain and domain.rsplit('.')[-1] in \
-		("email", "guide", "fund"):
-		# At GoDaddy, RSASHA256 is the only algorithm supported
-		# for .email and .guide.
-		# A variety of algorithms are supported for .fund. This
-		# is preferred.
-		return "RSASHA256"
+    if '.' in domain and domain.rsplit('.')[-1] in ("email", "guide", "fund"):
+        # At GoDaddy, RSASHA256 is the only algorithm supported
+        # for .email and .guide.
+        # A variety of algorithms are supported for .fund. This
+        # is preferred.
+        return "RSASHA256"
+
+    # For any domain we were able to sign before, don't change the algorithm
+    # on existing users. We'll probably want to migrate to SHA256 later.
+    return "RSASHA1-NSEC3-SHA1"
 
-	# For any domain we were able to sign before, don't change the algorithm
-	# on existing users. We'll probably want to migrate to SHA256 later.
-	return "RSASHA1-NSEC3-SHA1"
 
 def sign_zone(domain, zonefile, env):
-	algo = dnssec_choose_algo(domain, env)
-	dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))
+    algo = dnssec_choose_algo(domain, env)
+    dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))
 
-	# From here, use the IDNA encoding of the domain name.
-	domain = domain.encode("idna").decode("ascii")
+    # From here, use the IDNA encoding of the domain name.
+    domain = domain.encode("idna").decode("ascii")
 
-	# In order to use the same keys for all domains, we have to generate
-	# a new .key file with a DNSSEC record for the specific domain. We
-	# can reuse the same key, but it won't validate without a DNSSEC
-	# record specifically for the domain.
-	# 
-	# Copy the .key and .private files to /tmp to patch them up.
-	#
-	# Use os.umask and open().write() to securely create a copy that only
-	# we (root) can read.
-	files_to_kill = []
-	for key in ("KSK", "ZSK"):
-		if dnssec_keys.get(key, "").strip() == "": raise Exception("DNSSEC is not properly set up.")
-		oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys[key])
-		newkeyfn = '/tmp/' + dnssec_keys[key].replace("_domain_", domain)
-		dnssec_keys[key] = newkeyfn
-		for ext in (".private", ".key"):
-			if not os.path.exists(oldkeyfn + ext): raise Exception("DNSSEC is not properly set up.")
-			with open(oldkeyfn + ext, "r") as fr:
-				keydata = fr.read()
-			keydata = keydata.replace("_domain_", domain) # trick ldns-signkey into letting our generic key be used by this zone
-			fn = newkeyfn + ext
-			prev_umask = os.umask(0o77) # ensure written file is not world-readable
-			try:
-				with open(fn, "w") as fw:
-					fw.write(keydata)
-			finally:
-				os.umask(prev_umask) # other files we write should be world-readable
-			files_to_kill.append(fn)
+    # In order to use the same keys for all domains, we have to generate
+    # a new .key file with a DNSSEC record for the specific domain. We
+    # can reuse the same key, but it won't validate without a DNSSEC
+    # record specifically for the domain.
+    #
+    # Copy the .key and .private files to /tmp to patch them up.
+    #
+    # Use os.umask and open().write() to securely create a copy that only
+    # we (root) can read.
+    files_to_kill = []
+    for key in ("KSK", "ZSK"):
+        if dnssec_keys.get(key, "").strip() == "":
+            raise Exception("DNSSEC is not properly set up.")
+        oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys[key])
+        newkeyfn = '/tmp/' + dnssec_keys[key].replace("_domain_", domain)
+        dnssec_keys[key] = newkeyfn
+        for ext in (".private", ".key"):
+            if not os.path.exists(oldkeyfn + ext):
+                raise Exception("DNSSEC is not properly set up.")
+            with open(oldkeyfn + ext, "r") as fr:
+                keydata = fr.read()
+            # trick ldns-signkey into letting our generic key be used by this zone
+            keydata = keydata.replace("_domain_", domain)
+            fn = newkeyfn + ext
+            # ensure written file is not world-readable
+            prev_umask = os.umask(0o77)
+            try:
+                with open(fn, "w") as fw:
+                    fw.write(keydata)
+            finally:
+                # other files we write should be world-readable
+                os.umask(prev_umask)
+            files_to_kill.append(fn)
 
-	# Do the signing.
-	expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d")
-	shell('check_call', ["/usr/bin/ldns-signzone",
-		# expire the zone after 30 days
-		"-e", expiry_date,
+    # Do the signing.
+    expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d")
+    shell('check_call', [
+        "/usr/bin/ldns-signzone",
 
-		# use NSEC3
-		"-n",
+        # expire the zone after 30 days
+        "-e", expiry_date,
 
-		# zonefile to sign
-		"/etc/nsd/zones/" + zonefile,
+        # use NSEC3
+        "-n",
 
-		# keys to sign with (order doesn't matter -- it'll figure it out)
-		dnssec_keys["KSK"],
-		dnssec_keys["ZSK"],
-	])
+        # zonefile to sign
+        "/etc/nsd/zones/" + zonefile,
 
-	# Create a DS record based on the patched-up key files. The DS record is specific to the
-	# zone being signed, so we can't use the .ds files generated when we created the keys.
-	# The DS record points to the KSK only. Write this next to the zone file so we can
-	# get it later to give to the user with instructions on what to do with it.
-	#
-	# We want to be able to validate DS records too, but multiple forms may be valid depending
-	# on the digest type. So we'll write all (both) valid records. Only one DS record should
-	# actually be deployed. Preferebly the first.
-	with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
-		for digest_type in ('2', '1'):
-			rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds",
-				"-n", # output to stdout
-				"-" + digest_type, # 1=SHA1, 2=SHA256
-				dnssec_keys["KSK"] + ".key"
-			])
-			f.write(rr_ds)
+        # keys to sign with (order doesn't matter -- it'll figure it out)
+        dnssec_keys["KSK"],
+        dnssec_keys["ZSK"],
+    ])
 
-	# Remove our temporary file.
-	for fn in files_to_kill:
-		os.unlink(fn)
+    # Create a DS record based on the patched-up key files. The DS record is specific to the
+    # zone being signed, so we can't use the .ds files generated when we created the keys.
+    # The DS record points to the KSK only. Write this next to the zone file so we can
+    # get it later to give to the user with instructions on what to do with it.
+    #
+    # We want to be able to validate DS records too, but multiple forms may be valid depending
+    # on the digest type. So we'll write all (both) valid records. Only one DS record should
+    # actually be deployed. Preferably the first.
+    with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
+        for digest_type in ('2', '1'):
+            rr_ds = shell('check_output', [
+                "/usr/bin/ldns-key2ds",
+                "-n",  # output to stdout
+                "-" + digest_type,  # 1=SHA1, 2=SHA256
+                dnssec_keys["KSK"] + ".key"
+            ])
+            f.write(rr_ds)
+
+    # Remove our temporary file.
+    for fn in files_to_kill:
+        os.unlink(fn)
 
 ########################################################################
 
+
 def write_opendkim_tables(domains, env):
-	# Append a record to OpenDKIM's KeyTable and SigningTable for each domain
-	# that we send mail from (zones and all subdomains).
+    # Append a record to OpenDKIM's KeyTable and SigningTable for each domain
+    # that we send mail from (zones and all subdomains).
 
-	opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
+    opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
 
-	if not os.path.exists(opendkim_key_file):
-		# Looks like OpenDKIM is not installed.
-		return False
+    if not os.path.exists(opendkim_key_file):
+        # Looks like OpenDKIM is not installed.
+        return False
 
-	config = {
-		# The SigningTable maps email addresses to a key in the KeyTable that
-		# specifies signing information for matching email addresses. Here we
-		# map each domain to a same-named key.
-		#
-		# Elsewhere we set the DMARC policy for each domain such that mail claiming
-		# to be From: the domain must be signed with a DKIM key on the same domain.
-		# So we must have a separate KeyTable entry for each domain.
-		"SigningTable":
-			"".join(
-				"*@{domain} {domain}\n".format(domain=domain)
-				for domain in domains
-			),
+    config = {
+        # The SigningTable maps email addresses to a key in the KeyTable that
+        # specifies signing information for matching email addresses. Here we
+        # map each domain to a same-named key.
+        #
+        # Elsewhere we set the DMARC policy for each domain such that mail claiming
+        # to be From: the domain must be signed with a DKIM key on the same domain.
+        # So we must have a separate KeyTable entry for each domain.
+        "SigningTable": "".join(
+            "*@{domain} {domain}\n".format(domain=domain)
+            for domain in domains
+        ),
 
-		# The KeyTable specifies the signing domain, the DKIM selector, and the
-		# path to the private key to use for signing some mail. Per DMARC, the
-		# signing domain must match the sender's From: domain.
-		"KeyTable":
-			"".join(
-				"{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file)
-				for domain in domains
-			),
-	}
+        # The KeyTable specifies the signing domain, the DKIM selector, and the
+        # path to the private key to use for signing some mail. Per DMARC, the
+        # signing domain must match the sender's From: domain.
+        "KeyTable": "".join(
+            "{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file)
+            for domain in domains
+        ),
+    }
 
-	did_update = False
-	for filename, content in config.items():
-		# Don't write the file if it doesn't need an update.
-		if os.path.exists("/etc/opendkim/" + filename):
-			with open("/etc/opendkim/" + filename) as f:
-				if f.read() == content:
-					continue
+    did_update = False
+    for filename, content in config.items():
+        # Don't write the file if it doesn't need an update.
+        if os.path.exists("/etc/opendkim/" + filename):
+            with open("/etc/opendkim/" + filename) as f:
+                if f.read() == content:
+                    continue
 
-		# The contents needs to change.
-		with open("/etc/opendkim/" + filename, "w") as f:
-			f.write(content)
-		did_update = True
+        # The contents need to change.
+        with open("/etc/opendkim/" + filename, "w") as f:
+            f.write(content)
+        did_update = True
 
-	# Return whether the files changed. If they didn't change, there's
-	# no need to kick the opendkim process.
-	return did_update
+    # Return whether the files changed. If they didn't change, there's
+    # no need to kick the opendkim process.
+    return did_update
 
 ########################################################################
 
+
 def set_custom_dns_record(qname, rtype, value, env):
-	# validate qname
-	for zone, fn in get_dns_zones(env):
-		# It must match a zone apex or be a subdomain of a zone
-		# that we are otherwise hosting.
-		if qname == zone or qname.endswith("."+zone):
-			break
-	else:
-		# No match.
-		raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
+    # validate qname
+    for zone, fn in get_dns_zones(env):
+        # It must match a zone apex or be a subdomain of a zone
+        # that we are otherwise hosting.
+        if qname == zone or qname.endswith("."+zone):
+            break
+    else:
+        # No match.
+        raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
 
-	# validate rtype
-	rtype = rtype.upper()
-	if value is not None:
-		if rtype in ("A", "AAAA"):
-			v = ipaddress.ip_address(value)
-			if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.")
-			if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.")
-		elif rtype in ("CNAME", "TXT", "SRV"):
-			# anything goes
-			pass
-		else:
-			raise ValueError("Unknown record type '%s'." % rtype)
+    # validate rtype
+    rtype = rtype.upper()
+    if value is not None:
+        if rtype in ("A", "AAAA"):
+            v = ipaddress.ip_address(value)
+            if rtype == "A" and not isinstance(v, ipaddress.IPv4Address):
+                raise ValueError("That's an IPv6 address.")
+            if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address):
+                raise ValueError("That's an IPv4 address.")
+        elif rtype in ("CNAME", "TXT", "SRV"):
+            # anything goes
+            pass
+        else:
+            raise ValueError("Unknown record type '%s'." % rtype)
 
-	# load existing config
-	config = get_custom_dns_config(env)
+    # load existing config
+    config = get_custom_dns_config(env)
 
-	# update
-	if qname not in config:
-		if value is None:
-			# Is asking to delete a record that does not exist.
-			return False
-		elif rtype == "A":
-			# Add this record using the short form 'qname: value'.
-			config[qname] = value
-		else:
-			# Add this record. This is the qname's first record.
-			config[qname] = { rtype: value }
-	else:
-		if isinstance(config[qname], str):
-			# This is a short-form 'qname: value' implicit-A record.
-			if value is None and rtype != "A":
-				# Is asking to delete a record that doesn't exist.
-				return False
-			elif value is None and rtype == "A":
-				# Delete record.
-				del config[qname]
-			elif rtype == "A":
-				# Update, keeping short form.
-				if config[qname] == "value":
-					# No change.
-					return False
-				config[qname] = value
-			else:
-				# Expand short form so we can add a new record type.
-				config[qname] = { "A": config[qname], rtype: value }
-		else:
-			# This is the qname: { ... } (dict) format.
-			if value is None:
-				if rtype not in config[qname]:
-					# Is asking to delete a record that doesn't exist.
-					return False
-				else:
-					# Delete the record. If it's the last record, delete the domain.
-					del config[qname][rtype]
-					if len(config[qname]) == 0:
-						del config[qname]
-			else:
-				# Update the record.
-				if config[qname].get(rtype) == "value":
-					# No change.
-					return False
-				config[qname][rtype] = value
+    # update
+    if qname not in config:
+        if value is None:
+            # Is asking to delete a record that does not exist.
+            return False
+        elif rtype == "A":
+            # Add this record using the short form 'qname: value'.
+            config[qname] = value
+        else:
+            # Add this record. This is the qname's first record.
+            config[qname] = {rtype: value}
+    else:
+        if isinstance(config[qname], str):
+            # This is a short-form 'qname: value' implicit-A record.
+            if value is None and rtype != "A":
+                # Is asking to delete a record that doesn't exist.
+                return False
+            elif value is None and rtype == "A":
+                # Delete record.
+                del config[qname]
+            elif rtype == "A":
+                # Update, keeping short form.
+                if config[qname] == value:
+                    # No change.
+                    return False
+                config[qname] = value
+            else:
+                # Expand short form so we can add a new record type.
+                config[qname] = {"A": config[qname], rtype: value}
+        else:
+            # This is the qname: { ... } (dict) format.
+            if value is None:
+                if rtype not in config[qname]:
+                    # Is asking to delete a record that doesn't exist.
+                    return False
+                else:
+                    # Delete the record. If it's the last record, delete the domain.
+                    del config[qname][rtype]
+                    if len(config[qname]) == 0:
+                        del config[qname]
+            else:
+                # Update the record.
+                if config[qname].get(rtype) == value:
+                    # No change.
+                    return False
+                config[qname][rtype] = value
 
-	# serialize & save
-	write_custom_dns_config(config, env)
+    # serialize & save
+    write_custom_dns_config(config, env)
 
-	return True
+    return True
 
 ########################################################################
 
+
 def set_secondary_dns(hostname, env):
-	config = get_custom_dns_config(env)
+    config = get_custom_dns_config(env)
 
-	if hostname in (None, ""):
-		# Clear.
-		if "_secondary_nameserver" in config:
-			del config["_secondary_nameserver"]
-	else:
-		# Validate.
-		hostname = hostname.strip().lower()
-		resolver = dns.resolver.get_default_resolver()
-		try:
-			response = dns.resolver.query(hostname, "A")
-		except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
-			raise ValueError("Could not resolve the IP address of %s." % hostname)
+    if hostname in (None, ""):
+        # Clear.
+        if "_secondary_nameserver" in config:
+            del config["_secondary_nameserver"]
+    else:
+        # Validate.
+        hostname = hostname.strip().lower()
+        resolver = dns.resolver.get_default_resolver()
+        try:
+            response = dns.resolver.query(hostname, "A")
+        except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+            raise ValueError("Could not resolve the IP address of %s." % hostname)
 
-		# Set.
-		config["_secondary_nameserver"] = hostname
+        # Set.
+        config["_secondary_nameserver"] = hostname
 
-	# Save and apply.
-	write_custom_dns_config(config, env)
-	return do_dns_update(env)
+    # Save and apply.
+    write_custom_dns_config(config, env)
+    return do_dns_update(env)
 
 
 ########################################################################
 
 def justtestingdotemail(domain, records):
-	# If the domain is a subdomain of justtesting.email, which we own,
-	# automatically populate the zone where it is set up on dns4e.com.
-	# Ideally if dns4e.com supported NS records we would just have it
-	# delegate DNS to us, but instead we will populate the whole zone.
+    # If the domain is a subdomain of justtesting.email, which we own,
+    # automatically populate the zone where it is set up on dns4e.com.
+    # Ideally if dns4e.com supported NS records we would just have it
+    # delegate DNS to us, but instead we will populate the whole zone.
 
-	import subprocess, json, urllib.parse
+    import subprocess
+    import json
+    import urllib.parse
 
-	if not domain.endswith(".justtesting.email"):
-		return
+    if not domain.endswith(".justtesting.email"):
+        return
 
-	for subdomain, querytype, value, explanation in records:
-		if querytype in ("NS",): continue
-		if subdomain in ("www", "ns1", "ns2"): continue # don't do unnecessary things
+    for subdomain, querytype, value, explanation in records:
+        if querytype in ("NS",):
+            continue
+        # don't do unnecessary things
+        if subdomain in ("www", "ns1", "ns2"):
+            continue
 
-		if subdomain == None:
-			subdomain = domain
-		else:
-			subdomain = subdomain + "." + domain
+        if subdomain is None:
+            subdomain = domain
+        else:
+            subdomain = subdomain + "." + domain
 
-		if querytype == "TXT":
-			# nsd requires parentheses around txt records with multiple parts,
-			# but DNS4E requires there be no parentheses; also it goes into
-			# nsd with a newline and a tab, which we replace with a space here
-			value = re.sub("^\s*\(\s*([\w\W]*)\)", r"\1", value)
-			value = re.sub("\s+", " ", value)
-		else:
-			continue
+        if querytype == "TXT":
+            # nsd requires parentheses around txt records with multiple parts,
+            # but DNS4E requires there be no parentheses; also it goes into
+            # nsd with a newline and a tab, which we replace with a space here
+            value = re.sub(r"^\s*\(\s*([\w\W]*)\)", r"\1", value)
+            value = re.sub(r"\s+", " ", value)
+        else:
+            continue
 
-		print("Updating DNS for %s/%s..." % (subdomain, querytype))
-		resp = json.loads(subprocess.check_output([
-			"curl",
-			"-s",
-			"https://api.dns4e.com/v7/%s/%s" % (urllib.parse.quote(subdomain), querytype.lower()),
-			"--user", "2ddbd8e88ed1495fa0ec:A97TDJV26CVUJS6hqAs0CKnhj4HvjTM7MwAAg8xb",
-			"--data", "record=%s" % urllib.parse.quote(value),
-			]).decode("utf8"))
-		print("\t...", resp.get("message", "?"))
+        print("Updating DNS for %s/%s..." % (subdomain, querytype))
+        resp = json.loads(subprocess.check_output([
+            "curl",
+            "-s",
+            "https://api.dns4e.com/v7/%s/%s" % (urllib.parse.quote(subdomain), querytype.lower()),
+            "--user", "2ddbd8e88ed1495fa0ec:A97TDJV26CVUJS6hqAs0CKnhj4HvjTM7MwAAg8xb",
+            "--data", "record=%s" % urllib.parse.quote(value),
+            ]).decode("utf8"))
+        print("\t...", resp.get("message", "?"))
 
 ########################################################################
 
+
 def build_recommended_dns(env):
-	ret = []
-	domains = get_dns_domains(env)
-	zonefiles = get_dns_zones(env)
-	additional_records = get_custom_dns_config(env)
-	for domain, zonefile in zonefiles:
-		records = build_zone(domain, domains, additional_records, env)
+    ret = []
+    domains = get_dns_domains(env)
+    zonefiles = get_dns_zones(env)
+    additional_records = get_custom_dns_config(env)
+    for domain, zonefile in zonefiles:
+        records = build_zone(domain, domains, additional_records, env)
 
-		# remove records that we don't dislay
-		records = [r for r in records if r[3] is not False]
+        # remove records that we don't display
+        records = [r for r in records if r[3] is not False]
 
-		# put Required at the top, then Recommended, then everythiing else
-		records.sort(key = lambda r : 0 if r[3].startswith("Required.") else (1 if r[3].startswith("Recommended.") else 2))
+        # put Required at the top, then Recommended, then everything else
+        records.sort(key=lambda r: 0 if r[3].startswith("Required.") else (1 if r[3].startswith("Recommended.") else 2))
 
-		# expand qnames
-		for i in range(len(records)):
-			if records[i][0] == None:
-				qname = domain
-			else:
-				qname = records[i][0] + "." + domain
+        # expand qnames
+        for i in range(len(records)):
+            if records[i][0] is None:
+                qname = domain
+            else:
+                qname = records[i][0] + "." + domain
 
-			records[i] = {
-				"qname": qname,
-				"rtype": records[i][1],
-				"value": records[i][2],
-				"explanation": records[i][3],
-			}
+            records[i] = {
+                "qname": qname,
+                "rtype": records[i][1],
+                "value": records[i][2],
+                "explanation": records[i][3],
+            }
 
-		# return
-		ret.append((domain, records))
-	return ret
+        # return
+        ret.append((domain, records))
+    return ret
 
 if __name__ == "__main__":
-	from utils import load_environment
-	env = load_environment()
-	for zone, records in build_recommended_dns(env):
-		for record in records:
-			print("; " + record['explanation'])
-			print(record['qname'], record['rtype'], record['value'], sep="\t")
-			print()
+    from utils import load_environment
+    env = load_environment()
+    for zone, records in build_recommended_dns(env):
+        for record in records:
+            print("; " + record['explanation'])
+            print(record['qname'], record['rtype'], record['value'], sep="\t")
+            print()
diff --git a/management/mail_log.py b/management/mail_log.py
index c506a75d..a29a750e 100755
--- a/management/mail_log.py
+++ b/management/mail_log.py
@@ -1,121 +1,130 @@
 #!/usr/bin/python3
 
-import re, os.path
+import re
+import os.path
 import dateutil.parser
 
 import mailconfig
 import utils
 
+
 def scan_mail_log(logger, env):
-	collector = {
-		"other-services": set(),
-		"imap-logins": { },
-		"postgrey": { },
-		"rejected-mail": { },
-	}
+    collector = {
+        "other-services": set(),
+        "imap-logins": {},
+        "postgrey": {},
+        "rejected-mail": {},
+    }
 
-	collector["real_mail_addresses"] = set(mailconfig.get_mail_users(env)) | set(alias[0] for alias in mailconfig.get_mail_aliases(env))
+    collector["real_mail_addresses"] = set(mailconfig.get_mail_users(env)) | set(alias[0] for alias in mailconfig.get_mail_aliases(env))
 
-	for fn in ('/var/log/mail.log.1', '/var/log/mail.log'):
-		if not os.path.exists(fn): continue
-		with open(fn, 'rb') as log:
-			for line in log:
-				line = line.decode("utf8", errors='replace')
-				scan_mail_log_line(line.strip(), collector)
+    for fn in ('/var/log/mail.log.1', '/var/log/mail.log'):
+        if not os.path.exists(fn):
+            continue
+        with open(fn, 'rb') as log:
+            for line in log:
+                line = line.decode("utf8", errors='replace')
+                scan_mail_log_line(line.strip(), collector)
 
-	if collector["imap-logins"]:
-		logger.add_heading("Recent IMAP Logins")
-		logger.print_block("The most recent login from each remote IP adddress is show.")
-		for k in utils.sort_email_addresses(collector["imap-logins"], env):
-			for ip, date in sorted(collector["imap-logins"][k].items(), key = lambda kv : kv[1]):
-				logger.print_line(k + "\t" + str(date) + "\t" + ip)
+    if collector["imap-logins"]:
+        logger.add_heading("Recent IMAP Logins")
+        logger.print_block("The most recent login from each remote IP address is shown.")
+        for k in utils.sort_email_addresses(collector["imap-logins"], env):
+            for ip, date in sorted(collector["imap-logins"][k].items(), key=lambda kv: kv[1]):
+                logger.print_line(k + "\t" + str(date) + "\t" + ip)
 
-	if collector["postgrey"]:
-		logger.add_heading("Greylisted Mail")
-		logger.print_block("The following mail was greylisted, meaning the emails were temporarily rejected. Legitimate senders will try again within ten minutes.")
-		logger.print_line("recipient" + "\t" + "received" + "\t" + "sender" + "\t" + "delivered")
-		for recipient in utils.sort_email_addresses(collector["postgrey"], env):
-			for (client_address, sender), (first_date, delivered_date) in sorted(collector["postgrey"][recipient].items(), key = lambda kv : kv[1][0]):
-				logger.print_line(recipient + "\t" + str(first_date) + "\t" + sender + "\t" + (("delivered " + str(delivered_date)) if delivered_date else "no retry yet"))
+    if collector["postgrey"]:
+        logger.add_heading("Greylisted Mail")
+        logger.print_block("The following mail was greylisted, meaning the emails were temporarily rejected. Legitimate senders will try again within ten minutes.")
+        logger.print_line("recipient" + "\t" + "received" + "\t" + "sender" + "\t" + "delivered")
+        for recipient in utils.sort_email_addresses(collector["postgrey"], env):
+            for (client_address, sender), (first_date, delivered_date) in sorted(collector["postgrey"][recipient].items(), key=lambda kv: kv[1][0]):
+                logger.print_line(recipient + "\t" + str(first_date) + "\t" + sender + "\t" + (("delivered " + str(delivered_date)) if delivered_date else "no retry yet"))
 
-	if collector["rejected-mail"]:
-		logger.add_heading("Rejected Mail")
-		logger.print_block("The following incoming mail was rejected.")
-		for k in utils.sort_email_addresses(collector["rejected-mail"], env):
-			for date, sender, message in collector["rejected-mail"][k]:
-				logger.print_line(k + "\t" + str(date) + "\t" + sender + "\t" + message)
+    if collector["rejected-mail"]:
+        logger.add_heading("Rejected Mail")
+        logger.print_block("The following incoming mail was rejected.")
+        for k in utils.sort_email_addresses(collector["rejected-mail"], env):
+            for date, sender, message in collector["rejected-mail"][k]:
+                logger.print_line(k + "\t" + str(date) + "\t" + sender + "\t" + message)
+
+    if len(collector["other-services"]) > 0:
+        logger.add_heading("Other")
+        logger.print_block("Unrecognized services in the log: " + ", ".join(collector["other-services"]))
 
-	if len(collector["other-services"]) > 0:
-		logger.add_heading("Other")
-		logger.print_block("Unrecognized services in the log: " + ", ".join(collector["other-services"]))
 
 def scan_mail_log_line(line, collector):
-	m = re.match(r"(\S+ \d+ \d+:\d+:\d+) (\S+) (\S+?)(\[\d+\])?: (.*)", line)
-	if not m: return
+    m = re.match(r"(\S+ \d+ \d+:\d+:\d+) (\S+) (\S+?)(\[\d+\])?: (.*)", line)
+    if not m:
+        return
 
-	date, system, service, pid, log = m.groups()
-	date = dateutil.parser.parse(date)
-	
-	if service == "dovecot":
-		scan_dovecot_line(date, log, collector)
+    date, system, service, pid, log = m.groups()
+    date = dateutil.parser.parse(date)
 
-	elif service == "postgrey":
-		scan_postgrey_line(date, log, collector)
+    if service == "dovecot":
+        scan_dovecot_line(date, log, collector)
 
-	elif service == "postfix/smtpd":
-		scan_postfix_smtpd_line(date, log, collector)
+    elif service == "postgrey":
+        scan_postgrey_line(date, log, collector)
 
-	elif service in ("postfix/qmgr", "postfix/pickup", "postfix/cleanup",
-			"postfix/scache", "spampd", "postfix/anvil", "postfix/master",
-			"opendkim", "postfix/lmtp", "postfix/tlsmgr"):
-		# nothing to look at
-		pass
+    elif service == "postfix/smtpd":
+        scan_postfix_smtpd_line(date, log, collector)
+
+    elif service in ("postfix/qmgr", "postfix/pickup", "postfix/cleanup",
+                     "postfix/scache", "spampd", "postfix/anvil",
+                     "postfix/master", "opendkim", "postfix/lmtp",
+                     "postfix/tlsmgr"):
+        # nothing to look at
+        pass
+
+    else:
+        collector["other-services"].add(service)
 
-	else:
-		collector["other-services"].add(service)
 
 def scan_dovecot_line(date, log, collector):
-	m = re.match("imap-login: Login: user=<(.*?)>, method=PLAIN, rip=(.*?),", log)
-	if m:
-		login, ip = m.group(1), m.group(2)
-		if ip != "127.0.0.1": # local login from webmail/zpush
-			collector["imap-logins"].setdefault(login, {})[ip] = date
+    m = re.match("imap-login: Login: user=<(.*?)>, method=PLAIN, rip=(.*?),", log)
+    if m:
+        login, ip = m.group(1), m.group(2)
+        if ip != "127.0.0.1":  # local login from webmail/zpush
+            collector["imap-logins"].setdefault(login, {})[ip] = date
+
 
 def scan_postgrey_line(date, log, collector):
-	m = re.match("action=(greylist|pass), reason=(.*?), (?:delay=\d+, )?client_name=(.*), client_address=(.*), sender=(.*), recipient=(.*)", log)
-	if m:
-		action, reason, client_name, client_address, sender, recipient = m.groups()
-		key = (client_address, sender)
-		if action == "greylist" and reason == "new":
-			collector["postgrey"].setdefault(recipient, {})[key] = (date, None)
-		elif action == "pass" and reason == "triplet found" and key in collector["postgrey"].get(recipient, {}):
-			collector["postgrey"][recipient][key] = (collector["postgrey"][recipient][key][0], date)
+    m = re.match(r"action=(greylist|pass), reason=(.*?), (?:delay=\d+, )?client_name=(.*), client_address=(.*), sender=(.*), recipient=(.*)", log)
+    if m:
+        action, reason, client_name, client_address, sender, recipient = m.groups()
+        key = (client_address, sender)
+        if action == "greylist" and reason == "new":
+            collector["postgrey"].setdefault(recipient, {})[key] = (date, None)
+        elif action == "pass" and reason == "triplet found" and key in collector["postgrey"].get(recipient, {}):
+            collector["postgrey"][recipient][key] = (collector["postgrey"][recipient][key][0], date)
+
 
 def scan_postfix_smtpd_line(date, log, collector):
-	m = re.match("NOQUEUE: reject: RCPT from .*?: (.*?); from=<(.*?)> to=<(.*?)>", log)
-	if m:
-		message, sender, recipient = m.groups()
-		if recipient in collector["real_mail_addresses"]:
-			# only log mail to real recipients
+    m = re.match("NOQUEUE: reject: RCPT from .*?: (.*?); from=<(.*?)> to=<(.*?)>", log)
+    if m:
+        message, sender, recipient = m.groups()
+        if recipient in collector["real_mail_addresses"]:
+            # only log mail to real recipients
 
-			# skip this, is reported in the greylisting report
-			if "Recipient address rejected: Greylisted" in message:
-				return
+            # skip this, is reported in the greylisting report
+            if "Recipient address rejected: Greylisted" in message:
+                return
 
-			# simplify this one
-			m = re.search(r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)", message)
-			if m:
-				message = "ip blocked: " + m.group(2)
+            # simplify this one
+            m = re.search(r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)", message)
+            if m:
+                message = "ip blocked: " + m.group(2)
 
-			# simplify this one too
-			m = re.search(r"Sender address \[.*@(.*)\] blocked using dbl.spamhaus.org; (.*)", message)
-			if m:
-				message = "domain blocked: " + m.group(2)
+            # simplify this one too
+            m = re.search(r"Sender address \[.*@(.*)\] blocked using dbl.spamhaus.org; (.*)", message)
+            if m:
+                message = "domain blocked: " + m.group(2)
 
-			collector["rejected-mail"].setdefault(recipient, []).append( (date, sender, message) )
+            collector["rejected-mail"].setdefault(recipient, []).append((date, sender, message))
 
 
 if __name__ == "__main__":
-	from status_checks import ConsoleOutput
-	env = utils.load_environment()
-	scan_mail_log(ConsoleOutput(), env)
+    from status_checks import ConsoleOutput
+    env = utils.load_environment()
+    scan_mail_log(ConsoleOutput(), env)
diff --git a/management/mailconfig.py b/management/mailconfig.py
index b95ee87e..a295aa74 100755
--- a/management/mailconfig.py
+++ b/management/mailconfig.py
@@ -1,581 +1,620 @@
 #!/usr/bin/python3
 
-import subprocess, shutil, os, sqlite3, re
+import subprocess
+import shutil
+import os
+import sqlite3
+import re
 import utils
 
+
 def validate_email(email, mode=None):
-	# There are a lot of characters permitted in email addresses, but
-	# Dovecot's sqlite driver seems to get confused if there are any
-	# unusual characters in the address. Bah. Also note that since
-	# the mailbox path name is based on the email address, the address
-	# shouldn't be absurdly long and must not have a forward slash.
+    # There are a lot of characters permitted in email addresses, but
+    # Dovecot's sqlite driver seems to get confused if there are any
+    # unusual characters in the address. Bah. Also note that since
+    # the mailbox path name is based on the email address, the address
+    # shouldn't be absurdly long and must not have a forward slash.
 
-	if len(email) > 255: return False
+    if len(email) > 255:
+        return False
 
-	if mode == 'user':
-		# For Dovecot's benefit, only allow basic characters.
-		ATEXT = r'[a-zA-Z0-9_\-]'
-	elif mode in (None, 'alias'):
-		# For aliases, we can allow any valid email address.
-		# Based on RFC 2822 and https://github.com/SyrusAkbary/validate_email/blob/master/validate_email.py,
-		# these characters are permitted in email addresses.
-		ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]' # see 3.2.4
-	else:
-		raise ValueError(mode)
+    if mode == 'user':
+        # For Dovecot's benefit, only allow basic characters.
+        ATEXT = r'[a-zA-Z0-9_\-]'
+    elif mode in (None, 'alias'):
+        # For aliases, we can allow any valid email address.
+        # Based on RFC 2822 and https://github.com/SyrusAkbary/validate_email/blob/master/validate_email.py,
+        # these characters are permitted in email addresses.
+        ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]'  # see 3.2.4
+    else:
+        raise ValueError(mode)
 
-	# per RFC 2822 3.2.4
-	DOT_ATOM_TEXT_LOCAL = ATEXT + r'+(?:\.' + ATEXT + r'+)*'
-	if mode == 'alias':
-		# For aliases, Postfix accepts '@domain.tld' format for
-		# catch-all addresses on the source side and domain aliases
-		# on the destination side. Make the local part optional.
-		DOT_ATOM_TEXT_LOCAL = '(?:' + DOT_ATOM_TEXT_LOCAL + ')?'
+    # per RFC 2822 3.2.4
+    DOT_ATOM_TEXT_LOCAL = ATEXT + r'+(?:\.' + ATEXT + r'+)*'
+    if mode == 'alias':
+        # For aliases, Postfix accepts '@domain.tld' format for
+        # catch-all addresses on the source side and domain aliases
+        # on the destination side. Make the local part optional.
+        DOT_ATOM_TEXT_LOCAL = '(?:' + DOT_ATOM_TEXT_LOCAL + ')?'
 
-	# as above, but we can require that the host part have at least
-	# one period in it, so use a "+" rather than a "*" at the end
-	DOT_ATOM_TEXT_HOST = ATEXT + r'+(?:\.' + ATEXT + r'+)+'
+    # as above, but we can require that the host part have at least
+    # one period in it, so use a "+" rather than a "*" at the end
+    DOT_ATOM_TEXT_HOST = ATEXT + r'+(?:\.' + ATEXT + r'+)+'
 
-	# per RFC 2822 3.4.1
-	ADDR_SPEC = '^(%s)@(%s)$' % (DOT_ATOM_TEXT_LOCAL, DOT_ATOM_TEXT_HOST)
+    # per RFC 2822 3.4.1
+    ADDR_SPEC = '^(%s)@(%s)$' % (DOT_ATOM_TEXT_LOCAL, DOT_ATOM_TEXT_HOST)
 
-	# Check the regular expression.
-	m = re.match(ADDR_SPEC, email)
-	if not m: return False
+    # Check the regular expression.
+    m = re.match(ADDR_SPEC, email)
+    if not m:
+        return False
 
-	# Check that the domain part is IDNA-encodable.
-	localpart, domainpart = m.groups()
-	try:
-		domainpart.encode("idna")
-	except:
-		return False
+    # Check that the domain part is IDNA-encodable.
+    localpart, domainpart = m.groups()
+    try:
+        domainpart.encode("idna")
+    except:
+        return False
+
+    return True
 
-	return True
 
 def sanitize_idn_email_address(email):
-	# Convert an IDNA-encoded email address (domain part) into Unicode
-	# before storing in our database. Chrome may IDNA-ize <input type="email">
-	# values before POSTing, so we want to normalize before putting
-	# values into the database.
-	try:
-		localpart, domainpart = email.split("@")
-		domainpart = domainpart.encode("ascii").decode("idna")
-		return localpart + "@" + domainpart
-	except:
-		# Domain part is already Unicode or not IDNA-valid, so
-		# leave unchanged.
-		return email
+    # Convert an IDNA-encoded email address (domain part) into Unicode
+    # before storing in our database. Chrome may IDNA-ize <input type="email">
+    # values before POSTing, so we want to normalize before putting
+    # values into the database.
+    try:
+        localpart, domainpart = email.split("@")
+        domainpart = domainpart.encode("ascii").decode("idna")
+        return localpart + "@" + domainpart
+    except:
+        # Domain part is already Unicode or not IDNA-valid, so
+        # leave unchanged.
+        return email
+
 
 def open_database(env, with_connection=False):
-	conn = sqlite3.connect(env["STORAGE_ROOT"] + "/mail/users.sqlite")
-	if not with_connection:
-		return conn.cursor()
-	else:
-		return conn, conn.cursor()
+    conn = sqlite3.connect(env["STORAGE_ROOT"] + "/mail/users.sqlite")
+    if not with_connection:
+        return conn.cursor()
+    else:
+        return conn, conn.cursor()
+
 
 def get_mail_users(env):
-	# Returns a flat, sorted list of all user accounts.
-	c = open_database(env)
-	c.execute('SELECT email FROM users')
-	users = [ row[0] for row in c.fetchall() ]
-	return utils.sort_email_addresses(users, env)
+    # Returns a flat, sorted list of all user accounts.
+    c = open_database(env)
+    c.execute('SELECT email FROM users')
+    users = [row[0] for row in c.fetchall()]
+    return utils.sort_email_addresses(users, env)
+
 
 def get_mail_users_ex(env, with_archived=False, with_slow_info=False):
-	# Returns a complex data structure of all user accounts, optionally
-	# including archived (status="inactive") accounts.
-	#
-	# [
-	#   {
-	#     domain: "domain.tld",
-	#     users: [
-	#       {
-	#         email: "name@domain.tld",
-	#         privileges: [ "priv1", "priv2", ... ],
-	#         status: "active",
-	#         aliases: [
-	#           ("alias@domain.tld", ["indirect.alias@domain.tld", ...]),
-	#           ...
-	#         ]
-	#       },
-	#       ...
-	#     ]
-	#   },
-	#   ...
-	# ]
+    # Returns a complex data structure of all user accounts, optionally
+    # including archived (status="inactive") accounts.
+    #
+    # [
+    #   {
+    #     domain: "domain.tld",
+    #     users: [
+    #       {
+    #         email: "name@domain.tld",
+    #         privileges: [ "priv1", "priv2", ... ],
+    #         status: "active",
+    #         aliases: [
+    #           ("alias@domain.tld", ["indirect.alias@domain.tld", ...]),
+    #           ...
+    #         ]
+    #       },
+    #       ...
+    #     ]
+    #   },
+    #   ...
+    # ]
 
-	# Pre-load all aliases.
-	aliases = get_mail_alias_map(env)
+    # Pre-load all aliases.
+    aliases = get_mail_alias_map(env)
 
-	# Get users and their privileges.
-	users = []
-	active_accounts = set()
-	c = open_database(env)
-	c.execute('SELECT email, privileges FROM users')
-	for email, privileges in c.fetchall():
-		active_accounts.add(email)
+    # Get users and their privileges.
+    users = []
+    active_accounts = set()
+    c = open_database(env)
+    c.execute('SELECT email, privileges FROM users')
+    for email, privileges in c.fetchall():
+        active_accounts.add(email)
 
-		user = {
-			"email": email,
-			"privileges": parse_privs(privileges),
-			"status": "active",
-		}
-		users.append(user)
+        user = {
+            "email": email,
+            "privileges": parse_privs(privileges),
+            "status": "active",
+        }
+        users.append(user)
 
-		if with_slow_info:
-			user["aliases"] = [
-				(alias, sorted(evaluate_mail_alias_map(alias, aliases, env)))
-				for alias in aliases.get(email.lower(), [])
-				]
-			user["mailbox_size"] = utils.du(os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes', *reversed(email.split("@"))))
+        if with_slow_info:
+            user["aliases"] = [
+                (alias, sorted(evaluate_mail_alias_map(alias, aliases, env)))
+                for alias in aliases.get(email.lower(), [])
+                ]
+            user["mailbox_size"] = utils.du(os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes', *reversed(email.split("@"))))
 
-	# Add in archived accounts.
-	if with_archived:
-		root = os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes')
-		for domain in os.listdir(root):
-			for user in os.listdir(os.path.join(root, domain)):
-				email = user + "@" + domain
-				mbox = os.path.join(root, domain, user)
-				if email in active_accounts: continue
-				user = {
-					"email": email, 
-					"privileges": "",
-					"status": "inactive",
-					"mailbox": mbox,
-				}
-				users.append(user)
-				if with_slow_info:
-					user["mailbox_size"] = utils.du(mbox)
+    # Add in archived accounts.
+    if with_archived:
+        root = os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes')
+        for domain in os.listdir(root):
+            for user in os.listdir(os.path.join(root, domain)):
+                email = user + "@" + domain
+                mbox = os.path.join(root, domain, user)
+                if email in active_accounts:
+                    continue
+                user = {
+                    "email": email,
+                    "privileges": "",
+                    "status": "inactive",
+                    "mailbox": mbox,
+                }
+                users.append(user)
+                if with_slow_info:
+                    user["mailbox_size"] = utils.du(mbox)
 
-	# Group by domain.
-	domains = { }
-	for user in users:
-		domain = get_domain(user["email"])
-		if domain not in domains:
-			domains[domain] = {
-				"domain": domain,
-				"users": []
-				}
-		domains[domain]["users"].append(user)
+    # Group by domain.
+    domains = {}
+    for user in users:
+        domain = get_domain(user["email"])
+        if domain not in domains:
+            domains[domain] = {
+                "domain": domain,
+                "users": []
+                }
+        domains[domain]["users"].append(user)
 
-	# Sort domains.
-	domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]
+    # Sort domains.
+    domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]
 
-	# Sort users within each domain first by status then lexicographically by email address.
-	for domain in domains:
-		domain["users"].sort(key = lambda user : (user["status"] != "active", user["email"]))
+    # Sort users within each domain first by status then lexicographically by email address.
+    for domain in domains:
+        domain["users"].sort(key=lambda user: (user["status"] != "active", user["email"]))
+
+    return domains
 
-	return domains
 
 def get_admins(env):
-	# Returns a set of users with admin privileges.
-	users = set()
-	for domain in get_mail_users_ex(env):
-		for user in domain["users"]:
-			if "admin" in user["privileges"]:
-				users.add(user["email"])
-	return users
+    # Returns a set of users with admin privileges.
+    users = set()
+    for domain in get_mail_users_ex(env):
+        for user in domain["users"]:
+            if "admin" in user["privileges"]:
+                users.add(user["email"])
+    return users
+
 
 def get_mail_aliases(env):
-	# Returns a sorted list of tuples of (alias, forward-to string).
-	c = open_database(env)
-	c.execute('SELECT source, destination FROM aliases')
-	aliases = { row[0]: row[1] for row in c.fetchall() } # make dict
+    # Returns a sorted list of tuples of (alias, forward-to string).
+    c = open_database(env)
+    c.execute('SELECT source, destination FROM aliases')
+    # make dict
+    aliases = {row[0]: row[1] for row in c.fetchall()}
+
+    # put in a canonical order: sort by domain, then by email address lexicographically
+    aliases = [(source, aliases[source]) for source in utils.sort_email_addresses(aliases.keys(), env)]
+    return aliases
 
-	# put in a canonical order: sort by domain, then by email address lexicographically
-	aliases = [ (source, aliases[source]) for source in utils.sort_email_addresses(aliases.keys(), env) ]
-	return aliases
 
 def get_mail_aliases_ex(env):
-	# Returns a complex data structure of all mail aliases, similar
-	# to get_mail_users_ex.
-	#
-	# [
-	#   {
-	#     domain: "domain.tld",
-	#     alias: [
-	#       {
-	#         source: "name@domain.tld",
-	#         destination: ["target1@domain.com", "target2@domain.com", ...],
-	#         required: True|False
-	#       },
-	#       ...
-	#     ]
-	#   },
-	#   ...
-	# ]
+    # Returns a complex data structure of all mail aliases, similar
+    # to get_mail_users_ex.
+    #
+    # [
+    #   {
+    #     domain: "domain.tld",
+    #     alias: [
+    #       {
+    #         source: "name@domain.tld",
+    #         destination: ["target1@domain.com", "target2@domain.com", ...],
+    #         required: True|False
+    #       },
+    #       ...
+    #     ]
+    #   },
+    #   ...
+    # ]
 
-	required_aliases = get_required_aliases(env)
-	domains = {}
-	for source, destination in get_mail_aliases(env):
-		# get alias info
-		domain = get_domain(source)
-		required = ((source in required_aliases) or (source == get_system_administrator(env)))
+    required_aliases = get_required_aliases(env)
+    domains = {}
+    for source, destination in get_mail_aliases(env):
+        # get alias info
+        domain = get_domain(source)
+        required = ((source in required_aliases) or (source == get_system_administrator(env)))
 
-		# add to list
-		if not domain in domains:
-			domains[domain] = {
-				"domain": domain,
-				"aliases": [],
-			}
-		domains[domain]["aliases"].append({
-			"source": source,
-			"destination": [d.strip() for d in destination.split(",")],
-			"required": required,
-		})
+        # add to list
+        if domain not in domains:
+            domains[domain] = {
+                "domain": domain,
+                "aliases": [],
+            }
+        domains[domain]["aliases"].append({
+            "source": source,
+            "destination": [d.strip() for d in destination.split(",")],
+            "required": required,
+        })
 
-	# Sort domains.
-	domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]
+    # Sort domains.
+    domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]
+
+    # Sort aliases within each domain first by required-ness then lexicographically by source address.
+    for domain in domains:
+        domain["aliases"].sort(key=lambda alias: (alias["required"], alias["source"]))
+    return domains
 
-	# Sort aliases within each domain first by required-ness then lexicographically by source address.
-	for domain in domains:
-		domain["aliases"].sort(key = lambda alias : (alias["required"], alias["source"]))
-	return domains
 
 def get_mail_alias_map(env):
-	aliases = { }
-	for alias, targets in get_mail_aliases(env):
-		for em in targets.split(","):
-			em = em.strip().lower()
-			aliases.setdefault(em, []).append(alias)
-	return aliases
+    aliases = {}
+    for alias, targets in get_mail_aliases(env):
+        for em in targets.split(","):
+            em = em.strip().lower()
+            aliases.setdefault(em, []).append(alias)
+    return aliases
+
+
+def evaluate_mail_alias_map(email, aliases, env):
+    ret = set()
+    for alias in aliases.get(email.lower(), []):
+        ret.add(alias)
+        ret |= evaluate_mail_alias_map(alias, aliases, env)
+    return ret
 
-def evaluate_mail_alias_map(email, aliases,  env):
-	ret = set()
-	for alias in aliases.get(email.lower(), []):
-		ret.add(alias)
-		ret |= evaluate_mail_alias_map(alias, aliases, env)
-	return ret
 
 def get_domain(emailaddr):
-	return emailaddr.split('@', 1)[1]
+    return emailaddr.split('@', 1)[1]
+
+
+def get_mail_domains(env, filter_aliases=lambda alias: True):
+    return set(
+        [get_domain(addr) for addr in get_mail_users(env)] +
+        [get_domain(source) for source, target in get_mail_aliases(env) if filter_aliases((source, target))]
+    )
 
-def get_mail_domains(env, filter_aliases=lambda alias : True):
-	return set(
-		   [get_domain(addr) for addr in get_mail_users(env)]
-		 + [get_domain(source) for source, target in get_mail_aliases(env) if filter_aliases((source, target)) ]
-		 )
 
 def add_mail_user(email, pw, privs, env):
-	# accept IDNA domain names but normalize to Unicode before going into database
-	email = sanitize_idn_email_address(email)
+    # accept IDNA domain names but normalize to Unicode before going into database
+    email = sanitize_idn_email_address(email)
 
-	# validate email
-	if email.strip() == "":
-		return ("No email address provided.", 400)
-	if not validate_email(email, mode='user'):
-		return ("Invalid email address.", 400)
+    # validate email
+    if email.strip() == "":
+        return ("No email address provided.", 400)
+    if not validate_email(email, mode='user'):
+        return ("Invalid email address.", 400)
 
-	validate_password(pw)
+    validate_password(pw)
 
-	# validate privileges
-	if privs is None or privs.strip() == "":
-		privs = []
-	else:
-		privs = privs.split("\n")
-		for p in privs:
-			validation = validate_privilege(p)
-			if validation: return validation
+    # validate privileges
+    if privs is None or privs.strip() == "":
+        privs = []
+    else:
+        privs = privs.split("\n")
+        for p in privs:
+            validation = validate_privilege(p)
+            if validation:
+                return validation
 
-	# get the database
-	conn, c = open_database(env, with_connection=True)
+    # get the database
+    conn, c = open_database(env, with_connection=True)
 
-	# hash the password
-	pw = hash_password(pw)
+    # hash the password
+    pw = hash_password(pw)
 
-	# add the user to the database
-	try:
-		c.execute("INSERT INTO users (email, password, privileges) VALUES (?, ?, ?)",
-			(email, pw, "\n".join(privs)))
-	except sqlite3.IntegrityError:
-		return ("User already exists.", 400)
+    # add the user to the database
+    try:
+        c.execute("INSERT INTO users (email, password, privileges) VALUES (?, ?, ?)",
+                  (email, pw, "\n".join(privs)))
+    except sqlite3.IntegrityError:
+        return ("User already exists.", 400)
 
-	# write databasebefore next step
-	conn.commit()
+    # write database before next step
+    conn.commit()
 
-	# Create the user's INBOX, Spam, and Drafts folders, and subscribe them.
-	# K-9 mail will poll every 90 seconds if a Drafts folder does not exist, so create it
-	# to avoid unnecessary polling.
+    # Create the user's INBOX, Spam, and Drafts folders, and subscribe them.
+    # K-9 mail will poll every 90 seconds if a Drafts folder does not exist, so create it
+    # to avoid unnecessary polling.
 
-	# Check if the mailboxes exist before creating them. When creating a user that had previously
-	# been deleted, the mailboxes will still exist because they are still on disk.
-	try:
-		existing_mboxes = utils.shell('check_output', ["doveadm", "mailbox", "list", "-u", email, "-8"], capture_stderr=True).split("\n")
-	except subprocess.CalledProcessError as e:
-		c.execute("DELETE FROM users WHERE email=?", (email,))
-		conn.commit()
-		return ("Failed to initialize the user: " + e.output.decode("utf8"), 400)
+    # Check if the mailboxes exist before creating them. When creating a user that had previously
+    # been deleted, the mailboxes will still exist because they are still on disk.
+    try:
+        existing_mboxes = utils.shell('check_output', ["doveadm", "mailbox", "list", "-u", email, "-8"], capture_stderr=True).split("\n")
+    except subprocess.CalledProcessError as e:
+        c.execute("DELETE FROM users WHERE email=?", (email,))
+        conn.commit()
+        return ("Failed to initialize the user: " + e.output.decode("utf8"), 400)
 
-	for folder in ("INBOX", "Spam", "Drafts"):
-		if folder not in existing_mboxes:
-			utils.shell('check_call', ["doveadm", "mailbox", "create", "-u", email, "-s", folder])
+    for folder in ("INBOX", "Spam", "Drafts"):
+        if folder not in existing_mboxes:
+            utils.shell('check_call', ["doveadm", "mailbox", "create", "-u", email, "-s", folder])
+
+    # Update things in case any new domains are added.
+    return kick(env, "mail user added")
 
-	# Update things in case any new domains are added.
-	return kick(env, "mail user added")
 
 def set_mail_password(email, pw, env):
-	# accept IDNA domain names but normalize to Unicode before going into database
-	email = sanitize_idn_email_address(email)
+    # accept IDNA domain names but normalize to Unicode before going into database
+    email = sanitize_idn_email_address(email)
 
-	# validate that password is acceptable
-	validate_password(pw)
-	
-	# hash the password
-	pw = hash_password(pw)
+    # validate that password is acceptable
+    validate_password(pw)
+
+    # hash the password
+    pw = hash_password(pw)
+
+    # update the database
+    conn, c = open_database(env, with_connection=True)
+    c.execute("UPDATE users SET password=? WHERE email=?", (pw, email))
+    if c.rowcount != 1:
+        return ("That's not a user (%s)." % email, 400)
+    conn.commit()
+    return "OK"
 
-	# update the database
-	conn, c = open_database(env, with_connection=True)
-	c.execute("UPDATE users SET password=? WHERE email=?", (pw, email))
-	if c.rowcount != 1:
-		return ("That's not a user (%s)." % email, 400)
-	conn.commit()
-	return "OK"
 
 def hash_password(pw):
-	# Turn the plain password into a Dovecot-format hashed password, meaning
-	# something like "{SCHEME}hashedpassworddata".
-	# http://wiki2.dovecot.org/Authentication/PasswordSchemes
-	return utils.shell('check_output', ["/usr/bin/doveadm", "pw", "-s", "SHA512-CRYPT", "-p", pw]).strip()
+    # Turn the plain password into a Dovecot-format hashed password, meaning
+    # something like "{SCHEME}hashedpassworddata".
+    # http://wiki2.dovecot.org/Authentication/PasswordSchemes
+    return utils.shell('check_output', ["/usr/bin/doveadm", "pw", "-s", "SHA512-CRYPT", "-p", pw]).strip()
+
 
 def get_mail_password(email, env):
-	# Gets the hashed password for a user. Passwords are stored in Dovecot's
-	# password format, with a prefixed scheme.
-	# http://wiki2.dovecot.org/Authentication/PasswordSchemes
-	# update the database
-	c = open_database(env)
-	c.execute('SELECT password FROM users WHERE email=?', (email,))
-	rows = c.fetchall()
-	if len(rows) != 1:
-		raise ValueError("That's not a user (%s)." % email)
-	return rows[0][0]
+    # Gets the hashed password for a user. Passwords are stored in Dovecot's
+    # password format, with a prefixed scheme.
+    # http://wiki2.dovecot.org/Authentication/PasswordSchemes
+    # update the database
+    c = open_database(env)
+    c.execute('SELECT password FROM users WHERE email=?', (email,))
+    rows = c.fetchall()
+    if len(rows) != 1:
+        raise ValueError("That's not a user (%s)." % email)
+    return rows[0][0]
+
 
 def remove_mail_user(email, env):
-	# accept IDNA domain names but normalize to Unicode before going into database
-	email = sanitize_idn_email_address(email)
+    # accept IDNA domain names but normalize to Unicode before going into database
+    email = sanitize_idn_email_address(email)
 
-	# remove
-	conn, c = open_database(env, with_connection=True)
-	c.execute("DELETE FROM users WHERE email=?", (email,))
-	if c.rowcount != 1:
-		return ("That's not a user (%s)." % email, 400)
-	conn.commit()
+    # remove
+    conn, c = open_database(env, with_connection=True)
+    c.execute("DELETE FROM users WHERE email=?", (email,))
+    if c.rowcount != 1:
+        return ("That's not a user (%s)." % email, 400)
+    conn.commit()
+
+    # Update things in case any domains are removed.
+    return kick(env, "mail user removed")
 
-	# Update things in case any domains are removed.
-	return kick(env, "mail user removed")
 
 def parse_privs(value):
-	return [p for p in value.split("\n") if p.strip() != ""]
+    return [p for p in value.split("\n") if p.strip() != ""]
+
 
 def get_mail_user_privileges(email, env):
-	# accept IDNA domain names but normalize to Unicode before going into database
-	email = sanitize_idn_email_address(email)
+    # accept IDNA domain names but normalize to Unicode before going into database
+    email = sanitize_idn_email_address(email)
+
+    # get privs
+    c = open_database(env)
+    c.execute('SELECT privileges FROM users WHERE email=?', (email,))
+    rows = c.fetchall()
+    if len(rows) != 1:
+        return ("That's not a user (%s)." % email, 400)
+    return parse_privs(rows[0][0])
 
-	# get privs
-	c = open_database(env)
-	c.execute('SELECT privileges FROM users WHERE email=?', (email,))
-	rows = c.fetchall()
-	if len(rows) != 1:
-		return ("That's not a user (%s)." % email, 400)
-	return parse_privs(rows[0][0])
 
 def validate_privilege(priv):
-	if "\n" in priv or priv.strip() == "":
-		return ("That's not a valid privilege (%s)." % priv, 400)
-	return None
+    if "\n" in priv or priv.strip() == "":
+        return ("That's not a valid privilege (%s)." % priv, 400)
+    return None
+
 
 def add_remove_mail_user_privilege(email, priv, action, env):
-	# accept IDNA domain names but normalize to Unicode before going into database
-	email = sanitize_idn_email_address(email)
+    # accept IDNA domain names but normalize to Unicode before going into database
+    email = sanitize_idn_email_address(email)
 
-	# validate
-	validation = validate_privilege(priv)
-	if validation: return validation
+    # validate
+    validation = validate_privilege(priv)
+    if validation:
+        return validation
 
-	# get existing privs, but may fail
-	privs = get_mail_user_privileges(email, env)
-	if isinstance(privs, tuple): return privs # error
+    # get existing privs, but may fail
+    privs = get_mail_user_privileges(email, env)
+    # error
+    if isinstance(privs, tuple):
+        return privs
 
-	# update privs set
-	if action == "add":
-		if priv not in privs:
-			privs.append(priv)
-	elif action == "remove":
-		privs = [p for p in privs if p != priv]
-	else:
-		return ("Invalid action.", 400)
+    # update privs set
+    if action == "add":
+        if priv not in privs:
+            privs.append(priv)
+    elif action == "remove":
+        privs = [p for p in privs if p != priv]
+    else:
+        return ("Invalid action.", 400)
 
-	# commit to database
-	conn, c = open_database(env, with_connection=True)
-	c.execute("UPDATE users SET privileges=? WHERE email=?", ("\n".join(privs), email))
-	if c.rowcount != 1:
-		return ("Something went wrong.", 400)
-	conn.commit()
+    # commit to database
+    conn, c = open_database(env, with_connection=True)
+    c.execute("UPDATE users SET privileges=? WHERE email=?", ("\n".join(privs), email))
+    if c.rowcount != 1:
+        return ("Something went wrong.", 400)
+    conn.commit()
+
+    return "OK"
 
-	return "OK"
 
 def add_mail_alias(source, destination, env, update_if_exists=False, do_kick=True):
-	# accept IDNA domain names but normalize to Unicode before going into database
-	source = sanitize_idn_email_address(source)
+    # accept IDNA domain names but normalize to Unicode before going into database
+    source = sanitize_idn_email_address(source)
 
-	# validate source
-	if source.strip() == "":
-		return ("No incoming email address provided.", 400)
-	if not validate_email(source, mode='alias'):
-		return ("Invalid incoming email address (%s)." % source, 400)
+    # validate source
+    if source.strip() == "":
+        return ("No incoming email address provided.", 400)
+    if not validate_email(source, mode='alias'):
+        return ("Invalid incoming email address (%s)." % source, 400)
 
-	# validate destination
-	dests = []
-	destination = destination.strip()
-	if validate_email(destination, mode='alias'):
-		# Oostfix allows a single @domain.tld as the destination, which means
-		# the local part on the address is preserved in the rewrite.
-		dests.append(sanitize_idn_email_address(destination))
-	else:
-		# Parse comma and \n-separated destination emails & validate. In this
-		# case, the recipients must be complete email addresses.
-		for line in destination.split("\n"):
-			for email in line.split(","):
-				email = email.strip()
-				email = sanitize_idn_email_address(email) # Unicode => IDNA
-				if email == "": continue
-				if not validate_email(email):
-					return ("Invalid destination email address (%s)." % email, 400)
-				dests.append(email)
-	if len(destination) == 0:
-		return ("No destination email address(es) provided.", 400)
-	destination = ",".join(dests)
+    # validate destination
+    dests = []
+    destination = destination.strip()
+    if validate_email(destination, mode='alias'):
+        # Postfix allows a single @domain.tld as the destination, which means
+        # the local part on the address is preserved in the rewrite.
+        dests.append(sanitize_idn_email_address(destination))
+    else:
+        # Parse comma and \n-separated destination emails & validate. In this
+        # case, the recipients must be complete email addresses.
+        for line in destination.split("\n"):
+            for email in line.split(","):
+                email = email.strip()
+                # Unicode => IDNA
+                email = sanitize_idn_email_address(email)
+                if email == "":
+                    continue
+                if not validate_email(email):
+                    return ("Invalid destination email address (%s)." % email, 400)
+                dests.append(email)
+    if len(destination) == 0:
+        return ("No destination email address(es) provided.", 400)
+    destination = ",".join(dests)
 
-	# save to db
-	conn, c = open_database(env, with_connection=True)
-	try:
-		c.execute("INSERT INTO aliases (source, destination) VALUES (?, ?)", (source, destination))
-		return_status = "alias added"
-	except sqlite3.IntegrityError:
-		if not update_if_exists:
-			return ("Alias already exists (%s)." % source, 400)
-		else:
-			c.execute("UPDATE aliases SET destination = ? WHERE source = ?", (destination, source))
-			return_status = "alias updated"
+    # save to db
+    conn, c = open_database(env, with_connection=True)
+    try:
+        c.execute("INSERT INTO aliases (source, destination) VALUES (?, ?)", (source, destination))
+        return_status = "alias added"
+    except sqlite3.IntegrityError:
+        if not update_if_exists:
+            return ("Alias already exists (%s)." % source, 400)
+        else:
+            c.execute("UPDATE aliases SET destination = ? WHERE source = ?", (destination, source))
+            return_status = "alias updated"
 
-	conn.commit()
+    conn.commit()
+
+    if do_kick:
+        # Update things in case any new domains are added.
+        return kick(env, return_status)
 
-	if do_kick:
-		# Update things in case any new domains are added.
-		return kick(env, return_status)
 
 def remove_mail_alias(source, env, do_kick=True):
-	# accept IDNA domain names but normalize to Unicode before going into database
-	source = sanitize_idn_email_address(source)
+    # accept IDNA domain names but normalize to Unicode before going into database
+    source = sanitize_idn_email_address(source)
 
-	# remove
-	conn, c = open_database(env, with_connection=True)
-	c.execute("DELETE FROM aliases WHERE source=?", (source,))
-	if c.rowcount != 1:
-		return ("That's not an alias (%s)." % source, 400)
-	conn.commit()
+    # remove
+    conn, c = open_database(env, with_connection=True)
+    c.execute("DELETE FROM aliases WHERE source=?", (source,))
+    if c.rowcount != 1:
+        return ("That's not an alias (%s)." % source, 400)
+    conn.commit()
+
+    if do_kick:
+        # Update things in case any domains are removed.
+        return kick(env, "alias removed")
 
-	if do_kick:
-		# Update things in case any domains are removed.
-		return kick(env, "alias removed")
 
 def get_system_administrator(env):
-	return "administrator@" + env['PRIMARY_HOSTNAME']
+    return "administrator@" + env['PRIMARY_HOSTNAME']
+
 
 def get_required_aliases(env):
-	# These are the aliases that must exist.
-	aliases = set()
+    # These are the aliases that must exist.
+    aliases = set()
 
-	# The hostmaster alias is exposed in the DNS SOA for each zone.
-	aliases.add("hostmaster@" + env['PRIMARY_HOSTNAME'])
+    # The hostmaster alias is exposed in the DNS SOA for each zone.
+    aliases.add("hostmaster@" + env['PRIMARY_HOSTNAME'])
 
-	# Get a list of domains we serve mail for, except ones for which the only
-	# email on that domain is a postmaster/admin alias to the administrator
-	# or a wildcard alias (since it will forward postmaster/admin).
-	real_mail_domains = get_mail_domains(env,
-		filter_aliases = lambda alias :
-			((not alias[0].startswith("postmaster@") and not alias[0].startswith("admin@"))	or alias[1] != get_system_administrator(env))
-			and not alias[0].startswith("@")
-			)
+    # Get a list of domains we serve mail for, except ones for which the only
+    # email on that domain is a postmaster/admin alias to the administrator
+    # or a wildcard alias (since it will forward postmaster/admin).
+    # JMT: no clean way to PEP8 wrap this
+    real_mail_domains = get_mail_domains(env,
+        filter_aliases=lambda alias:
+            ((not alias[0].startswith("postmaster@") and not alias[0].startswith("admin@")) or alias[1] != get_system_administrator(env))
+            and not alias[0].startswith("@")
+    )
 
-	# Create postmaster@ and admin@ for all domains we serve mail on.
-	# postmaster@ is assumed to exist by our Postfix configuration. admin@
-	# isn't anything, but it might save the user some trouble e.g. when
-	# buying an SSL certificate.
-	for domain in real_mail_domains:
-		aliases.add("postmaster@" + domain)
-		aliases.add("admin@" + domain)
+    # Create postmaster@ and admin@ for all domains we serve mail on.
+    # postmaster@ is assumed to exist by our Postfix configuration. admin@
+    # isn't anything, but it might save the user some trouble e.g. when
+    # buying an SSL certificate.
+    for domain in real_mail_domains:
+        aliases.add("postmaster@" + domain)
+        aliases.add("admin@" + domain)
+
+    return aliases
 
-	return aliases
 
 def kick(env, mail_result=None):
-	results = []
+    results = []
 
-	# Inclde the current operation's result in output.
+    # Include the current operation's result in output.
 
-	if mail_result is not None:
-		results.append(mail_result + "\n")
+    if mail_result is not None:
+        results.append(mail_result + "\n")
 
-	# Ensure every required alias exists.
+    # Ensure every required alias exists.
 
-	existing_users = get_mail_users(env)
-	existing_aliases = get_mail_aliases(env)
-	required_aliases = get_required_aliases(env)
+    existing_users = get_mail_users(env)
+    existing_aliases = get_mail_aliases(env)
+    required_aliases = get_required_aliases(env)
 
-	def ensure_admin_alias_exists(source):
-		# If a user account exists with that address, we're good.
-		if source in existing_users:
-			return
+    def ensure_admin_alias_exists(source):
+        # If a user account exists with that address, we're good.
+        if source in existing_users:
+            return
 
-		# Does this alias exists?
-		for s, t in existing_aliases:
-			if s == source:
-				return
-		# Doesn't exist.
-		administrator = get_system_administrator(env)
-		add_mail_alias(source, administrator, env, do_kick=False)
-		results.append("added alias %s (=> %s)\n" % (source, administrator))
+        # Does this alias exist?
+        for s, t in existing_aliases:
+            if s == source:
+                return
+        # Doesn't exist.
+        administrator = get_system_administrator(env)
+        add_mail_alias(source, administrator, env, do_kick=False)
+        results.append("added alias %s (=> %s)\n" % (source, administrator))
 
+    for alias in required_aliases:
+        ensure_admin_alias_exists(alias)
 
-	for alias in required_aliases:
-		ensure_admin_alias_exists(alias)
+    # Remove auto-generated postmaster/admin on domains we no
+    # longer have any other email addresses for.
+    for source, target in existing_aliases:
+        user, domain = source.split("@")
+        if user in ("postmaster", "admin") and source not in required_aliases and target == get_system_administrator(env):
+            remove_mail_alias(source, env, do_kick=False)
+            results.append("removed alias %s (was to %s; domain no longer used for email)\n" % (source, target))
 
-	# Remove auto-generated postmaster/admin on domains we no
-	# longer have any other email addresses for.
-	for source, target in existing_aliases:
-		user, domain = source.split("@")
-		if user in ("postmaster", "admin") \
-			and source not in required_aliases \
-			and target == get_system_administrator(env):
-			remove_mail_alias(source, env, do_kick=False)
-			results.append("removed alias %s (was to %s; domain no longer used for email)\n" % (source, target))
+    # Update DNS and nginx in case any domains are added/removed.
 
-	# Update DNS and nginx in case any domains are added/removed.
+    from dns_update import do_dns_update
+    results.append(do_dns_update(env))
 
-	from dns_update import do_dns_update
-	results.append( do_dns_update(env) )
+    from web_update import do_web_update
+    results.append(do_web_update(env))
 
-	from web_update import do_web_update
-	results.append( do_web_update(env) )
+    return "".join(s for s in results if s != "")
 
-	return "".join(s for s in results if s != "")
 
 def validate_password(pw):
-	# validate password
-	if pw.strip() == "":
-		raise ValueError("No password provided.")
-	if re.search(r"[\s]", pw):
-		raise ValueError("Passwords cannot contain spaces.")
-	if len(pw) < 4:
-		raise ValueError("Passwords must be at least four characters.")
+    # validate password
+    if pw.strip() == "":
+        raise ValueError("No password provided.")
+    if re.search(r"[\s]", pw):
+        raise ValueError("Passwords cannot contain spaces.")
+    if len(pw) < 4:
+        raise ValueError("Passwords must be at least four characters.")
 
 
 if __name__ == "__main__":
-	import sys
-	if len(sys.argv) > 2 and sys.argv[1] == "validate-email":
-		# Validate that we can create a Dovecot account for a given string.
-		if validate_email(sys.argv[2], mode='user'):
-			sys.exit(0)
-		else:
-			sys.exit(1)
+    import sys
+    if len(sys.argv) > 2 and sys.argv[1] == "validate-email":
+        # Validate that we can create a Dovecot account for a given string.
+        if validate_email(sys.argv[2], mode='user'):
+            sys.exit(0)
+        else:
+            sys.exit(1)
 
-	if len(sys.argv) > 1 and sys.argv[1] == "update":
-		from utils import load_environment
-		print(kick(load_environment()))
+    if len(sys.argv) > 1 and sys.argv[1] == "update":
+        from utils import load_environment
+        print(kick(load_environment()))
diff --git a/management/status_checks.py b/management/status_checks.py
index 23c9700d..b484fc7e 100755
--- a/management/status_checks.py
+++ b/management/status_checks.py
@@ -6,10 +6,17 @@
 
 __ALL__ = ['check_certificate']
 
-import os, os.path, re, subprocess, datetime, multiprocessing.pool
+import os
+import os.path
+import re
+import subprocess
+import datetime
+import multiprocessing.pool
 
-import dns.reversename, dns.resolver
-import dateutil.parser, dateutil.tz
+import dns.reversename
+import dns.resolver
+import dateutil.parser
+import dateutil.tz
 
 from dns_update import get_dns_zones, build_tlsa_record, get_custom_dns_config
 from web_update import get_web_domains, get_domain_ssl_files
@@ -17,28 +24,30 @@ from mailconfig import get_mail_domains, get_mail_aliases
 
 from utils import shell, sort_domains, load_env_vars_from_file
 
+
 def run_checks(env, output, pool):
-	# run systems checks
-	output.add_heading("System")
+    # run system checks
+    output.add_heading("System")
 
-	# check that services are running
-	if not run_services_checks(env, output, pool):
-		# If critical services are not running, stop. If bind9 isn't running,
-		# all later DNS checks will timeout and that will take forever to
-		# go through, and if running over the web will cause a fastcgi timeout.
-		return
+    # check that services are running
+    if not run_services_checks(env, output, pool):
+        # If critical services are not running, stop. If bind9 isn't running,
+        # all later DNS checks will timeout and that will take forever to
+        # go through, and if running over the web will cause a fastcgi timeout.
+        return
 
-	# clear bind9's DNS cache so our DNS checks are up to date
-	# (ignore errors; if bind9/rndc isn't running we'd already report
-	# that in run_services checks.)
-	shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True)
-	
-	run_system_checks(env, output)
+    # clear bind9's DNS cache so our DNS checks are up to date
+    # (ignore errors; if bind9/rndc isn't running we'd already report
+    # that in run_services checks.)
+    shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True)
 
-	# perform other checks asynchronously
+    run_system_checks(env, output)
+
+    # perform other checks asynchronously
+
+    run_network_checks(env, output)
+    run_domain_checks(env, output, pool)
 
-	run_network_checks(env, output)
-	run_domain_checks(env, output, pool)
 
 def get_ssh_port():
     # Returns ssh port
@@ -51,741 +60,768 @@ def get_ssh_port():
         if e == "port":
             returnNext = True
 
+
 def run_services_checks(env, output, pool):
-	# Check that system services are running.
+    # Check that system services are running.
 
-	services = [
-		{ "name": "Local DNS (bind9)", "port": 53, "public": False, },
-		#{ "name": "NSD Control", "port": 8952, "public": False, },
-		{ "name": "Local DNS Control (bind9/rndc)", "port": 953, "public": False, },
-		{ "name": "Dovecot LMTP LDA", "port": 10026, "public": False, },
-		{ "name": "Postgrey", "port": 10023, "public": False, },
-		{ "name": "Spamassassin", "port": 10025, "public": False, },
-		{ "name": "OpenDKIM", "port": 8891, "public": False, },
-		{ "name": "OpenDMARC", "port": 8893, "public": False, },
-		{ "name": "Memcached", "port": 11211, "public": False, },
-		{ "name": "Sieve (dovecot)", "port": 4190, "public": True, },
-		{ "name": "Mail-in-a-Box Management Daemon", "port": 10222, "public": False, },
+    services = [
+        {"name": "Local DNS (bind9)", "port": 53, "public": False, },
+        # {"name": "NSD Control", "port": 8952, "public": False, },
+        {"name": "Local DNS Control (bind9/rndc)", "port": 953, "public": False, },
+        {"name": "Dovecot LMTP LDA", "port": 10026, "public": False, },
+        {"name": "Postgrey", "port": 10023, "public": False, },
+        {"name": "Spamassassin", "port": 10025, "public": False, },
+        {"name": "OpenDKIM", "port": 8891, "public": False, },
+        {"name": "OpenDMARC", "port": 8893, "public": False, },
+        {"name": "Memcached", "port": 11211, "public": False, },
+        {"name": "Sieve (dovecot)", "port": 4190, "public": True, },
+        {"name": "Mail-in-a-Box Management Daemon", "port": 10222, "public": False, },
 
-		{ "name": "SSH Login (ssh)", "port": get_ssh_port(), "public": True, },
-		{ "name": "Public DNS (nsd4)", "port": 53, "public": True, },
-		{ "name": "Incoming Mail (SMTP/postfix)", "port": 25, "public": True, },
-		{ "name": "Outgoing Mail (SMTP 587/postfix)", "port": 587, "public": True, },
-		#{ "name": "Postfix/master", "port": 10587, "public": True, },
-		{ "name": "IMAPS (dovecot)", "port": 993, "public": True, },
-		{ "name": "HTTP Web (nginx)", "port": 80, "public": True, },
-		{ "name": "HTTPS Web (nginx)", "port": 443, "public": True, },
-	]
+        {"name": "SSH Login (ssh)", "port": get_ssh_port(), "public": True, },
+        {"name": "Public DNS (nsd4)", "port": 53, "public": True, },
+        {"name": "Incoming Mail (SMTP/postfix)", "port": 25, "public": True, },
+        {"name": "Outgoing Mail (SMTP 587/postfix)", "port": 587, "public": True, },
+        #{"name": "Postfix/master", "port": 10587, "public": True, },
+        {"name": "IMAPS (dovecot)", "port": 993, "public": True, },
+        {"name": "HTTP Web (nginx)", "port": 80, "public": True, },
+        {"name": "HTTPS Web (nginx)", "port": 443, "public": True, },
+    ]
 
-	all_running = True
-	fatal = False
-	ret = pool.starmap(check_service, ((i, service, env) for i, service in enumerate(services)), chunksize=1)
-	for i, running, fatal2, output2 in sorted(ret):
-		all_running = all_running and running
-		fatal = fatal or fatal2
-		output2.playback(output)
+    all_running = True
+    fatal = False
+    ret = pool.starmap(check_service, ((i, service, env) for i, service in enumerate(services)), chunksize=1)
+    for i, running, fatal2, output2 in sorted(ret):
+        all_running = all_running and running
+        fatal = fatal or fatal2
+        output2.playback(output)
 
-	if all_running:
-		output.print_ok("All system services are running.")
+    if all_running:
+        output.print_ok("All system services are running.")
+
+    return not fatal
 
-	return not fatal
 
 def check_service(i, service, env):
-	import socket
-	output = BufferedOutput()
-	running = False
-	fatal = False
-	s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-	s.settimeout(1)
-	try:
-		try:
-			s.connect((
-				"127.0.0.1" if not service["public"] else env['PUBLIC_IP'],
-				service["port"]))
-			running = True
-		except OSError as e1:
-			if service["public"] and service["port"] != 53:
-				# For public services (except DNS), try the private IP as a fallback.
-				s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-				s1.settimeout(1)
-				try:
-					s1.connect(("127.0.0.1", service["port"]))
-					output.print_error("%s is running but is not publicly accessible at %s:%d (%s)." % (service['name'], env['PUBLIC_IP'], service['port'], str(e1)))
-				except:
-					raise e1
-				finally:
-					s1.close()
-			else:
-				raise
+    import socket
+    output = BufferedOutput()
+    running = False
+    fatal = False
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.settimeout(1)
+    try:
+        try:
+            s.connect((
+                "127.0.0.1" if not service["public"] else env['PUBLIC_IP'],
+                service["port"]))
+            running = True
+        except OSError as e1:
+            if service["public"] and service["port"] != 53:
+                # For public services (except DNS), try the private IP as a fallback.
+                s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                s1.settimeout(1)
+                try:
+                    s1.connect(("127.0.0.1", service["port"]))
+                    output.print_error("%s is running but is not publicly accessible at %s:%d (%s)." % (service['name'], env['PUBLIC_IP'], service['port'], str(e1)))
+                except:
+                    raise e1
+                finally:
+                    s1.close()
+            else:
+                raise
 
-	except OSError as e:
-		output.print_error("%s is not running (%s; port %d)." % (service['name'], str(e), service['port']))
+    except OSError as e:
+        output.print_error("%s is not running (%s; port %d)." % (service['name'], str(e), service['port']))
 
-		# Why is nginx not running?
-		if service["port"] in (80, 443):
-			output.print_line(shell('check_output', ['nginx', '-t'], capture_stderr=True, trap=True)[1].strip())
+        # Why is nginx not running?
+        if service["port"] in (80, 443):
+            output.print_line(shell('check_output', ['nginx', '-t'], capture_stderr=True, trap=True)[1].strip())
 
-		# Flag if local DNS is not running.
-		if service["port"] == 53 and service["public"] == False:
-			fatal = True
-	finally:
-		s.close()
+        # Flag if local DNS is not running.
+        if service["port"] == 53 and service["public"] is False:
+            fatal = True
+    finally:
+        s.close()
+
+    return (i, running, fatal, output)
 
-	return (i, running, fatal, output)
 
 def run_system_checks(env, output):
-	check_ssh_password(env, output)
-	check_software_updates(env, output)
-	check_system_aliases(env, output)
-	check_free_disk_space(env, output)
+    check_ssh_password(env, output)
+    check_software_updates(env, output)
+    check_system_aliases(env, output)
+    check_free_disk_space(env, output)
+
 
 def check_ssh_password(env, output):
-	# Check that SSH login with password is disabled. The openssh-server
-	# package may not be installed so check that before trying to access
-	# the configuration file.
-	if not os.path.exists("/etc/ssh/sshd_config"):
-		return
-	sshd = open("/etc/ssh/sshd_config").read()
-	if re.search("\nPasswordAuthentication\s+yes", sshd) \
-		or not re.search("\nPasswordAuthentication\s+no", sshd):
-		output.print_error("""The SSH server on this machine permits password-based login. A more secure
-			way to log in is using a public key. Add your SSH public key to $HOME/.ssh/authorized_keys, check
-			that you can log in without a password, set the option 'PasswordAuthentication no' in
-			/etc/ssh/sshd_config, and then restart the openssh via 'sudo service ssh restart'.""")
-	else:
-		output.print_ok("SSH disallows password-based login.")
+    # Check that SSH login with password is disabled. The openssh-server
+    # package may not be installed so check that before trying to access
+    # the configuration file.
+    if not os.path.exists("/etc/ssh/sshd_config"):
+        return
+    sshd = open("/etc/ssh/sshd_config").read()
+    if re.search("\nPasswordAuthentication\s+yes", sshd) or not re.search("\nPasswordAuthentication\s+no", sshd):
+        output.print_error("""The SSH server on this machine permits password-based login. A more secure
+            way to log in is using a public key. Add your SSH public key to $HOME/.ssh/authorized_keys, check
+            that you can log in without a password, set the option 'PasswordAuthentication no' in
+            /etc/ssh/sshd_config, and then restart the openssh via 'sudo service ssh restart'.""")
+    else:
+        output.print_ok("SSH disallows password-based login.")
+
 
 def check_software_updates(env, output):
-	# Check for any software package updates.
-	pkgs = list_apt_updates(apt_update=False)
-	if os.path.exists("/var/run/reboot-required"):
-		output.print_error("System updates have been installed and a reboot of the machine is required.")
-	elif len(pkgs) == 0:
-		output.print_ok("System software is up to date.")
-	else:
-		output.print_error("There are %d software packages that can be updated." % len(pkgs))
-		for p in pkgs:
-			output.print_line("%s (%s)" % (p["package"], p["version"]))
+    # Check for any software package updates.
+    pkgs = list_apt_updates(apt_update=False)
+    if os.path.exists("/var/run/reboot-required"):
+        output.print_error("System updates have been installed and a reboot of the machine is required.")
+    elif len(pkgs) == 0:
+        output.print_ok("System software is up to date.")
+    else:
+        output.print_error("There are %d software packages that can be updated." % len(pkgs))
+        for p in pkgs:
+            output.print_line("%s (%s)" % (p["package"], p["version"]))
+
 
 def check_system_aliases(env, output):
-	# Check that the administrator alias exists since that's where all
-	# admin email is automatically directed.
-	check_alias_exists("administrator@" + env['PRIMARY_HOSTNAME'], env, output)
+    # Check that the administrator alias exists since that's where all
+    # admin email is automatically directed.
+    check_alias_exists("administrator@" + env['PRIMARY_HOSTNAME'], env, output)
+
 
 def check_free_disk_space(env, output):
-	# Check free disk space.
-	st = os.statvfs(env['STORAGE_ROOT'])
-	bytes_total = st.f_blocks * st.f_frsize
-	bytes_free = st.f_bavail * st.f_frsize
-	disk_msg = "The disk has %s GB space remaining." % str(round(bytes_free/1024.0/1024.0/1024.0*10.0)/10.0)
-	if bytes_free > .3 * bytes_total:
-		output.print_ok(disk_msg)
-	elif bytes_free > .15 * bytes_total:
-		output.print_warning(disk_msg)
-	else:
-		output.print_error(disk_msg)
+    # Check free disk space.
+    st = os.statvfs(env['STORAGE_ROOT'])
+    bytes_total = st.f_blocks * st.f_frsize
+    bytes_free = st.f_bavail * st.f_frsize
+    disk_msg = "The disk has %s GB space remaining." % str(round(bytes_free/1024.0/1024.0/1024.0*10.0)/10.0)
+    if bytes_free > .3 * bytes_total:
+        output.print_ok(disk_msg)
+    elif bytes_free > .15 * bytes_total:
+        output.print_warning(disk_msg)
+    else:
+        output.print_error(disk_msg)
+
 
 def run_network_checks(env, output):
-	# Also see setup/network-checks.sh.
+    # Also see setup/network-checks.sh.
 
-	output.add_heading("Network")
+    output.add_heading("Network")
 
-	# Stop if we cannot make an outbound connection on port 25. Many residential
-	# networks block outbound port 25 to prevent their network from sending spam.
-	# See if we can reach one of Google's MTAs with a 5-second timeout.
-	code, ret = shell("check_call", ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"], trap=True)
-	if ret == 0:
-		output.print_ok("Outbound mail (SMTP port 25) is not blocked.")
-	else:
-		output.print_error("""Outbound mail (SMTP port 25) seems to be blocked by your network. You
-			will not be able to send any mail. Many residential networks block port 25 to prevent hijacked
-			machines from being able to send spam. A quick connection test to Google's mail server on port 25
-			failed.""")
+    # Stop if we cannot make an outbound connection on port 25. Many residential
+    # networks block outbound port 25 to prevent their network from sending spam.
+    # See if we can reach one of Google's MTAs with a 5-second timeout.
+    code, ret = shell("check_call", ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"], trap=True)
+    if ret == 0:
+        output.print_ok("Outbound mail (SMTP port 25) is not blocked.")
+    else:
+        output.print_error("""Outbound mail (SMTP port 25) seems to be blocked by your network. You
+            will not be able to send any mail. Many residential networks block port 25 to prevent hijacked
+            machines from being able to send spam. A quick connection test to Google's mail server on port 25
+            failed.""")
+
+    # Stop if the IPv4 address is listed in the ZEN Spamhaus Block List.
+    # The user might have ended up on an IP address that was previously in use
+    # by a spammer, or the user may be deploying on a residential network. We
+    # will not be able to reliably send mail in these cases.
+    rev_ip4 = ".".join(reversed(env['PUBLIC_IP'].split('.')))
+    zen = query_dns(rev_ip4+'.zen.spamhaus.org', 'A', nxdomain=None)
+    if zen is None:
+        output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
+    else:
+        output.print_error("""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
+            which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
+                           % (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))
 
-	# Stop if the IPv4 address is listed in the ZEN Spamhaus Block List.
-	# The user might have ended up on an IP address that was previously in use
-	# by a spammer, or the user may be deploying on a residential network. We
-	# will not be able to reliably send mail in these cases.
-	rev_ip4 = ".".join(reversed(env['PUBLIC_IP'].split('.')))
-	zen = query_dns(rev_ip4+'.zen.spamhaus.org', 'A', nxdomain=None)
-	if zen is None:
-		output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
-	else:
-		output.print_error("""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
-			which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
-			% (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))
 
 def run_domain_checks(env, output, pool):
-	# Get the list of domains we handle mail for.
-	mail_domains = get_mail_domains(env)
+    # Get the list of domains we handle mail for.
+    mail_domains = get_mail_domains(env)
 
-	# Get the list of domains we serve DNS zones for (i.e. does not include subdomains).
-	dns_zonefiles = dict(get_dns_zones(env))
-	dns_domains = set(dns_zonefiles)
+    # Get the list of domains we serve DNS zones for (i.e. does not include subdomains).
+    dns_zonefiles = dict(get_dns_zones(env))
+    dns_domains = set(dns_zonefiles)
 
-	# Get the list of domains we serve HTTPS for.
-	web_domains = set(get_web_domains(env))
+    # Get the list of domains we serve HTTPS for.
+    web_domains = set(get_web_domains(env))
 
-	domains_to_check = mail_domains | dns_domains | web_domains
+    domains_to_check = mail_domains | dns_domains | web_domains
 
-	# Serial version:
-	#for domain in sort_domains(domains_to_check, env):
-	#	run_domain_checks_on_domain(domain, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
+    # Serial version:
+    #for domain in sort_domains(domains_to_check, env):
+    #    run_domain_checks_on_domain(domain, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
+
+    # Parallelize the checks across a worker pool.
+    args = ((domain, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
+            for domain in domains_to_check)
+    ret = pool.starmap(run_domain_checks_on_domain, args, chunksize=1)
+    # (domain, output) => { domain: output }
+    ret = dict(ret)
+    for domain in sort_domains(ret, env):
+        ret[domain].playback(output)
 
-	# Parallelize the checks across a worker pool.
-	args = ((domain, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
-		for domain in domains_to_check)
-	ret = pool.starmap(run_domain_checks_on_domain, args, chunksize=1)
-	ret = dict(ret) # (domain, output) => { domain: output }
-	for domain in sort_domains(ret, env):
-		ret[domain].playback(output)
 
 def run_domain_checks_on_domain(domain, env, dns_domains, dns_zonefiles, mail_domains, web_domains):
-	output = BufferedOutput()
+    output = BufferedOutput()
 
-	output.add_heading(domain)
+    output.add_heading(domain)
 
-	if domain == env["PRIMARY_HOSTNAME"]:
-		check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles)
-		
-	if domain in dns_domains:
-		check_dns_zone(domain, env, output, dns_zonefiles)
-		
-	if domain in mail_domains:
-		check_mail_domain(domain, env, output)
+    if domain == env["PRIMARY_HOSTNAME"]:
+        check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles)
 
-	if domain in web_domains:
-		check_web_domain(domain, env, output)
+    if domain in dns_domains:
+        check_dns_zone(domain, env, output, dns_zonefiles)
 
-	if domain in dns_domains:
-		check_dns_zone_suggestions(domain, env, output, dns_zonefiles)
+    if domain in mail_domains:
+        check_mail_domain(domain, env, output)
+
+    if domain in web_domains:
+        check_web_domain(domain, env, output)
+
+    if domain in dns_domains:
+        check_dns_zone_suggestions(domain, env, output, dns_zonefiles)
+
+    return (domain, output)
 
-	return (domain, output)
 
 def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
-	# If a DS record is set on the zone containing this domain, check DNSSEC now.
-	for zone in dns_domains:
-		if zone == domain or domain.endswith("." + zone):
-			if query_dns(zone, "DS", nxdomain=None) is not None:
-				check_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True)
+    # If a DS record is set on the zone containing this domain, check DNSSEC now.
+    for zone in dns_domains:
+        if zone == domain or domain.endswith("." + zone):
+            if query_dns(zone, "DS", nxdomain=None) is not None:
+                check_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True)
 
-	# Check that the ns1/ns2 hostnames resolve to A records. This information probably
-	# comes from the TLD since the information is set at the registrar as glue records.
-	# We're probably not actually checking that here but instead checking that we, as
-	# the nameserver, are reporting the right info --- but if the glue is incorrect this
-	# will probably fail.
-	ip = query_dns("ns1." + domain, "A") + '/' + query_dns("ns2." + domain, "A")
-	if ip == env['PUBLIC_IP'] + '/' + env['PUBLIC_IP']:
-		output.print_ok("Nameserver glue records are correct at registrar. [ns1/ns2.%s => %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
-	else:
-		output.print_error("""Nameserver glue records are incorrect. The ns1.%s and ns2.%s nameservers must be configured at your domain name
-			registrar as having the IP address %s. They currently report addresses of %s. It may take several hours for
-			public DNS to update after a change."""
-			% (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ip))
+    # Check that the ns1/ns2 hostnames resolve to A records. This information probably
+    # comes from the TLD since the information is set at the registrar as glue records.
+    # We're probably not actually checking that here but instead checking that we, as
+    # the nameserver, are reporting the right info --- but if the glue is incorrect this
+    # will probably fail.
+    ip = query_dns("ns1." + domain, "A") + '/' + query_dns("ns2." + domain, "A")
+    if ip == env['PUBLIC_IP'] + '/' + env['PUBLIC_IP']:
+        output.print_ok("Nameserver glue records are correct at registrar. [ns1/ns2.%s => %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
+    else:
+        output.print_error("""Nameserver glue records are incorrect. The ns1.%s and ns2.%s nameservers must be configured at your domain name
+            registrar as having the IP address %s. They currently report addresses of %s. It may take several hours for
+            public DNS to update after a change."""
+                           % (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ip))
 
-	# Check that PRIMARY_HOSTNAME resolves to PUBLIC_IP in public DNS.
-	ip = query_dns(domain, "A")
-	if ip == env['PUBLIC_IP']:
-		output.print_ok("Domain resolves to box's IP address. [%s => %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
-	else:
-		output.print_error("""This domain must resolve to your box's IP address (%s) in public DNS but it currently resolves
-			to %s. It may take several hours for public DNS to update after a change. This problem may result from other
-			issues listed here."""
-			% (env['PUBLIC_IP'], ip))
+    # Check that PRIMARY_HOSTNAME resolves to PUBLIC_IP in public DNS.
+    ip = query_dns(domain, "A")
+    if ip == env['PUBLIC_IP']:
+        output.print_ok("Domain resolves to box's IP address. [%s => %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
+    else:
+        output.print_error("""This domain must resolve to your box's IP address (%s) in public DNS but it currently resolves
+            to %s. It may take several hours for public DNS to update after a change. This problem may result from other
+            issues listed here."""
+                           % (env['PUBLIC_IP'], ip))
 
-	# Check reverse DNS on the PRIMARY_HOSTNAME. Note that it might not be
-	# a DNS zone if it is a subdomain of another domain we have a zone for.
-	ipaddr_rev = dns.reversename.from_address(env['PUBLIC_IP'])
-	existing_rdns = query_dns(ipaddr_rev, "PTR")
-	if existing_rdns == domain:
-		output.print_ok("Reverse DNS is set correctly at ISP. [%s => %s]" % (env['PUBLIC_IP'], env['PRIMARY_HOSTNAME']))
-	else:
-		output.print_error("""Your box's reverse DNS is currently %s, but it should be %s. Your ISP or cloud provider will have instructions
-			on setting up reverse DNS for your box at %s.""" % (existing_rdns, domain, env['PUBLIC_IP']) )
+    # Check reverse DNS on the PRIMARY_HOSTNAME. Note that it might not be
+    # a DNS zone if it is a subdomain of another domain we have a zone for.
+    ipaddr_rev = dns.reversename.from_address(env['PUBLIC_IP'])
+    existing_rdns = query_dns(ipaddr_rev, "PTR")
+    if existing_rdns == domain:
+        output.print_ok("Reverse DNS is set correctly at ISP. [%s => %s]" % (env['PUBLIC_IP'], env['PRIMARY_HOSTNAME']))
+    else:
+        output.print_error("""Your box's reverse DNS is currently %s, but it should be %s. Your ISP or cloud provider will have instructions on setting up reverse DNS for your box at %s.""" % (existing_rdns, domain, env['PUBLIC_IP']))
 
-	# Check the TLSA record.
-	tlsa_qname = "_25._tcp." + domain
-	tlsa25 = query_dns(tlsa_qname, "TLSA", nxdomain=None)
-	tlsa25_expected = build_tlsa_record(env)
-	if tlsa25 == tlsa25_expected:
-		output.print_ok("""The DANE TLSA record for incoming mail is correct (%s).""" % tlsa_qname,)
-	elif tlsa25 is None:
-		output.print_error("""The DANE TLSA record for incoming mail is not set. This is optional.""")
-	else:
-		output.print_error("""The DANE TLSA record for incoming mail (%s) is not correct. It is '%s' but it should be '%s'.
-			It may take several hours for public DNS to update after a change."""
-                        % (tlsa_qname, tlsa25, tlsa25_expected))
+    # Check the TLSA record.
+    tlsa_qname = "_25._tcp." + domain
+    tlsa25 = query_dns(tlsa_qname, "TLSA", nxdomain=None)
+    tlsa25_expected = build_tlsa_record(env)
+    if tlsa25 == tlsa25_expected:
+        output.print_ok("""The DANE TLSA record for incoming mail is correct (%s).""" % tlsa_qname,)
+    elif tlsa25 is None:
+        output.print_error("""The DANE TLSA record for incoming mail is not set. This is optional.""")
+    else:
+        output.print_error("""The DANE TLSA record for incoming mail (%s) is not correct. It is '%s' but it should be '%s'.
+            It may take several hours for public DNS to update after a change."""
+                           % (tlsa_qname, tlsa25, tlsa25_expected))
+
+    # Check that the hostmaster@ email address exists.
+    check_alias_exists("hostmaster@" + domain, env, output)
 
-	# Check that the hostmaster@ email address exists.
-	check_alias_exists("hostmaster@" + domain, env, output)
 
 def check_alias_exists(alias, env, output):
-	mail_alises = dict(get_mail_aliases(env))
-	if alias in mail_alises:
-		output.print_ok("%s exists as a mail alias [=> %s]" % (alias, mail_alises[alias]))
-	else:
-		output.print_error("""You must add a mail alias for %s and direct email to you or another administrator.""" % alias)
+    mail_alises = dict(get_mail_aliases(env))
+    if alias in mail_alises:
+        output.print_ok("%s exists as a mail alias [=> %s]" % (alias, mail_alises[alias]))
+    else:
+        output.print_error("""You must add a mail alias for %s and direct email to you or another administrator.""" % alias)
+
 
 def check_dns_zone(domain, env, output, dns_zonefiles):
-	# If a DS record is set at the registrar, check DNSSEC first because it will affect the NS query.
-	# If it is not set, we suggest it last.
-	if query_dns(domain, "DS", nxdomain=None) is not None:
-		check_dnssec(domain, env, output, dns_zonefiles)
+    # If a DS record is set at the registrar, check DNSSEC first because it will affect the NS query.
+    # If it is not set, we suggest it last.
+    if query_dns(domain, "DS", nxdomain=None) is not None:
+        check_dnssec(domain, env, output, dns_zonefiles)
+
+    # We provide a DNS zone for the domain. It should have NS records set up
+    # at the domain name's registrar pointing to this box. The secondary DNS
+    # server may be customized. Unfortunately this may not check the domain's
+    # whois information -- we may be getting the NS records from us rather than
+    # the TLD, and so we're not actually checking the TLD. For that we'd need
+    # to do a DNS trace.
+    custom_dns = get_custom_dns_config(env)
+    existing_ns = query_dns(domain, "NS")
+    correct_ns = "; ".join(sorted([
+        "ns1." + env['PRIMARY_HOSTNAME'],
+        custom_dns.get("_secondary_nameserver", "ns2." + env['PRIMARY_HOSTNAME']),
+        ]))
+    if existing_ns.lower() == correct_ns.lower():
+        output.print_ok("Nameservers are set correctly at registrar. [%s]" % correct_ns)
+    else:
+        output.print_error("""The nameservers set on this domain are incorrect. They are currently %s. Use your domain name registrar's
+            control panel to set the nameservers to %s."""
+                           % (existing_ns, correct_ns))
 
-	# We provide a DNS zone for the domain. It should have NS records set up
-	# at the domain name's registrar pointing to this box. The secondary DNS
-	# server may be customized. Unfortunately this may not check the domain's
-	# whois information -- we may be getting the NS records from us rather than
-	# the TLD, and so we're not actually checking the TLD. For that we'd need
-	# to do a DNS trace.
-	custom_dns = get_custom_dns_config(env)
-	existing_ns = query_dns(domain, "NS")
-	correct_ns = "; ".join(sorted([
-		"ns1." + env['PRIMARY_HOSTNAME'],
-		custom_dns.get("_secondary_nameserver", "ns2." + env['PRIMARY_HOSTNAME']),
-		]))
-	if existing_ns.lower() == correct_ns.lower():
-		output.print_ok("Nameservers are set correctly at registrar. [%s]" % correct_ns)
-	else:
-		output.print_error("""The nameservers set on this domain are incorrect. They are currently %s. Use your domain name registrar's
-			control panel to set the nameservers to %s."""
-				% (existing_ns, correct_ns) )
 
 def check_dns_zone_suggestions(domain, env, output, dns_zonefiles):
-	# Since DNSSEC is optional, if a DS record is NOT set at the registrar suggest it.
-	# (If it was set, we did the check earlier.)
-	if query_dns(domain, "DS", nxdomain=None) is None:
-		check_dnssec(domain, env, output, dns_zonefiles)
+    # Since DNSSEC is optional, if a DS record is NOT set at the registrar suggest it.
+    # (If it was set, we did the check earlier.)
+    if query_dns(domain, "DS", nxdomain=None) is None:
+        check_dnssec(domain, env, output, dns_zonefiles)
 
 
 def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
-	# See if the domain has a DS record set at the registrar. The DS record may have
-	# several forms. We have to be prepared to check for any valid record. We've
-	# pre-generated all of the valid digests --- read them in.
-	ds_correct = open('/etc/nsd/zones/' + dns_zonefiles[domain] + '.ds').read().strip().split("\n")
-	digests = { }
-	for rr_ds in ds_correct:
-		ds_keytag, ds_alg, ds_digalg, ds_digest = rr_ds.split("\t")[4].split(" ")
-		digests[ds_digalg] = ds_digest
+    # See if the domain has a DS record set at the registrar. The DS record may have
+    # several forms. We have to be prepared to check for any valid record. We've
+    # pre-generated all of the valid digests --- read them in.
+    ds_correct = open('/etc/nsd/zones/' + dns_zonefiles[domain] + '.ds').read().strip().split("\n")
+    digests = {}
+    for rr_ds in ds_correct:
+        ds_keytag, ds_alg, ds_digalg, ds_digest = rr_ds.split("\t")[4].split(" ")
+        digests[ds_digalg] = ds_digest
 
-	# Some registrars may want the public key so they can compute the digest. The DS
-	# record that we suggest using is for the KSK (and that's how the DS records were generated).
-	alg_name_map = { '7': 'RSASHA1-NSEC3-SHA1', '8': 'RSASHA256' }
-	dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
-	dnsssec_pubkey = open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key')).read().split("\t")[3].split(" ")[3]
+    # Some registrars may want the public key so they can compute the digest. The DS
+    # record that we suggest using is for the KSK (and that's how the DS records were generated).
+    alg_name_map = {'7': 'RSASHA1-NSEC3-SHA1', '8': 'RSASHA256'}
+    dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
+    dnsssec_pubkey = open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key')).read().split("\t")[3].split(" ")[3]
+
+    # Query public DNS for the DS record at the registrar.
+    ds = query_dns(domain, "DS", nxdomain=None)
+    ds_looks_valid = ds and len(ds.split(" ")) == 4
+    if ds_looks_valid:
+        ds = ds.split(" ")
+    if ds_looks_valid and ds[0] == ds_keytag and ds[1] == ds_alg and ds[3] == digests.get(ds[2]):
+        if is_checking_primary:
+            return
+        output.print_ok("DNSSEC 'DS' record is set correctly at registrar.")
+    else:
+        if ds is None:
+            if is_checking_primary:
+                return
+            output.print_error("""This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC.
+                To set a DS record, you must follow the instructions provided by your domain name registrar and provide to them this information:""")
+        else:
+            if is_checking_primary:
+                output.print_error("""The DNSSEC 'DS' record for %s is incorrect. See further details below.""" % domain)
+                return
+            output.print_error("""This domain's DNSSEC DS record is incorrect. The chain of trust is broken between the public DNS system
+                and this machine's DNS server. It may take several hours for public DNS to update after a change. If you did not recently
+                make a change, you must resolve this immediately by following the instructions provided by your domain name registrar and
+                provide to them this information:""")
+        output.print_line("")
+        output.print_line("Key Tag: " + ds_keytag + ("" if not ds_looks_valid or ds[0] == ds_keytag else " (Got '%s')" % ds[0]))
+        output.print_line("Key Flags: KSK")
+        output.print_line(
+            ("Algorithm: %s / %s" % (ds_alg, alg_name_map[ds_alg])) +
+            ("" if not ds_looks_valid or ds[1] == ds_alg else " (Got '%s')" % ds[1]))
+        # see http://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml
+        output.print_line("Digest Type: 2 / SHA-256")
+        # http://www.ietf.org/assignments/ds-rr-types/ds-rr-types.xml
+        output.print_line("Digest: " + digests['2'])
+        if ds_looks_valid and ds[3] != digests.get(ds[2]):
+            output.print_line("(Got digest type %s and digest %s which do not match.)" % (ds[2], ds[3]))
+        output.print_line("Public Key: ")
+        output.print_line(dnsssec_pubkey, monospace=True)
+        output.print_line("")
+        output.print_line("Bulk/Record Format:")
+        output.print_line("" + ds_correct[0])
+        output.print_line("")
 
-	# Query public DNS for the DS record at the registrar.
-	ds = query_dns(domain, "DS", nxdomain=None)
-	ds_looks_valid = ds and len(ds.split(" ")) == 4
-	if ds_looks_valid: ds = ds.split(" ")
-	if ds_looks_valid and ds[0] == ds_keytag and ds[1] == ds_alg and ds[3] == digests.get(ds[2]):
-		if is_checking_primary: return
-		output.print_ok("DNSSEC 'DS' record is set correctly at registrar.")
-	else:
-		if ds == None:
-			if is_checking_primary: return
-			output.print_error("""This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC.
-				To set a DS record, you must follow the instructions provided by your domain name registrar and provide to them this information:""")
-		else:
-			if is_checking_primary:
-				output.print_error("""The DNSSEC 'DS' record for %s is incorrect. See further details below.""" % domain)
-				return
-			output.print_error("""This domain's DNSSEC DS record is incorrect. The chain of trust is broken between the public DNS system
-				and this machine's DNS server. It may take several hours for public DNS to update after a change. If you did not recently
-				make a change, you must resolve this immediately by following the instructions provided by your domain name registrar and
-				provide to them this information:""")
-		output.print_line("")
-		output.print_line("Key Tag: " + ds_keytag + ("" if not ds_looks_valid or ds[0] == ds_keytag else " (Got '%s')" % ds[0]))
-		output.print_line("Key Flags: KSK")
-		output.print_line(
-			  ("Algorithm: %s / %s" % (ds_alg, alg_name_map[ds_alg]))
-			+ ("" if not ds_looks_valid or ds[1] == ds_alg else " (Got '%s')" % ds[1]))
-			# see http://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml
-		output.print_line("Digest Type: 2 / SHA-256")
-			# http://www.ietf.org/assignments/ds-rr-types/ds-rr-types.xml
-		output.print_line("Digest: " + digests['2'])
-		if ds_looks_valid and ds[3] != digests.get(ds[2]):
-			output.print_line("(Got digest type %s and digest %s which do not match.)" % (ds[2], ds[3]))
-		output.print_line("Public Key: ")
-		output.print_line(dnsssec_pubkey, monospace=True)
-		output.print_line("")
-		output.print_line("Bulk/Record Format:")
-		output.print_line("" + ds_correct[0])
-		output.print_line("")
 
 def check_mail_domain(domain, env, output):
-	# Check the MX record.
+    # Check the MX record.
 
-	mx = query_dns(domain, "MX", nxdomain=None)
-	expected_mx = "10 " + env['PRIMARY_HOSTNAME']
+    mx = query_dns(domain, "MX", nxdomain=None)
+    expected_mx = "10 " + env['PRIMARY_HOSTNAME']
 
-	if mx == expected_mx:
-		output.print_ok("Domain's email is directed to this domain. [%s => %s]" % (domain, mx))
+    if mx == expected_mx:
+        output.print_ok("Domain's email is directed to this domain. [%s => %s]" % (domain, mx))
 
-	elif mx == None:
-		# A missing MX record is okay on the primary hostname because
-		# the primary hostname's A record (the MX fallback) is... itself,
-		# which is what we want the MX to be.
-		if domain == env['PRIMARY_HOSTNAME']:
-			output.print_ok("Domain's email is directed to this domain. [%s has no MX record, which is ok]" % (domain,))
+    elif mx is None:
+        # A missing MX record is okay on the primary hostname because
+        # the primary hostname's A record (the MX fallback) is... itself,
+        # which is what we want the MX to be.
+        if domain == env['PRIMARY_HOSTNAME']:
+            output.print_ok("Domain's email is directed to this domain. [%s has no MX record, which is ok]" % (domain,))
 
-		# And a missing MX record is okay on other domains if the A record
-		# matches the A record of the PRIMARY_HOSTNAME. Actually this will
-		# probably confuse DANE TLSA, but we'll let that slide for now.
-		else:
-			domain_a = query_dns(domain, "A", nxdomain=None)
-			primary_a = query_dns(env['PRIMARY_HOSTNAME'], "A", nxdomain=None)
-			if domain_a != None and domain_a == primary_a:
-				output.print_ok("Domain's email is directed to this domain. [%s has no MX record but its A record is OK]" % (domain,))
-			else:
-				output.print_error("""This domain's DNS MX record is not set. It should be '%s'. Mail will not
-					be delivered to this box. It may take several hours for public DNS to update after a
-					change. This problem may result from other issues listed here.""" % (expected_mx,))
+        # And a missing MX record is okay on other domains if the A record
+        # matches the A record of the PRIMARY_HOSTNAME. Actually this will
+        # probably confuse DANE TLSA, but we'll let that slide for now.
+        else:
+            domain_a = query_dns(domain, "A", nxdomain=None)
+            primary_a = query_dns(env['PRIMARY_HOSTNAME'], "A", nxdomain=None)
+            if domain_a is not None and domain_a == primary_a:
+                output.print_ok("Domain's email is directed to this domain. [%s has no MX record but its A record is OK]" % (domain,))
+            else:
+                output.print_error("""This domain's DNS MX record is not set. It should be '%s'. Mail will not
+                    be delivered to this box. It may take several hours for public DNS to update after a
+                    change. This problem may result from other issues listed here.""" % (expected_mx,))
 
-	else:
-		output.print_error("""This domain's DNS MX record is incorrect. It is currently set to '%s' but should be '%s'. Mail will not
-			be delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from
-			other issues listed here.""" % (mx, expected_mx))
+    else:
+        output.print_error("""This domain's DNS MX record is incorrect. It is currently set to '%s' but should be '%s'. Mail will not
+            be delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from
+            other issues listed here.""" % (mx, expected_mx))
 
-	# Check that the postmaster@ email address exists. Not required if the domain has a
-	# catch-all address or domain alias.
-	if "@" + domain not in dict(get_mail_aliases(env)):
-		check_alias_exists("postmaster@" + domain, env, output)
+    # Check that the postmaster@ email address exists. Not required if the domain has a
+    # catch-all address or domain alias.
+    if "@" + domain not in dict(get_mail_aliases(env)):
+        check_alias_exists("postmaster@" + domain, env, output)
+
+    # Stop if the domain is listed in the Spamhaus Domain Block List.
+    # The user might have chosen a domain that was previously in use by a spammer
+    # and will not be able to reliably send mail.
+    dbl = query_dns(domain+'.dbl.spamhaus.org', "A", nxdomain=None)
+    if dbl is None:
+        output.print_ok("Domain is not blacklisted by dbl.spamhaus.org.")
+    else:
+        output.print_error("""This domain is listed in the Spamhaus Domain Block List (code %s),
+            which may prevent recipients from receiving your mail.
+            See http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/%s.""" % (dbl, domain))
 
-	# Stop if the domain is listed in the Spamhaus Domain Block List.
-	# The user might have chosen a domain that was previously in use by a spammer
-	# and will not be able to reliably send mail.
-	dbl = query_dns(domain+'.dbl.spamhaus.org', "A", nxdomain=None)
-	if dbl is None:
-		output.print_ok("Domain is not blacklisted by dbl.spamhaus.org.")
-	else:
-		output.print_error("""This domain is listed in the Spamhaus Domain Block List (code %s),
-			which may prevent recipients from receiving your mail.
-			See http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/%s.""" % (dbl, domain))
 
 def check_web_domain(domain, env, output):
-	# See if the domain's A record resolves to our PUBLIC_IP. This is already checked
-	# for PRIMARY_HOSTNAME, for which it is required for mail specifically. For it and
-	# other domains, it is required to access its website.
-	if domain != env['PRIMARY_HOSTNAME']:
-		ip = query_dns(domain, "A")
-		if ip == env['PUBLIC_IP']:
-			output.print_ok("Domain resolves to this box's IP address. [%s => %s]" % (domain, env['PUBLIC_IP']))
-		else:
-			output.print_error("""This domain should resolve to your box's IP address (%s) if you would like the box to serve
-				webmail or a website on this domain. The domain currently resolves to %s in public DNS. It may take several hours for
-				public DNS to update after a change. This problem may result from other issues listed here.""" % (env['PUBLIC_IP'], ip))
+    # See if the domain's A record resolves to our PUBLIC_IP. This is already checked
+    # for PRIMARY_HOSTNAME, for which it is required for mail specifically. For it and
+    # other domains, it is required to access its website.
+    if domain != env['PRIMARY_HOSTNAME']:
+        ip = query_dns(domain, "A")
+        if ip == env['PUBLIC_IP']:
+            output.print_ok("Domain resolves to this box's IP address. [%s => %s]" % (domain, env['PUBLIC_IP']))
+        else:
+            output.print_error("""This domain should resolve to your box's IP address (%s) if you would like the box to serve
+                webmail or a website on this domain. The domain currently resolves to %s in public DNS. It may take several hours for
+                public DNS to update after a change. This problem may result from other issues listed here.""" % (env['PUBLIC_IP'], ip))
+
+    # We need a SSL certificate for PRIMARY_HOSTNAME because that's where the
+    # user will log in with IMAP or webmail. Any other domain we serve a
+    # website for also needs a signed certificate.
+    check_ssl_cert(domain, env, output)
 
-	# We need a SSL certificate for PRIMARY_HOSTNAME because that's where the
-	# user will log in with IMAP or webmail. Any other domain we serve a
-	# website for also needs a signed certificate.
-	check_ssl_cert(domain, env, output)
 
 def query_dns(qname, rtype, nxdomain='[Not Set]'):
-	# Make the qname absolute by appending a period. Without this, dns.resolver.query
-	# will fall back a failed lookup to a second query with this machine's hostname
-	# appended. This has been causing some false-positive Spamhaus reports. The
-	# reverse DNS lookup will pass a dns.name.Name instance which is already
-	# absolute so we should not modify that.
-	if isinstance(qname, str):
-		qname += "."
+    # Make the qname absolute by appending a period. Without this, dns.resolver.query
+    # will fall back a failed lookup to a second query with this machine's hostname
+    # appended. This has been causing some false-positive Spamhaus reports. The
+    # reverse DNS lookup will pass a dns.name.Name instance which is already
+    # absolute so we should not modify that.
+    if isinstance(qname, str):
+        qname += "."
 
-	# Do the query.
-	try:
-		response = dns.resolver.query(qname, rtype)
-	except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
-		# Host did not have an answer for this query; not sure what the
-		# difference is between the two exceptions.
-		return nxdomain
-	except dns.exception.Timeout:
-		return "[timeout]"
+    # Do the query.
+    try:
+        response = dns.resolver.query(qname, rtype)
+    except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+        # Host did not have an answer for this query; not sure what the
+        # difference is between the two exceptions.
+        return nxdomain
+    except dns.exception.Timeout:
+        return "[timeout]"
+
+    # There may be multiple answers; concatenate the response. Remove trailing
+    # periods from responses since that's how qnames are encoded in DNS but is
+    # confusing for us. The order of the answers doesn't matter, so sort so we
+    # can compare to a well known order.
+    return "; ".join(sorted(str(r).rstrip('.') for r in response))
 
-	# There may be multiple answers; concatenate the response. Remove trailing
-	# periods from responses since that's how qnames are encoded in DNS but is
-	# confusing for us. The order of the answers doesn't matter, so sort so we
-	# can compare to a well known order.
-	return "; ".join(sorted(str(r).rstrip('.') for r in response))
 
 def check_ssl_cert(domain, env, output):
-	# Check that SSL certificate is signed.
+    # Check that SSL certificate is signed.
 
-	# Skip the check if the A record is not pointed here.
-	if query_dns(domain, "A", None) not in (env['PUBLIC_IP'], None): return
+    # Skip the check if the A record is not pointed here.
+    if query_dns(domain, "A", None) not in (env['PUBLIC_IP'], None):
+        return
 
-	# Where is the SSL stored?
-	ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
+    # Where is the SSL stored?
+    ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
 
-	if not os.path.exists(ssl_certificate):
-		output.print_error("The SSL certificate file for this domain is missing.")
-		return
+    if not os.path.exists(ssl_certificate):
+        output.print_error("The SSL certificate file for this domain is missing.")
+        return
 
-	# Check that the certificate is good.
+    # Check that the certificate is good.
 
-	cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
+    cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
 
-	if cert_status == "OK":
-		# The certificate is ok. The details has expiry info.
-		output.print_ok("SSL certificate is signed & valid. %s %s" % (ssl_via if ssl_via else "", cert_status_details))
+    if cert_status == "OK":
+        # The certificate is ok. The details has expiry info.
+        output.print_ok("SSL certificate is signed & valid. %s %s" % (ssl_via if ssl_via else "", cert_status_details))
 
-	elif cert_status == "SELF-SIGNED":
-		# Offer instructions for purchasing a signed certificate.
+    elif cert_status == "SELF-SIGNED":
+        # Offer instructions for purchasing a signed certificate.
 
-		fingerprint = shell('check_output', [
-			"openssl",
-			"x509",
-			"-in", ssl_certificate,
-			"-noout",
-			"-fingerprint"
-			])
-		fingerprint = re.sub(".*Fingerprint=", "", fingerprint).strip()
+        fingerprint = shell('check_output', [
+            "openssl",
+            "x509",
+            "-in", ssl_certificate,
+            "-noout",
+            "-fingerprint"
+            ])
+        fingerprint = re.sub(".*Fingerprint=", "", fingerprint).strip()
 
-		if domain == env['PRIMARY_HOSTNAME']:
-			output.print_error("""The SSL certificate for this domain is currently self-signed. You will get a security
-			warning when you check or send email and when visiting this domain in a web browser (for webmail or
-			static site hosting). Use the SSL Certificates page in this control panel to install a signed SSL certificate.
-			You may choose to leave the self-signed certificate in place and confirm the security exception, but check that
-			the certificate fingerprint matches the following:""")
-			output.print_line("")
-			output.print_line("   " + fingerprint, monospace=True)
-		else:
-			output.print_warning("""The SSL certificate for this domain is currently self-signed. Visitors to a website on
-			this domain will get a security warning. If you are not serving a website on this domain, then it is
-			safe to leave the self-signed certificate in place. Use the SSL Certificates page in this control panel to
-			install a signed SSL certificate.""")
+        if domain == env['PRIMARY_HOSTNAME']:
+            output.print_error("""The SSL certificate for this domain is currently self-signed. You will get a security
+            warning when you check or send email and when visiting this domain in a web browser (for webmail or
+            static site hosting). Use the SSL Certificates page in this control panel to install a signed SSL certificate.
+            You may choose to leave the self-signed certificate in place and confirm the security exception, but check that
+            the certificate fingerprint matches the following:""")
+            output.print_line("")
+            output.print_line("   " + fingerprint, monospace=True)
+        else:
+            output.print_warning("""The SSL certificate for this domain is currently self-signed. Visitors to a website on
+            this domain will get a security warning. If you are not serving a website on this domain, then it is
+            safe to leave the self-signed certificate in place. Use the SSL Certificates page in this control panel to
+            install a signed SSL certificate.""")
+
+    else:
+        output.print_error("The SSL certificate has a problem: " + cert_status)
+        if cert_status_details:
+            output.print_line("")
+            output.print_line(cert_status_details)
+            output.print_line("")
 
-	else:
-		output.print_error("The SSL certificate has a problem: " + cert_status)
-		if cert_status_details:
-			output.print_line("")
-			output.print_line(cert_status_details)
-			output.print_line("")
 
 def check_certificate(domain, ssl_certificate, ssl_private_key):
-	# Use openssl verify to check the status of a certificate.
+    # Use openssl verify to check the status of a certificate.
 
-	# First check that the certificate is for the right domain. The domain
-	# must be found in the Subject Common Name (CN) or be one of the
-	# Subject Alternative Names. A wildcard might also appear as the CN
-	# or in the SAN list, so check for that tool.
-	retcode, cert_dump = shell('check_output', [
-		"openssl", "x509",
-		"-in", ssl_certificate,
-		"-noout", "-text", "-nameopt", "rfc2253",
-		], trap=True)
+    # First check that the certificate is for the right domain. The domain
+    # must be found in the Subject Common Name (CN) or be one of the
+    # Subject Alternative Names. A wildcard might also appear as the CN
+    # or in the SAN list, so check for that too.
+    retcode, cert_dump = shell('check_output', [
+        "openssl", "x509",
+        "-in", ssl_certificate,
+        "-noout", "-text", "-nameopt", "rfc2253",
+        ], trap=True)
 
-	# If the certificate is catastrophically bad, catch that now and report it.
-	# More information was probably written to stderr (which we aren't capturing),
-	# but it is probably not helpful to the user anyway.
-	if retcode != 0:
-		return ("The SSL certificate appears to be corrupted or not a PEM-formatted SSL certificate file. (%s)" % ssl_certificate, None)
+    # If the certificate is catastrophically bad, catch that now and report it.
+    # More information was probably written to stderr (which we aren't capturing),
+    # but it is probably not helpful to the user anyway.
+    if retcode != 0:
+        return ("The SSL certificate appears to be corrupted or not a PEM-formatted SSL certificate file. (%s)" % ssl_certificate, None)
 
-	cert_dump = cert_dump.split("\n")
-	certificate_names = set()
-	cert_expiration_date = None
-	while len(cert_dump) > 0:
-		line = cert_dump.pop(0)
+    cert_dump = cert_dump.split("\n")
+    certificate_names = set()
+    cert_expiration_date = None
+    while len(cert_dump) > 0:
+        line = cert_dump.pop(0)
 
-		# Grab from the Subject Common Name. We include the indentation
-		# at the start of the line in case maybe the cert includes the
-		# common name of some other referenced entity (which would be
-		# indented, I hope).
-		m = re.match("        Subject: CN=([^,]+)", line)
-		if m:
-			certificate_names.add(m.group(1))
-	
-		# Grab from the Subject Alternative Name, which is a comma-delim
-		# list of names, like DNS:mydomain.com, DNS:otherdomain.com.
-		m = re.match("            X509v3 Subject Alternative Name:", line)
-		if m:
-			names = re.split(",\s*", cert_dump.pop(0).strip())
-			for n in names:
-				m = re.match("DNS:(.*)", n)
-				if m:
-					certificate_names.add(m.group(1))
+        # Grab from the Subject Common Name. We include the indentation
+        # at the start of the line in case maybe the cert includes the
+        # common name of some other referenced entity (which would be
+        # indented, I hope).
+        m = re.match("        Subject: CN=([^,]+)", line)
+        if m:
+            certificate_names.add(m.group(1))
 
-		m = re.match("            Not After : (.*)", line)
-		if m:
-			cert_expiration_date = dateutil.parser.parse(m.group(1))
+        # Grab from the Subject Alternative Name, which is a comma-delim
+        # list of names, like DNS:mydomain.com, DNS:otherdomain.com.
+        m = re.match("            X509v3 Subject Alternative Name:", line)
+        if m:
+            names = re.split(",\s*", cert_dump.pop(0).strip())
+            for n in names:
+                m = re.match("DNS:(.*)", n)
+                if m:
+                    certificate_names.add(m.group(1))
 
-	domain = domain.encode("idna").decode("ascii")
-	wildcard_domain = re.sub("^[^\.]+", "*", domain)
-	if domain is not None and domain not in certificate_names and wildcard_domain not in certificate_names:
-		return ("The certificate is for the wrong domain name. It is for %s."
-			% ", ".join(sorted(certificate_names)), None)
+        m = re.match("            Not After : (.*)", line)
+        if m:
+            cert_expiration_date = dateutil.parser.parse(m.group(1))
 
-	# Second, check that the certificate matches the private key. Get the modulus of the
-	# private key and of the public key in the certificate. They should match. The output
-	# of each command looks like "Modulus=XXXXX".
-	if ssl_private_key is not None:
-		private_key_modulus = shell('check_output', [
-			"openssl", "rsa",
-			"-inform", "PEM",
-			"-noout", "-modulus",
-			"-in", ssl_private_key])
-		cert_key_modulus = shell('check_output', [
-			"openssl", "x509",
-			"-in", ssl_certificate,
-			"-noout", "-modulus"])
-		if private_key_modulus != cert_key_modulus:
-			return ("The certificate installed at %s does not correspond to the private key at %s." % (ssl_certificate, ssl_private_key), None)
+    domain = domain.encode("idna").decode("ascii")
+    wildcard_domain = re.sub(r"^[^\.]+", "*", domain)
+    if domain is not None and domain not in certificate_names and wildcard_domain not in certificate_names:
+        return ("The certificate is for the wrong domain name. It is for %s."
+                % ", ".join(sorted(certificate_names)), None)
 
-	# Next validate that the certificate is valid. This checks whether the certificate
-	# is self-signed, that the chain of trust makes sense, that it is signed by a CA
-	# that Ubuntu has installed on this machine's list of CAs, and I think that it hasn't
-	# expired.
+    # Second, check that the certificate matches the private key. Get the modulus of the
+    # private key and of the public key in the certificate. They should match. The output
+    # of each command looks like "Modulus=XXXXX".
+    if ssl_private_key is not None:
+        private_key_modulus = shell('check_output', [
+            "openssl", "rsa",
+            "-inform", "PEM",
+            "-noout", "-modulus",
+            "-in", ssl_private_key])
+        cert_key_modulus = shell('check_output', [
+            "openssl", "x509",
+            "-in", ssl_certificate,
+            "-noout", "-modulus"])
+        if private_key_modulus != cert_key_modulus:
+            return ("The certificate installed at %s does not correspond to the private key at %s." % (ssl_certificate, ssl_private_key), None)
 
-	# In order to verify with openssl, we need to split out any
-	# intermediary certificates in the chain (if any) from our
-	# certificate (at the top). They need to be passed separately.
+    # Next validate that the certificate is valid. This checks whether the certificate
+    # is self-signed, that the chain of trust makes sense, that it is signed by a CA
+    # that Ubuntu has installed on this machine's list of CAs, and I think that it hasn't
+    # expired.
 
-	cert = open(ssl_certificate).read()
-	m = re.match(r'(-*BEGIN CERTIFICATE-*.*?-*END CERTIFICATE-*)(.*)', cert, re.S)
-	if m == None:
-		return ("The certificate file is an invalid PEM certificate.", None)
-	mycert, chaincerts = m.groups()
+    # In order to verify with openssl, we need to split out any
+    # intermediary certificates in the chain (if any) from our
+    # certificate (at the top). They need to be passed separately.
 
-	# This command returns a non-zero exit status in most cases, so trap errors.
+    cert = open(ssl_certificate).read()
+    m = re.match(r'(-*BEGIN CERTIFICATE-*.*?-*END CERTIFICATE-*)(.*)', cert, re.S)
+    if m is None:
+        return ("The certificate file is an invalid PEM certificate.", None)
+    mycert, chaincerts = m.groups()
 
-	retcode, verifyoutput = shell('check_output', [
-		"openssl",
-		"verify", "-verbose",
-		"-purpose", "sslserver", "-policy_check",]
-		+ ([] if chaincerts.strip() == "" else ["-untrusted", "/dev/stdin"])
-		+ [ssl_certificate],
-		input=chaincerts.encode('ascii'),
-		trap=True)
+    # This command returns a non-zero exit status in most cases, so trap errors.
 
-	if "self signed" in verifyoutput:
-		# Certificate is self-signed.
-		return ("SELF-SIGNED", None)
-	elif retcode != 0:
-		if "unable to get local issuer certificate" in verifyoutput:
-			return ("The certificate is missing an intermediate chain or the intermediate chain is incorrect or incomplete. (%s)" % verifyoutput, None)
+    retcode, verifyoutput = shell('check_output', [
+        "openssl",
+        "verify", "-verbose",
+        "-purpose", "sslserver", "-policy_check", ]
+        + ([] if chaincerts.strip() == "" else ["-untrusted", "/dev/stdin"])
+        + [ssl_certificate],
+        input=chaincerts.encode('ascii'),
+        trap=True)
 
-		# There is some unknown problem. Return the `openssl verify` raw output.
-		return ("There is a problem with the SSL certificate.", verifyoutput.strip())
-	else:
-		# `openssl verify` returned a zero exit status so the cert is currently
-		# good.
+    if "self signed" in verifyoutput:
+        # Certificate is self-signed.
+        return ("SELF-SIGNED", None)
+    elif retcode != 0:
+        if "unable to get local issuer certificate" in verifyoutput:
+            return ("The certificate is missing an intermediate chain or the intermediate chain is incorrect or incomplete. (%s)" % verifyoutput, None)
 
-		# But is it expiring soon?
-		now = datetime.datetime.now(dateutil.tz.tzlocal())
-		ndays = (cert_expiration_date-now).days
-		expiry_info = "The certificate expires in %d days on %s." % (ndays, cert_expiration_date.strftime("%x"))
-		if ndays <= 31:
-			return ("The certificate is expiring soon: " + expiry_info, None)
+        # There is some unknown problem. Return the `openssl verify` raw output.
+        return ("There is a problem with the SSL certificate.", verifyoutput.strip())
+    else:
+        # `openssl verify` returned a zero exit status so the cert is currently
+        # good.
 
-		# Return the special OK code.
-		return ("OK", expiry_info)
+        # But is it expiring soon?
+        now = datetime.datetime.now(dateutil.tz.tzlocal())
+        ndays = (cert_expiration_date-now).days
+        expiry_info = "The certificate expires in %d days on %s." % (ndays, cert_expiration_date.strftime("%x"))
+        if ndays <= 31:
+            return ("The certificate is expiring soon: " + expiry_info, None)
+
+        # Return the special OK code.
+        return ("OK", expiry_info)
 
 _apt_updates = None
+
+
 def list_apt_updates(apt_update=True):
-	# See if we have this information cached recently.
-	# Keep the information for 8 hours.
-	global _apt_updates
-	if _apt_updates is not None and _apt_updates[0] > datetime.datetime.now() - datetime.timedelta(hours=8):
-		return _apt_updates[1]
+    # See if we have this information cached recently.
+    # Keep the information for 8 hours.
+    global _apt_updates
+    if _apt_updates is not None and _apt_updates[0] > datetime.datetime.now() - datetime.timedelta(hours=8):
+        return _apt_updates[1]
 
-	# Run apt-get update to refresh package list. This should be running daily
-	# anyway, so on the status checks page don't do this because it is slow.
-	if apt_update:
-		shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
+    # Run apt-get update to refresh package list. This should be running daily
+    # anyway, so on the status checks page don't do this because it is slow.
+    if apt_update:
+        shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
 
-	# Run apt-get upgrade in simulate mode to get a list of what
-	# it would do.
-	simulated_install = shell("check_output", ["/usr/bin/apt-get", "-qq", "-s", "upgrade"])
-	pkgs = []
-	for line in simulated_install.split('\n'):
-		if line.strip() == "":
-			continue
-		if re.match(r'^Conf .*', line):
-			 # remove these lines, not informative
-			continue
-		m = re.match(r'^Inst (.*) \[(.*)\] \((\S*)', line)
-		if m:
-			pkgs.append({ "package": m.group(1), "version": m.group(3), "current_version": m.group(2) })
-		else:
-			pkgs.append({ "package": "[" + line + "]", "version": "", "current_version": "" })
+    # Run apt-get upgrade in simulate mode to get a list of what
+    # it would do.
+    simulated_install = shell("check_output", ["/usr/bin/apt-get", "-qq", "-s", "upgrade"])
+    pkgs = []
+    for line in simulated_install.split('\n'):
+        if line.strip() == "":
+            continue
+        if re.match(r'^Conf .*', line):
+            # remove these lines, not informative
+            continue
+        m = re.match(r'^Inst (.*) \[(.*)\] \((\S*)', line)
+        if m:
+            pkgs.append({"package": m.group(1), "version": m.group(3), "current_version": m.group(2)})
+        else:
+            pkgs.append({"package": "[" + line + "]", "version": "", "current_version": ""})
 
-	# Cache for future requests.
-	_apt_updates = (datetime.datetime.now(), pkgs)
+    # Cache for future requests.
+    _apt_updates = (datetime.datetime.now(), pkgs)
 
-	return pkgs
+    return pkgs
 
 
 class ConsoleOutput:
-	try:
-		terminal_columns = int(shell('check_output', ['stty', 'size']).split()[1])
-	except:
-		terminal_columns = 76
+    try:
+        terminal_columns = int(shell('check_output', ['stty', 'size']).split()[1])
+    except:
+        terminal_columns = 76
 
-	def add_heading(self, heading):
-		print()
-		print(heading)
-		print("=" * len(heading))
+    def add_heading(self, heading):
+        print()
+        print(heading)
+        print("=" * len(heading))
 
-	def print_ok(self, message):
-		self.print_block(message, first_line="✓  ")
+    def print_ok(self, message):
+        self.print_block(message, first_line="✓  ")
 
-	def print_error(self, message):
-		self.print_block(message, first_line="✖  ")
+    def print_error(self, message):
+        self.print_block(message, first_line="✖  ")
 
-	def print_warning(self, message):
-		self.print_block(message, first_line="?  ")
+    def print_warning(self, message):
+        self.print_block(message, first_line="?  ")
 
-	def print_block(self, message, first_line="   "):
-		print(first_line, end='')
-		message = re.sub("\n\s*", " ", message)
-		words = re.split("(\s+)", message)
-		linelen = 0
-		for w in words:
-			if linelen + len(w) > self.terminal_columns-1-len(first_line):
-				print()
-				print("   ", end="")
-				linelen = 0
-			if linelen == 0 and w.strip() == "": continue
-			print(w, end="")
-			linelen += len(w)
-		print()
+    def print_block(self, message, first_line="   "):
+        print(first_line, end='')
+        message = re.sub(r"\n\s*", " ", message)
+        words = re.split(r"(\s+)", message)
+        linelen = 0
+        for w in words:
+            if linelen + len(w) > self.terminal_columns-1-len(first_line):
+                print()
+                print("   ", end="")
+                linelen = 0
+            if linelen == 0 and w.strip() == "":
+                continue
+            print(w, end="")
+            linelen += len(w)
+        print()
+
+    def print_line(self, message, monospace=False):
+        for line in message.split("\n"):
+            self.print_block(line)
 
-	def print_line(self, message, monospace=False):
-		for line in message.split("\n"):
-			self.print_block(line)
 
 class BufferedOutput:
-	# Record all of the instance method calls so we can play them back later.
-	def __init__(self):
-		self.buf = []
-	def __getattr__(self, attr):
-		if attr not in ("add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"):
-			raise AttributeError
-		# Return a function that just records the call & arguments to our buffer.
-		def w(*args, **kwargs):
-			self.buf.append((attr, args, kwargs))
-		return w
-	def playback(self, output):
-		for attr, args, kwargs in self.buf:
-			getattr(output, attr)(*args, **kwargs)
+    # Record all of the instance method calls so we can play them back later.
+    def __init__(self):
+        self.buf = []
+
+    def __getattr__(self, attr):
+        if attr not in ("add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"):
+            raise AttributeError
+        # Return a function that just records the call & arguments to our buffer.
+
+        def w(*args, **kwargs):
+            self.buf.append((attr, args, kwargs))
+        return w
+
+    def playback(self, output):
+        for attr, args, kwargs in self.buf:
+            getattr(output, attr)(*args, **kwargs)
 
 
 if __name__ == "__main__":
-	import sys
-	from utils import load_environment
-	env = load_environment()
-	if len(sys.argv) == 1:
-		pool = multiprocessing.pool.Pool(processes=10)
-		run_checks(env, ConsoleOutput(), pool)
-	elif sys.argv[1] == "--check-primary-hostname":
-		# See if the primary hostname appears resolvable and has a signed certificate.
-		domain = env['PRIMARY_HOSTNAME']
-		if query_dns(domain, "A") != env['PUBLIC_IP']:
-			sys.exit(1)
-		ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
-		if not os.path.exists(ssl_certificate):
-			sys.exit(1)
-		cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
-		if cert_status != "OK":
-			sys.exit(1)
-		sys.exit(0)
-
-
+    import sys
+    from utils import load_environment
+    env = load_environment()
+    if len(sys.argv) == 1:
+        pool = multiprocessing.pool.Pool(processes=10)
+        run_checks(env, ConsoleOutput(), pool)
+    elif sys.argv[1] == "--check-primary-hostname":
+        # See if the primary hostname appears resolvable and has a signed certificate.
+        domain = env['PRIMARY_HOSTNAME']
+        if query_dns(domain, "A") != env['PUBLIC_IP']:
+            sys.exit(1)
+        ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
+        if not os.path.exists(ssl_certificate):
+            sys.exit(1)
+        cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
+        if cert_status != "OK":
+            sys.exit(1)
+        sys.exit(0)
diff --git a/management/utils.py b/management/utils.py
index 24a2a0a7..25064bea 100644
--- a/management/utils.py
+++ b/management/utils.py
@@ -2,33 +2,39 @@ import os.path
 
 CONF_DIR = os.path.join(os.path.dirname(__file__), "../conf")
 
+
 def load_environment():
     # Load settings from /etc/mailinabox.conf.
     return load_env_vars_from_file("/etc/mailinabox.conf")
 
+
 def load_env_vars_from_file(fn):
     # Load settings from a KEY=VALUE file.
     import collections
     env = collections.OrderedDict()
-    for line in open(fn): env.setdefault(*line.strip().split("=", 1))
+    for line in open(fn):
+        env.setdefault(*line.strip().split("=", 1))
     return env
 
+
 def save_environment(env):
     with open("/etc/mailinabox.conf", "w") as f:
         for k, v in env.items():
             f.write("%s=%s\n" % (k, v))
 
+
 def safe_domain_name(name):
     # Sanitize a domain name so it is safe to use as a file name on disk.
     import urllib.parse
     return urllib.parse.quote(name, safe='')
 
+
 def sort_domains(domain_names, env):
     # Put domain names in a nice sorted order. For web_update, PRIMARY_HOSTNAME
     # must appear first so it becomes the nginx default server.
-    
+
     # First group PRIMARY_HOSTNAME and its subdomains, then parent domains of PRIMARY_HOSTNAME, then other domains.
-    groups = ( [], [], [] )
+    groups = ([], [], [])
     for d in domain_names:
         if d == env['PRIMARY_HOSTNAME'] or d.endswith("." + env['PRIMARY_HOSTNAME']):
             groups[0].append(d)
@@ -44,13 +50,14 @@ def sort_domains(domain_names, env):
         ret = []
         for d in top_domains:
             ret.append(d)
-            ret.extend( sort_group([s for s in group if s.endswith("." + d)]) )
+            ret.extend(sort_group([s for s in group if s.endswith("." + d)]))
         return ret
-        
+
     groups = [sort_group(g) for g in groups]
 
     return groups[0] + groups[1] + groups[2]
 
+
 def sort_email_addresses(email_addresses, env):
     email_addresses = set(email_addresses)
     domains = set(email.split("@", 1)[1] for email in email_addresses if "@" in email)
@@ -59,13 +66,17 @@ def sort_email_addresses(email_addresses, env):
         domain_emails = set(email for email in email_addresses if email.endswith("@" + domain))
         ret.extend(sorted(domain_emails))
         email_addresses -= domain_emails
-    ret.extend(sorted(email_addresses)) # whatever is left
+    # whatever is left
+    ret.extend(sorted(email_addresses))
     return ret
 
+
 def exclusive_process(name):
     # Ensure that a process named `name` does not execute multiple
     # times concurrently.
-    import os, sys, atexit
+    import os
+    import sys
+    import atexit
     pidfile = '/var/run/mailinabox-%s.pid' % name
     mypid = os.getpid()
 
@@ -95,7 +106,8 @@ def exclusive_process(name):
                 try:
                     existing_pid = int(f.read().strip())
                 except ValueError:
-                    pass # No valid integer in the file.
+                    # No valid integer in the file.
+                    pass
 
                 # Check if the pid in it is valid.
                 if existing_pid:
@@ -108,7 +120,7 @@ def exclusive_process(name):
                 f.write(str(mypid))
                 f.truncate()
                 atexit.register(clear_my_pid, pidfile)
- 
+
 
 def clear_my_pid(pidfile):
     import os
@@ -118,26 +130,32 @@ def clear_my_pid(pidfile):
 def is_pid_valid(pid):
     """Checks whether a pid is a valid process ID of a currently running process."""
     # adapted from http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
-    import os, errno
-    if pid <= 0: raise ValueError('Invalid PID.')
+    import os
+    import errno
+    if pid <= 0:
+        raise ValueError('Invalid PID.')
     try:
         os.kill(pid, 0)
     except OSError as err:
-        if err.errno == errno.ESRCH: # No such process
+        # No such process
+        if err.errno == errno.ESRCH:
             return False
-        elif err.errno == errno.EPERM: # Not permitted to send signal
+        # Not permitted to send signal
+        elif err.errno == errno.EPERM:
             return True
-        else: # EINVAL
+        # EINVAL
+        else:
             raise
     else:
         return True
 
+
 def shell(method, cmd_args, env={}, capture_stderr=False, return_bytes=False, trap=False, input=None):
     # A safe way to execute processes.
     # Some processes like apt-get require being given a sane PATH.
     import subprocess
 
-    env.update({ "PATH": "/sbin:/bin:/usr/sbin:/usr/bin" })
+    env.update({"PATH": "/sbin:/bin:/usr/sbin:/usr/bin"})
     kwargs = {
         'env': env,
         'stderr': None if not capture_stderr else subprocess.STDOUT,
@@ -154,18 +172,21 @@ def shell(method, cmd_args, env={}, capture_stderr=False, return_bytes=False, tr
         except subprocess.CalledProcessError as e:
             ret = e.output
             code = e.returncode
-    if not return_bytes and isinstance(ret, bytes): ret = ret.decode("utf8")
+    if not return_bytes and isinstance(ret, bytes):
+        ret = ret.decode("utf8")
     if not trap:
         return ret
     else:
         return code, ret
 
+
 def create_syslog_handler():
     import logging.handlers
     handler = logging.handlers.SysLogHandler(address='/dev/log')
     handler.setLevel(logging.WARNING)
     return handler
 
+
 def du(path):
     # Computes the size of all files in the path, like the `du` command.
     # Based on http://stackoverflow.com/a/17936789. Takes into account
diff --git a/management/web_update.py b/management/web_update.py
index b088c55f..91ddd4a5 100644
--- a/management/web_update.py
+++ b/management/web_update.py
@@ -2,297 +2,313 @@
 # domains for which a mail account has been set up.
 ########################################################################
 
-import os, os.path, shutil, re, tempfile, rtyaml
+import os
+import os.path
+import shutil
+import re
+import tempfile
+import rtyaml
 
 from mailconfig import get_mail_domains
 from dns_update import get_custom_dns_config, do_dns_update
 from utils import shell, safe_domain_name, sort_domains
 
+
 def get_web_domains(env):
-	# What domains should we serve websites for?
-	domains = set()
+    # What domains should we serve websites for?
+    domains = set()
 
-	# At the least it's the PRIMARY_HOSTNAME so we can serve webmail
-	# as well as Z-Push for Exchange ActiveSync.
-	domains.add(env['PRIMARY_HOSTNAME'])
+    # At the least it's the PRIMARY_HOSTNAME so we can serve webmail
+    # as well as Z-Push for Exchange ActiveSync.
+    domains.add(env['PRIMARY_HOSTNAME'])
 
-	# Also serve web for all mail domains so that we might at least
-	# provide auto-discover of email settings, and also a static website
-	# if the user wants to make one. These will require an SSL cert.
-	domains |= get_mail_domains(env)
+    # Also serve web for all mail domains so that we might at least
+    # provide auto-discover of email settings, and also a static website
+    # if the user wants to make one. These will require an SSL cert.
+    domains |= get_mail_domains(env)
 
-	# ...Unless the domain has an A/AAAA record that maps it to a different
-	# IP address than this box. Remove those domains from our list.
-	dns = get_custom_dns_config(env)
-	for domain, value in dns.items():
-		if domain not in domains: continue
-		if (isinstance(value, str) and (value != "local")) \
-		  or (isinstance(value, dict) and ("CNAME" in value)) \
-		  or (isinstance(value, dict) and ("A" in value) and (value["A"] != "local")) \
-		  or (isinstance(value, dict) and ("AAAA" in value) and (value["AAAA"] != "local")):
-			domains.remove(domain)
+    # ...Unless the domain has an A/AAAA record that maps it to a different
+    # IP address than this box. Remove those domains from our list.
+    dns = get_custom_dns_config(env)
+    for domain, value in dns.items():
+        if domain not in domains:
+            continue
+        if (isinstance(value, str) and (value != "local")) or (isinstance(value, dict) and ("CNAME" in value)) or (isinstance(value, dict) and ("A" in value) and (value["A"] != "local")) or (isinstance(value, dict) and ("AAAA" in value) and (value["AAAA"] != "local")):
+            domains.remove(domain)
+
+    # Sort the list. Put PRIMARY_HOSTNAME first so it becomes the
+    # default server (nginx's default_server).
+    domains = sort_domains(domains, env)
+
+    return domains
 
-	# Sort the list. Put PRIMARY_HOSTNAME first so it becomes the
-	# default server (nginx's default_server).
-	domains = sort_domains(domains, env)
 
-	return domains
-	
 def do_web_update(env, ok_status="web updated\n"):
-	# Build an nginx configuration file.
-	nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read()
+    # Build an nginx configuration file.
+    nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read()
 
-	# Add configuration for each web domain.
-	template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
-	template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read()
-	for domain in get_web_domains(env):
-		nginx_conf += make_domain_config(domain, template1, template2, env)
+    # Add configuration for each web domain.
+    template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
+    template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read()
+    for domain in get_web_domains(env):
+        nginx_conf += make_domain_config(domain, template1, template2, env)
 
-	# Did the file change? If not, don't bother writing & restarting nginx.
-	nginx_conf_fn = "/etc/nginx/conf.d/local.conf"
-	if os.path.exists(nginx_conf_fn):
-		with open(nginx_conf_fn) as f:
-			if f.read() == nginx_conf:
-				return ""
+    # Did the file change? If not, don't bother writing & restarting nginx.
+    nginx_conf_fn = "/etc/nginx/conf.d/local.conf"
+    if os.path.exists(nginx_conf_fn):
+        with open(nginx_conf_fn) as f:
+            if f.read() == nginx_conf:
+                return ""
 
-	# Save the file.
-	with open(nginx_conf_fn, "w") as f:
-		f.write(nginx_conf)
+    # Save the file.
+    with open(nginx_conf_fn, "w") as f:
+        f.write(nginx_conf)
 
-	# Kick nginx. Since this might be called from the web admin
-	# don't do a 'restart'. That would kill the connection before
-	# the API returns its response. A 'reload' should be good
-	# enough and doesn't break any open connections.
-	shell('check_call', ["/usr/sbin/service", "nginx", "reload"])
+    # Kick nginx. Since this might be called from the web admin
+    # don't do a 'restart'. That would kill the connection before
+    # the API returns its response. A 'reload' should be good
+    # enough and doesn't break any open connections.
+    shell('check_call', ["/usr/sbin/service", "nginx", "reload"])
+
+    return ok_status
 
-	return ok_status
 
 def make_domain_config(domain, template, template_for_primaryhost, env):
-	# How will we configure this domain.
+    # How will we configure this domain.
 
-	# Where will its root directory be for static files?
+    # Where will its root directory be for static files?
 
-	root = get_web_root(domain, env)
+    root = get_web_root(domain, env)
 
-	# What private key and SSL certificate will we use for this domain?
-	ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
+    # What private key and SSL certificate will we use for this domain?
+    ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
 
-	# For hostnames created after the initial setup, ensure we have an SSL certificate
-	# available. Make a self-signed one now if one doesn't exist.
-	ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, env)
+    # For hostnames created after the initial setup, ensure we have an SSL certificate
+    # available. Make a self-signed one now if one doesn't exist.
+    ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, env)
 
-	# Put pieces together.
-	nginx_conf_parts = re.split("\s*# ADDITIONAL DIRECTIVES HERE\s*", template)
-	nginx_conf = nginx_conf_parts[0] + "\n"
-	if domain == env['PRIMARY_HOSTNAME']:
-		nginx_conf += template_for_primaryhost + "\n"
+    # Put pieces together.
+    nginx_conf_parts = re.split(r"\s*# ADDITIONAL DIRECTIVES HERE\s*", template)
+    nginx_conf = nginx_conf_parts[0] + "\n"
+    if domain == env['PRIMARY_HOSTNAME']:
+        nginx_conf += template_for_primaryhost + "\n"
 
-	# Replace substitution strings in the template & return.
-	nginx_conf = nginx_conf.replace("$STORAGE_ROOT", env['STORAGE_ROOT'])
-	nginx_conf = nginx_conf.replace("$HOSTNAME", domain.encode("idna").decode("ascii"))
-	nginx_conf = nginx_conf.replace("$ROOT", root)
-	nginx_conf = nginx_conf.replace("$SSL_KEY", ssl_key)
-	nginx_conf = nginx_conf.replace("$SSL_CERTIFICATE", ssl_certificate)
+    # Replace substitution strings in the template & return.
+    nginx_conf = nginx_conf.replace("$STORAGE_ROOT", env['STORAGE_ROOT'])
+    nginx_conf = nginx_conf.replace("$HOSTNAME", domain.encode("idna").decode("ascii"))
+    nginx_conf = nginx_conf.replace("$ROOT", root)
+    nginx_conf = nginx_conf.replace("$SSL_KEY", ssl_key)
+    nginx_conf = nginx_conf.replace("$SSL_CERTIFICATE", ssl_certificate)
 
-	# Because the certificate may change, we should recognize this so we
-	# can trigger an nginx update.
-	def hashfile(filepath):
-		import hashlib
-		sha1 = hashlib.sha1()
-		f = open(filepath, 'rb')
-		try:
-			sha1.update(f.read())
-		finally:
-			f.close()
-		return sha1.hexdigest()
-	nginx_conf += "# ssl files sha1: %s / %s\n" % (hashfile(ssl_key), hashfile(ssl_certificate))
+    # Because the certificate may change, we should recognize this so we
+    # can trigger an nginx update.
+    def hashfile(filepath):
+        import hashlib
+        sha1 = hashlib.sha1()
+        f = open(filepath, 'rb')
+        try:
+            sha1.update(f.read())
+        finally:
+            f.close()
+        return sha1.hexdigest()
+    nginx_conf += "# ssl files sha1: %s / %s\n" % (hashfile(ssl_key), hashfile(ssl_certificate))
 
-	# Add in any user customizations in YAML format.
-	nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
-	if os.path.exists(nginx_conf_custom_fn):
-		yaml = rtyaml.load(open(nginx_conf_custom_fn))
-		if domain in yaml:
-			yaml = yaml[domain]
-			for path, url in yaml.get("proxies", {}).items():
-				nginx_conf += "\tlocation %s {\n\t\tproxy_pass %s;\n\t}\n" % (path, url)
-			for path, url in yaml.get("redirects", {}).items():
-				nginx_conf += "\trewrite %s %s permanent;\n" % (path, url)
+    # Add in any user customizations in YAML format.
+    nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
+    if os.path.exists(nginx_conf_custom_fn):
+        yaml = rtyaml.load(open(nginx_conf_custom_fn))
+        if domain in yaml:
+            yaml = yaml[domain]
+            for path, url in yaml.get("proxies", {}).items():
+                nginx_conf += "\tlocation %s {\n\t\tproxy_pass %s;\n\t}\n" % (path, url)
+            for path, url in yaml.get("redirects", {}).items():
+                nginx_conf += "\trewrite %s %s permanent;\n" % (path, url)
 
-	# Add in any user customizations in the includes/ folder.
-	nginx_conf_custom_include = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(domain) + ".conf")
-	if os.path.exists(nginx_conf_custom_include):
-		nginx_conf += "\tinclude %s;\n" % (nginx_conf_custom_include)
+    # Add in any user customizations in the includes/ folder.
+    nginx_conf_custom_include = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(domain) + ".conf")
+    if os.path.exists(nginx_conf_custom_include):
+        nginx_conf += "\tinclude %s;\n" % (nginx_conf_custom_include)
 
-	# Ending.
-	nginx_conf += nginx_conf_parts[1]
+    # Ending.
+    nginx_conf += nginx_conf_parts[1]
+
+    return nginx_conf
 
-	return nginx_conf
 
 def get_web_root(domain, env, test_exists=True):
-	# Try STORAGE_ROOT/web/domain_name if it exists, but fall back to STORAGE_ROOT/web/default.
-	for test_domain in (domain, 'default'):
-		root = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(test_domain))
-		if os.path.exists(root) or not test_exists: break
-	return root
+    # Try STORAGE_ROOT/web/domain_name if it exists, but fall back to STORAGE_ROOT/web/default.
+    for test_domain in (domain, 'default'):
+        root = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(test_domain))
+        if os.path.exists(root) or not test_exists:
+            break
+    return root
+
 
 def get_domain_ssl_files(domain, env, allow_shared_cert=True):
-	# What SSL private key will we use? Allow the user to override this, but
-	# in many cases using the same private key for all domains would be fine.
-	# Don't allow the user to override the key for PRIMARY_HOSTNAME because
-	# that's what's in the main file.
-	ssl_key = os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_private_key.pem')
-	ssl_key_is_alt = False
-	alt_key = os.path.join(env["STORAGE_ROOT"], 'ssl/%s/private_key.pem' % safe_domain_name(domain))
-	if domain != env['PRIMARY_HOSTNAME'] and os.path.exists(alt_key):
-		ssl_key = alt_key
-		ssl_key_is_alt = True
+    # What SSL private key will we use? Allow the user to override this, but
+    # in many cases using the same private key for all domains would be fine.
+    # Don't allow the user to override the key for PRIMARY_HOSTNAME because
+    # that's what's in the main file.
+    ssl_key = os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_private_key.pem')
+    ssl_key_is_alt = False
+    alt_key = os.path.join(env["STORAGE_ROOT"], 'ssl/%s/private_key.pem' % safe_domain_name(domain))
+    if domain != env['PRIMARY_HOSTNAME'] and os.path.exists(alt_key):
+        ssl_key = alt_key
+        ssl_key_is_alt = True
 
-	# What SSL certificate will we use?
-	ssl_certificate_primary = os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem')
-	ssl_via = None
-	if domain == env['PRIMARY_HOSTNAME']:
-		# For PRIMARY_HOSTNAME, use the one we generated at set-up time.
-		ssl_certificate = ssl_certificate_primary
-	else:
-		# For other domains, we'll probably use a certificate in a different path.
-		ssl_certificate = os.path.join(env["STORAGE_ROOT"], 'ssl/%s/ssl_certificate.pem' % safe_domain_name(domain))
+    # What SSL certificate will we use?
+    ssl_certificate_primary = os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem')
+    ssl_via = None
+    if domain == env['PRIMARY_HOSTNAME']:
+        # For PRIMARY_HOSTNAME, use the one we generated at set-up time.
+        ssl_certificate = ssl_certificate_primary
+    else:
+        # For other domains, we'll probably use a certificate in a different path.
+        ssl_certificate = os.path.join(env["STORAGE_ROOT"], 'ssl/%s/ssl_certificate.pem' % safe_domain_name(domain))
 
-		# But we can be smart and reuse the main SSL certificate if is has
-		# a Subject Alternative Name matching this domain. Don't do this if
-		# the user has uploaded a different private key for this domain.
-		if not ssl_key_is_alt and allow_shared_cert:
-			from status_checks import check_certificate
-			if check_certificate(domain, ssl_certificate_primary, None)[0] == "OK":
-				ssl_certificate = ssl_certificate_primary
-				ssl_via = "Using multi/wildcard certificate of %s." % env['PRIMARY_HOSTNAME']
+        # But we can be smart and reuse the main SSL certificate if it has
+        # a Subject Alternative Name matching this domain. Don't do this if
+        # the user has uploaded a different private key for this domain.
+        if not ssl_key_is_alt and allow_shared_cert:
+            from status_checks import check_certificate
+            if check_certificate(domain, ssl_certificate_primary, None)[0] == "OK":
+                ssl_certificate = ssl_certificate_primary
+                ssl_via = "Using multi/wildcard certificate of %s." % env['PRIMARY_HOSTNAME']
 
-			# For a 'www.' domain, see if we can reuse the cert of the parent.
-			elif domain.startswith('www.'):
-				ssl_certificate_parent = os.path.join(env["STORAGE_ROOT"], 'ssl/%s/ssl_certificate.pem' % safe_domain_name(domain[4:]))
-				if os.path.exists(ssl_certificate_parent) and check_certificate(domain, ssl_certificate_parent, None)[0] == "OK":
-					ssl_certificate = ssl_certificate_parent
-					ssl_via = "Using multi/wildcard certificate of %s." % domain[4:]
+            # For a 'www.' domain, see if we can reuse the cert of the parent.
+            elif domain.startswith('www.'):
+                ssl_certificate_parent = os.path.join(env["STORAGE_ROOT"], 'ssl/%s/ssl_certificate.pem' % safe_domain_name(domain[4:]))
+                if os.path.exists(ssl_certificate_parent) and check_certificate(domain, ssl_certificate_parent, None)[0] == "OK":
+                    ssl_certificate = ssl_certificate_parent
+                    ssl_via = "Using multi/wildcard certificate of %s." % domain[4:]
+
+    return ssl_key, ssl_certificate, ssl_via
 
-	return ssl_key, ssl_certificate, ssl_via
 
 def ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, env):
-	# For domains besides PRIMARY_HOSTNAME, generate a self-signed certificate if
-	# a certificate doesn't already exist. See setup/mail.sh for documentation.
+    # For domains besides PRIMARY_HOSTNAME, generate a self-signed certificate if
+    # a certificate doesn't already exist. See setup/mail.sh for documentation.
 
-	if domain == env['PRIMARY_HOSTNAME']:
-		return
+    if domain == env['PRIMARY_HOSTNAME']:
+        return
 
-	# Sanity check. Shouldn't happen. A non-primary domain might use this
-	# certificate (see above), but then the certificate should exist anyway.
-	if ssl_certificate == os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem'):
-		return
+    # Sanity check. Shouldn't happen. A non-primary domain might use this
+    # certificate (see above), but then the certificate should exist anyway.
+    if ssl_certificate == os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem'):
+        return
 
-	if os.path.exists(ssl_certificate):
-		return
+    if os.path.exists(ssl_certificate):
+        return
 
-	os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)
+    os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)
 
-	# Generate a new self-signed certificate using the same private key that we already have.
+    # Generate a new self-signed certificate using the same private key that we already have.
 
-	# Start with a CSR written to a temporary file.
-	with tempfile.NamedTemporaryFile(mode="w") as csr_fp:
-		csr_fp.write(create_csr(domain, ssl_key, env))
-		csr_fp.flush() # since we won't close until after running 'openssl x509', since close triggers delete.
+    # Start with a CSR written to a temporary file.
+    with tempfile.NamedTemporaryFile(mode="w") as csr_fp:
+        csr_fp.write(create_csr(domain, ssl_key, env))
+        # Flush now: we won't close until after running 'openssl x509', and close triggers delete.
+        csr_fp.flush()
+
+        # And then make the certificate.
+        shell("check_call", [
+            "openssl", "x509", "-req",
+            "-days", "365",
+            "-in", csr_fp.name,
+            "-signkey", ssl_key,
+            "-out", ssl_certificate])
 
-		# And then make the certificate.
-		shell("check_call", [
-			"openssl", "x509", "-req",
-			"-days", "365",
-			"-in", csr_fp.name,
-			"-signkey", ssl_key,
-			"-out", ssl_certificate])
 
 def create_csr(domain, ssl_key, env):
-	return shell("check_output", [
-                "openssl", "req", "-new",
-                "-key", ssl_key,
-                "-out",  "/dev/stdout",
-                "-sha256",
-                "-subj", "/C=%s/ST=/L=/O=/CN=%s" % (env["CSR_COUNTRY"], domain.encode("idna").decode("ascii"))])
+    return shell("check_output", [
+        "openssl", "req", "-new",
+        "-key", ssl_key,
+        "-out",  "/dev/stdout",
+        "-sha256",
+        "-subj", "/C=%s/ST=/L=/O=/CN=%s" % (env["CSR_COUNTRY"], domain.encode("idna").decode("ascii"))])
+
 
 def install_cert(domain, ssl_cert, ssl_chain, env):
-	if domain not in get_web_domains(env):
-		return "Invalid domain name."
+    if domain not in get_web_domains(env):
+        return "Invalid domain name."
 
-	# Write the combined cert+chain to a temporary path and validate that it is OK.
-	# The certificate always goes above the chain.
-	import tempfile, os
-	fd, fn = tempfile.mkstemp('.pem')
-	os.write(fd, (ssl_cert + '\n' + ssl_chain).encode("ascii"))
-	os.close(fd)
+    # Write the combined cert+chain to a temporary path and validate that it is OK.
+    # The certificate always goes above the chain.
+    import tempfile
+    import os
+    fd, fn = tempfile.mkstemp('.pem')
+    os.write(fd, (ssl_cert + '\n' + ssl_chain).encode("ascii"))
+    os.close(fd)
 
-	# Do validation on the certificate before installing it.
-	from status_checks import check_certificate
-	ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env, allow_shared_cert=False)
-	cert_status, cert_status_details = check_certificate(domain, fn, ssl_key)
-	if cert_status != "OK":
-		if cert_status == "SELF-SIGNED":
-			cert_status = "This is a self-signed certificate. I can't install that."
-		os.unlink(fn)
-		if cert_status_details is not None:
-			cert_status += " " + cert_status_details
-		return cert_status
+    # Do validation on the certificate before installing it.
+    from status_checks import check_certificate
+    ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env, allow_shared_cert=False)
+    cert_status, cert_status_details = check_certificate(domain, fn, ssl_key)
+    if cert_status != "OK":
+        if cert_status == "SELF-SIGNED":
+            cert_status = "This is a self-signed certificate. I can't install that."
+        os.unlink(fn)
+        if cert_status_details is not None:
+            cert_status += " " + cert_status_details
+        return cert_status
 
-	# Copy the certificate to its expected location.
-	os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)
-	shutil.move(fn, ssl_certificate)
+    # Copy the certificate to its expected location.
+    os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)
+    shutil.move(fn, ssl_certificate)
 
-	ret = []
+    ret = []
 
-	# When updating the cert for PRIMARY_HOSTNAME, also update DNS because it is
-	# used in the DANE TLSA record and restart postfix and dovecot which use
-	# that certificate.
-	if domain == env['PRIMARY_HOSTNAME']:
-		ret.append( do_dns_update(env) )
+    # When updating the cert for PRIMARY_HOSTNAME, also update DNS because it is
+    # used in the DANE TLSA record and restart postfix and dovecot which use
+    # that certificate.
+    if domain == env['PRIMARY_HOSTNAME']:
+        ret.append(do_dns_update(env))
 
-		shell('check_call', ["/usr/sbin/service", "postfix", "restart"])
-		shell('check_call', ["/usr/sbin/service", "dovecot", "restart"])
-		ret.append("mail services restarted")
+        shell('check_call', ["/usr/sbin/service", "postfix", "restart"])
+        shell('check_call', ["/usr/sbin/service", "dovecot", "restart"])
+        ret.append("mail services restarted")
+
+    # Kick nginx so it sees the cert.
+    ret.append(do_web_update(env, ok_status=""))
+    return "\n".join(r for r in ret if r.strip() != "")
 
-	# Kick nginx so it sees the cert.
-	ret.append( do_web_update(env, ok_status="") )
-	return "\n".join(r for r in ret if r.strip() != "")
 
 def get_web_domains_info(env):
-	# load custom settings so we can tell what domains have a redirect or proxy set up on '/',
-	# which means static hosting is not happening
-	custom_settings = { }
-	nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
-	if os.path.exists(nginx_conf_custom_fn):
-		custom_settings = rtyaml.load(open(nginx_conf_custom_fn))
-	def has_root_proxy_or_redirect(domain):
-		return custom_settings.get(domain, {}).get('redirects', {}).get('/') or custom_settings.get(domain, {}).get('proxies', {}).get('/')
+    # load custom settings so we can tell what domains have a redirect or proxy set up on '/',
+    # which means static hosting is not happening
+    custom_settings = {}
+    nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
+    if os.path.exists(nginx_conf_custom_fn):
+        custom_settings = rtyaml.load(open(nginx_conf_custom_fn))
 
-	# for the SSL config panel, get cert status
-	def check_cert(domain):
-		from status_checks import check_certificate
-		ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
-		if not os.path.exists(ssl_certificate):
-			return ("danger", "No Certificate Installed")
-		cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
-		if cert_status == "OK":
-			if not ssl_via:
-				return ("success", "Signed & valid. " + cert_status_details)
-			else:
-				# This is an alternate domain but using the same cert as the primary domain.
-				return ("success", "Signed & valid. " + ssl_via)
-		elif cert_status == "SELF-SIGNED":
-			return ("warning", "Self-signed. Get a signed certificate to stop warnings.")
-		else:
-			return ("danger", "Certificate has a problem: " + cert_status)
+    def has_root_proxy_or_redirect(domain):
+        return custom_settings.get(domain, {}).get('redirects', {}).get('/') or custom_settings.get(domain, {}).get('proxies', {}).get('/')
 
-	return [
-		{
-			"domain": domain,
-			"root": get_web_root(domain, env),
-			"custom_root": get_web_root(domain, env, test_exists=False),
-			"ssl_certificate": check_cert(domain),
-			"static_enabled": not has_root_proxy_or_redirect(domain),
-		}
-		for domain in get_web_domains(env)
-	]
+    # for the SSL config panel, get cert status
+    def check_cert(domain):
+        from status_checks import check_certificate
+        ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
+        if not os.path.exists(ssl_certificate):
+            return ("danger", "No Certificate Installed")
+        cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
+        if cert_status == "OK":
+            if not ssl_via:
+                return ("success", "Signed & valid. " + cert_status_details)
+            else:
+                # This is an alternate domain but using the same cert as the primary domain.
+                return ("success", "Signed & valid. " + ssl_via)
+        elif cert_status == "SELF-SIGNED":
+            return ("warning", "Self-signed. Get a signed certificate to stop warnings.")
+        else:
+            return ("danger", "Certificate has a problem: " + cert_status)
+
+    return [
+        {
+            "domain": domain,
+            "root": get_web_root(domain, env),
+            "custom_root": get_web_root(domain, env, test_exists=False),
+            "ssl_certificate": check_cert(domain),
+            "static_enabled": not has_root_proxy_or_redirect(domain),
+        }
+        for domain in get_web_domains(env)
+    ]
diff --git a/setup/migrate.py b/setup/migrate.py
index cbc4b361..5b41f76b 100755
--- a/setup/migrate.py
+++ b/setup/migrate.py
@@ -5,134 +5,150 @@
 # We have to be careful here that any dependencies are already installed in the previous
 # version since this script runs before all other aspects of the setup script.
 
-import sys, os, os.path, glob, re, shutil
+import sys
+import os
+import os.path
+import glob
+import re
+import shutil
 
 sys.path.insert(0, 'management')
 from utils import load_environment, save_environment, shell
 
+
 def migration_1(env):
-	# Re-arrange where we store SSL certificates. There was a typo also.
+    # Re-arrange where we store SSL certificates. There was a typo also.
 
-	def move_file(fn, domain_name_escaped, filename):
-		# Moves an SSL-related file into the right place.
-		fn1 = os.path.join( env["STORAGE_ROOT"], 'ssl', domain_name_escaped, file_type)
-		os.makedirs(os.path.dirname(fn1), exist_ok=True)
-		shutil.move(fn, fn1)
+    def move_file(fn, domain_name_escaped, filename):
+        # Moves an SSL-related file into the right place.
+        fn1 = os.path.join(env["STORAGE_ROOT"], 'ssl', domain_name_escaped, file_type)
+        os.makedirs(os.path.dirname(fn1), exist_ok=True)
+        shutil.move(fn, fn1)
 
-	# Migrate the 'domains' directory.
-	for sslfn in glob.glob(os.path.join( env["STORAGE_ROOT"], 'ssl/domains/*' )):
-		fn = os.path.basename(sslfn)
-		m = re.match("(.*)_(certifiate.pem|cert_sign_req.csr|private_key.pem)$", fn)
-		if m:
-			# get the new name for the file
-			domain_name, file_type = m.groups()
-			if file_type == "certifiate.pem": file_type = "ssl_certificate.pem" # typo
-			if file_type == "cert_sign_req.csr": file_type = "certificate_signing_request.csr" # nicer
-			move_file(sslfn, domain_name, file_type)
+    # Migrate the 'domains' directory.
+    for sslfn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'ssl/domains/*')):
+        fn = os.path.basename(sslfn)
+        m = re.match("(.*)_(certifiate.pem|cert_sign_req.csr|private_key.pem)$", fn)
+        if m:
+            # get the new name for the file
+            domain_name, file_type = m.groups()
+            # typo
+            if file_type == "certifiate.pem":
+                file_type = "ssl_certificate.pem"
+            # nicer
+            if file_type == "cert_sign_req.csr":
+                file_type = "certificate_signing_request.csr"
+            move_file(sslfn, domain_name, file_type)
+
+    # Remove the old domains directory if it is now empty.
+    try:
+        os.rmdir(os.path.join(env["STORAGE_ROOT"], 'ssl/domains'))
+    except:
+        pass
 
-	# Move the old domains directory if it is now empty.
-	try:
-		os.rmdir(os.path.join( env["STORAGE_ROOT"], 'ssl/domains'))
-	except:
-		pass
 
 def migration_2(env):
-	# Delete the .dovecot_sieve script everywhere. This was formerly a copy of our spam -> Spam
-	# script. We now install it as a global script, and we use managesieve, so the old file is
-	# irrelevant. Also delete the compiled binary form.
-	for fn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'mail/mailboxes/*/*/.dovecot.sieve')):
-		os.unlink(fn)
-	for fn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'mail/mailboxes/*/*/.dovecot.svbin')):
-		os.unlink(fn)
+    # Delete the .dovecot_sieve script everywhere. This was formerly a copy of our spam -> Spam
+    # script. We now install it as a global script, and we use managesieve, so the old file is
+    # irrelevant. Also delete the compiled binary form.
+    for fn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'mail/mailboxes/*/*/.dovecot.sieve')):
+        os.unlink(fn)
+    for fn in glob.glob(os.path.join(env["STORAGE_ROOT"], 'mail/mailboxes/*/*/.dovecot.svbin')):
+        os.unlink(fn)
+
 
 def migration_3(env):
-	# Move the migration ID from /etc/mailinabox.conf to $STORAGE_ROOT/mailinabox.version
-	# so that the ID stays with the data files that it describes the format of. The writing
-	# of the file will be handled by the main function.
-	pass
+    # Move the migration ID from /etc/mailinabox.conf to $STORAGE_ROOT/mailinabox.version
+    # so that the ID stays with the data files that it describes the format of. The writing
+    # of the file will be handled by the main function.
+    pass
+
 
 def migration_4(env):
-	# Add a new column to the mail users table where we can store administrative privileges.
-	db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
-	shell("check_call", ["sqlite3", db, "ALTER TABLE users ADD privileges TEXT NOT NULL DEFAULT ''"])
+    # Add a new column to the mail users table where we can store administrative privileges.
+    db = os.path.join(env["STORAGE_ROOT"], 'mail/users.sqlite')
+    shell("check_call", ["sqlite3", db, "ALTER TABLE users ADD privileges TEXT NOT NULL DEFAULT ''"])
+
 
 def migration_5(env):
-	# The secret key for encrypting backups was world readable. Fix here.
-	os.chmod(os.path.join(env["STORAGE_ROOT"], 'backup/secret_key.txt'), 0o600)
+    # The secret key for encrypting backups was world readable. Fix here.
+    os.chmod(os.path.join(env["STORAGE_ROOT"], 'backup/secret_key.txt'), 0o600)
+
 
 def migration_6(env):
-	# We now will generate multiple DNSSEC keys for different algorithms, since TLDs may
-	# not support them all. .email only supports RSA/SHA-256. Rename the keys.conf file
-	# to be algorithm-specific.
-	basepath = os.path.join(env["STORAGE_ROOT"], 'dns/dnssec')
-	shutil.move(os.path.join(basepath, 'keys.conf'), os.path.join(basepath, 'RSASHA1-NSEC3-SHA1.conf'))
+    # We now will generate multiple DNSSEC keys for different algorithms, since TLDs may
+    # not support them all. .email only supports RSA/SHA-256. Rename the keys.conf file
+    # to be algorithm-specific.
+    basepath = os.path.join(env["STORAGE_ROOT"], 'dns/dnssec')
+    shutil.move(os.path.join(basepath, 'keys.conf'), os.path.join(basepath, 'RSASHA1-NSEC3-SHA1.conf'))
+
 
 def get_current_migration():
-	ver = 0
-	while True:
-		next_ver = (ver + 1)
-		migration_func = globals().get("migration_%d" % next_ver)
-		if not migration_func:
-			return ver
-		ver = next_ver
+    ver = 0
+    while True:
+        next_ver = (ver + 1)
+        migration_func = globals().get("migration_%d" % next_ver)
+        if not migration_func:
+            return ver
+        ver = next_ver
+
 
 def run_migrations():
-	if not os.access("/etc/mailinabox.conf", os.W_OK, effective_ids=True):
-		print("This script must be run as root.", file=sys.stderr)
-		sys.exit(1)
+    if not os.access("/etc/mailinabox.conf", os.W_OK, effective_ids=True):
+        print("This script must be run as root.", file=sys.stderr)
+        sys.exit(1)
 
-	env = load_environment()
+    env = load_environment()
 
-	migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')
-	if os.path.exists(migration_id_file):
-		with open(migration_id_file) as f:
-			ourver = int(f.read().strip())
-	else:
-		# Load the legacy location of the migration ID. We'll drop support
-		# for this eventually.
-		ourver = int(env.get("MIGRATIONID", "0"))
+    migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')
+    if os.path.exists(migration_id_file):
+        with open(migration_id_file) as f:
+            ourver = int(f.read().strip())
+    else:
+        # Load the legacy location of the migration ID. We'll drop support
+        # for this eventually.
+        ourver = int(env.get("MIGRATIONID", "0"))
 
-	while True:
-		next_ver = (ourver + 1)
-		migration_func = globals().get("migration_%d" % next_ver)
+    while True:
+        next_ver = (ourver + 1)
+        migration_func = globals().get("migration_%d" % next_ver)
 
-		if not migration_func:
-			# No more migrations to run.
-			break
+        if not migration_func:
+            # No more migrations to run.
+            break
 
-		print()
-		print("Running migration to Mail-in-a-Box #%d..." % next_ver)
+        print()
+        print("Running migration to Mail-in-a-Box #%d..." % next_ver)
 
-		try:
-			migration_func(env)
-		except Exception as e:
-			print()
-			print("Error running the migration script:")
-			print()
-			print(e)
-			print()
-			print("Your system may be in an inconsistent state now. We're terribly sorry. A re-install from a backup might be the best way to continue.")
-			sys.exit(1)
+        try:
+            migration_func(env)
+        except Exception as e:
+            print()
+            print("Error running the migration script:")
+            print()
+            print(e)
+            print()
+            print("Your system may be in an inconsistent state now. We're terribly sorry. A re-install from a backup might be the best way to continue.")
+            sys.exit(1)
 
-		ourver = next_ver
+        ourver = next_ver
 
-		# Write out our current version now. Do this sooner rather than later
-		# in case of any problems.
-		with open(migration_id_file, "w") as f:
-			f.write(str(ourver) + "\n")
+        # Write out our current version now. Do this sooner rather than later
+        # in case of any problems.
+        with open(migration_id_file, "w") as f:
+            f.write(str(ourver) + "\n")
 
-		# Delete the legacy location of this field.
-		if "MIGRATIONID" in env:
-			del env["MIGRATIONID"]
-			save_environment(env)
+        # Delete the legacy location of this field.
+        if "MIGRATIONID" in env:
+            del env["MIGRATIONID"]
+            save_environment(env)
 
-		# iterate and try next version...
+        # iterate and try next version...
 
 if __name__ == "__main__":
-	if sys.argv[-1] == "--current":
-		# Return the number of the highest migration.
-		print(str(get_current_migration()))
-	elif sys.argv[-1] == "--migrate":
-		# Perform migrations.
-		run_migrations()
-
+    if sys.argv[-1] == "--current":
+        # Return the number of the highest migration.
+        print(str(get_current_migration()))
+    elif sys.argv[-1] == "--migrate":
+        # Perform migrations.
+        run_migrations()
diff --git a/tests/test_dns.py b/tests/test_dns.py
index c5fe8051..4fb98a2f 100755
--- a/tests/test_dns.py
+++ b/tests/test_dns.py
@@ -7,100 +7,110 @@
 # where ipaddr is the IP address of your Mail-in-a-Box
 # and hostname is the domain name to check the DNS for.
 
-import sys, re, difflib
-import dns.reversename, dns.resolver
+import sys
+import re
+import difflib
+import dns.reversename
+import dns.resolver
 
 if len(sys.argv) < 3:
-	print("Usage: tests/dns.py ipaddress hostname [primary hostname]")
-	sys.exit(1)
+    print("Usage: tests/dns.py ipaddress hostname [primary hostname]")
+    sys.exit(1)
 
 ipaddr, hostname = sys.argv[1:3]
 primary_hostname = hostname
 if len(sys.argv) == 4:
-	primary_hostname = sys.argv[3]
+    primary_hostname = sys.argv[3]
+
 
 def test(server, description):
-	tests = [
-		(hostname, "A", ipaddr),
-		#(hostname, "NS", "ns1.%s.;ns2.%s." % (primary_hostname, primary_hostname)),
-		("ns1." + primary_hostname, "A", ipaddr),
-		("ns2." + primary_hostname, "A", ipaddr),
-		("www." + hostname, "A", ipaddr),
-		(hostname, "MX", "10 " + primary_hostname + "."),
-		(hostname, "TXT", "\"v=spf1 mx -all\""),
-		("mail._domainkey." + hostname, "TXT", "\"v=DKIM1; k=rsa; s=email; \" \"p=__KEY__\""),
-		#("_adsp._domainkey." + hostname, "TXT", "\"dkim=all\""),
-		("_dmarc." + hostname, "TXT", "\"v=DMARC1; p=quarantine\""),
-	]
-	return test2(tests, server, description)
+    tests = [
+        (hostname, "A", ipaddr),
+        #(hostname, "NS", "ns1.%s.;ns2.%s." % (primary_hostname, primary_hostname)),
+        ("ns1." + primary_hostname, "A", ipaddr),
+        ("ns2." + primary_hostname, "A", ipaddr),
+        ("www." + hostname, "A", ipaddr),
+        (hostname, "MX", "10 " + primary_hostname + "."),
+        (hostname, "TXT", "\"v=spf1 mx -all\""),
+        ("mail._domainkey." + hostname, "TXT", "\"v=DKIM1; k=rsa; s=email; \" \"p=__KEY__\""),
+        #("_adsp._domainkey." + hostname, "TXT", "\"dkim=all\""),
+        ("_dmarc." + hostname, "TXT", "\"v=DMARC1; p=quarantine\""),
+    ]
+    return test2(tests, server, description)
+
 
 def test_ptr(server, description):
-	ipaddr_rev = dns.reversename.from_address(ipaddr)
-	tests = [
-		(ipaddr_rev, "PTR", hostname+'.'),
-	]
-	return test2(tests, server, description)
+    ipaddr_rev = dns.reversename.from_address(ipaddr)
+    tests = [
+        (ipaddr_rev, "PTR", hostname+'.'),
+    ]
+    return test2(tests, server, description)
+
 
 def test2(tests, server, description):
-	first = True
-	resolver = dns.resolver.get_default_resolver()
-	resolver.nameservers = [server]
-	for qname, rtype, expected_answer in tests:
-		# do the query and format the result as a string
-		try:
-			response = dns.resolver.query(qname, rtype)
-		except dns.resolver.NoNameservers:
-			# host did not have an answer for this query
-			print("Could not connect to %s for DNS query." % server)
-			sys.exit(1)
-		except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
-			# host did not have an answer for this query; not sure what the
-			# difference is between the two exceptions
-			response = ["[no value]"]
-		response = ";".join(str(r) for r in response)
-		response = re.sub(r"(\"p=).*(\")", r"\1__KEY__\2", response) # normalize DKIM key
-		response = response.replace("\"\" ", "") # normalize TXT records (DNSSEC signing inserts empty text string components)
+    first = True
+    resolver = dns.resolver.get_default_resolver()
+    resolver.nameservers = [server]
+    for qname, rtype, expected_answer in tests:
+        # do the query and format the result as a string
+        try:
+            response = dns.resolver.query(qname, rtype)
+        except dns.resolver.NoNameservers:
+            # host did not have an answer for this query
+            print("Could not connect to %s for DNS query." % server)
+            sys.exit(1)
+        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+            # host did not have an answer for this query; not sure what the
+            # difference is between the two exceptions
+            response = ["[no value]"]
+        response = ";".join(str(r) for r in response)
+        # normalize DKIM key
+        response = re.sub(r"(\"p=).*(\")", r"\1__KEY__\2", response)
+        # normalize TXT records (DNSSEC signing inserts empty text
+        # string components)
+        response = response.replace("\"\" ", "")
 
-		# is it right?
-		if response == expected_answer:
-			#print(server, ":", qname, rtype, "?", response)
-			continue
+        # is it right?
+        if response == expected_answer:
+            #print(server, ":", qname, rtype, "?", response)
+            continue
 
-		# show prolem
-		if first:
-			print("Incorrect DNS Response from", description)
-			print()
-			print("QUERY               ", "RESPONSE    ", "CORRECT VALUE", sep='\t')
-			first = False
+        # show problem
+        if first:
+            print("Incorrect DNS Response from", description)
+            print()
+            print("QUERY               ", "RESPONSE    ", "CORRECT VALUE", sep='\t')
+            first = False
 
-		print((qname + "/" + rtype).ljust(20), response.ljust(12), expected_answer, sep='\t')
-	return first # success
+        print((qname + "/" + rtype).ljust(20), response.ljust(12), expected_answer, sep='\t')
+    # success
+    return first
 
 # Test the response from the machine itself.
 if not test(ipaddr, "Mail-in-a-Box"):
-	print ()
-	print ("Please run the Mail-in-a-Box setup script on %s again." % hostname)
-	sys.exit(1)
+    print ()
+    print ("Please run the Mail-in-a-Box setup script on %s again." % hostname)
+    sys.exit(1)
 else:
-	print ("The Mail-in-a-Box provided correct DNS answers.")
-	print ()
+    print ("The Mail-in-a-Box provided correct DNS answers.")
+    print ()
 
-	# If those settings are OK, also test Google's Public DNS
-	# to see if the machine is hooked up to recursive DNS properly.
-	if not test("8.8.8.8", "Google Public DNS"):
-		print ()
-		print ("Check that the nameserver settings for %s are correct at your domain registrar. It may take a few hours for Google Public DNS to update after changes on your Mail-in-a-Box." % hostname)
-		sys.exit(1)
-	else:
-		print ("Your domain registrar or DNS host appears to be configured correctly as well. Public DNS provides the same answers.")
-		print ()
+    # If those settings are OK, also test Google's Public DNS
+    # to see if the machine is hooked up to recursive DNS properly.
+    if not test("8.8.8.8", "Google Public DNS"):
+        print ()
+        print ("Check that the nameserver settings for %s are correct at your domain registrar. It may take a few hours for Google Public DNS to update after changes on your Mail-in-a-Box." % hostname)
+        sys.exit(1)
+    else:
+        print ("Your domain registrar or DNS host appears to be configured correctly as well. Public DNS provides the same answers.")
+        print ()
 
-		# And if that's OK, also check reverse DNS (the PTR record).
-		if not test_ptr("8.8.8.8", "Google Public DNS (Reverse DNS)"):
-			print ()
-			print ("The reverse DNS for %s is not correct. Consult your ISP for how to set the reverse DNS (also called the PTR record) for %s to %s." % (hostname, hostname, ipaddr))
-			sys.exit(1)
-		else:
-			print ("And the reverse DNS for the domain is correct.")
-			print ()
-			print ("DNS is OK.")
+        # And if that's OK, also check reverse DNS (the PTR record).
+        if not test_ptr("8.8.8.8", "Google Public DNS (Reverse DNS)"):
+            print ()
+            print ("The reverse DNS for %s is not correct. Consult your ISP for how to set the reverse DNS (also called the PTR record) for %s to %s." % (hostname, hostname, ipaddr))
+            sys.exit(1)
+        else:
+            print ("And the reverse DNS for the domain is correct.")
+            print ()
+            print ("DNS is OK.")
diff --git a/tests/test_mail.py b/tests/test_mail.py
index 686d07a5..9991852b 100755
--- a/tests/test_mail.py
+++ b/tests/test_mail.py
@@ -1,28 +1,34 @@
 #!/usr/bin/env python3
 # Tests sending and receiving mail by sending a test message to yourself.
 
-import sys, imaplib, smtplib, uuid, time
-import socket, dns.reversename, dns.resolver
+import sys
+import imaplib
+import smtplib
+import uuid
+import time
+import socket
+import dns.reversename
+import dns.resolver
 
 if len(sys.argv) < 3:
-	print("Usage: tests/mail.py hostname emailaddress password")
-	sys.exit(1)
+    print("Usage: tests/mail.py hostname emailaddress password")
+    sys.exit(1)
 
 host, emailaddress, pw = sys.argv[1:4]
 
 # Attempt to login with IMAP. Our setup uses email addresses
 # as IMAP/SMTP usernames.
 try:
-	M = imaplib.IMAP4_SSL(host)
-	M.login(emailaddress, pw)
+    M = imaplib.IMAP4_SSL(host)
+    M.login(emailaddress, pw)
 except OSError as e:
-	print("Connection error:", e)
-	sys.exit(1)
+    print("Connection error:", e)
+    sys.exit(1)
 except imaplib.IMAP4.error as e:
-	# any sort of login error
-	e = ", ".join(a.decode("utf8") for a in e.args)
-	print("IMAP error:", e)
-	sys.exit(1)
+    # any sort of login error
+    e = ", ".join(a.decode("utf8") for a in e.args)
+    print("IMAP error:", e)
+    sys.exit(1)
 
 M.select()
 print("IMAP login is OK.")
@@ -35,10 +41,10 @@ To: {emailto}
 Subject: {subject}
 
 This is a test message. It should be automatically deleted by the test script.""".format(
-	emailaddress=emailaddress,
-	emailto=emailto,
-	subject=mailsubject,
-	)
+    emailaddress=emailaddress,
+    emailto=emailto,
+    subject=mailsubject,
+    )
 
 # Connect to the server on the SMTP submission TLS port.
 server = smtplib.SMTP(host, 587)
@@ -46,20 +52,21 @@ server = smtplib.SMTP(host, 587)
 server.starttls()
 
 # Verify that the EHLO name matches the server's reverse DNS.
-ipaddr = socket.gethostbyname(host) # IPv4 only!
-reverse_ip = dns.reversename.from_address(ipaddr) # e.g. "1.0.0.127.in-addr.arpa."
+ipaddr = socket.gethostbyname(host)  # IPv4 only!
+reverse_ip = dns.reversename.from_address(ipaddr)  # e.g. "1.0.0.127.in-addr.arpa."
+
 try:
-	reverse_dns = dns.resolver.query(reverse_ip, 'PTR')[0].target.to_text(omit_final_dot=True) # => hostname
+    reverse_dns = dns.resolver.query(reverse_ip, 'PTR')[0].target.to_text(omit_final_dot=True)  # => hostname
 except dns.resolver.NXDOMAIN:
-	print("Reverse DNS lookup failed for %s. SMTP EHLO name check skipped." % ipaddr)
-	reverse_dns = None
+    print("Reverse DNS lookup failed for %s. SMTP EHLO name check skipped." % ipaddr)
+    reverse_dns = None
 if reverse_dns is not None:
-	server.ehlo_or_helo_if_needed() # must send EHLO before getting the server's EHLO name
-	helo_name = server.ehlo_resp.decode("utf8").split("\n")[0] # first line is the EHLO name
-	if helo_name != reverse_dns:
-		print("The server's EHLO name does not match its reverse hostname. Check DNS settings.")
-	else:
-		print("SMTP EHLO name (%s) is OK." % helo_name)
+    server.ehlo_or_helo_if_needed()  # must send EHLO before getting the server's EHLO name
+    helo_name = server.ehlo_resp.decode("utf8").split("\n")[0]  # first line is the EHLO name
+    if helo_name != reverse_dns:
+        print("The server's EHLO name does not match its reverse hostname. Check DNS settings.")
+    else:
+        print("SMTP EHLO name (%s) is OK." % helo_name)
 
 # Login and send a test email.
 server.login(emailaddress, pw)
@@ -68,40 +75,40 @@ server.quit()
 print("SMTP submission is OK.")
 
 while True:
-	# Wait so the message can propagate to the inbox.
-	time.sleep(10)
+    # Wait so the message can propagate to the inbox.
+    time.sleep(10)
 
-	# Read the subject lines of all of the emails in the inbox
-	# to find our test message, and then delete it.
-	found = False
-	typ, data = M.search(None, 'ALL')
-	for num in data[0].split():
-		typ, data = M.fetch(num, '(BODY[HEADER.FIELDS (SUBJECT)])')
-		imapsubjectline = data[0][1].strip().decode("utf8")
-		if imapsubjectline == "Subject: " + mailsubject:
-			# We found our test message.
-			found = True
+    # Read the subject lines of all of the emails in the inbox
+    # to find our test message, and then delete it.
+    found = False
+    typ, data = M.search(None, 'ALL')
+    for num in data[0].split():
+        typ, data = M.fetch(num, '(BODY[HEADER.FIELDS (SUBJECT)])')
+        imapsubjectline = data[0][1].strip().decode("utf8")
+        if imapsubjectline == "Subject: " + mailsubject:
+            # We found our test message.
+            found = True
 
-			# To test DKIM, download the whole mssage body. Unfortunately,
-			# pydkim doesn't actually work.
-			# You must 'sudo apt-get install python3-dkim python3-dnspython' first.
-			#typ, msgdata = M.fetch(num, '(RFC822)')
-			#msg = msgdata[0][1]
-			#if dkim.verify(msg):
-			#	print("DKIM signature on the test message is OK (verified).")
-			#else:
-			#	print("DKIM signature on the test message failed verification.")
+            # To test DKIM, download the whole message body. Unfortunately,
+            # pydkim doesn't actually work.
+            # You must 'sudo apt-get install python3-dkim python3-dnspython' first.
+            #typ, msgdata = M.fetch(num, '(RFC822)')
+            #msg = msgdata[0][1]
+            #if dkim.verify(msg):
+            #    print("DKIM signature on the test message is OK (verified).")
+            #else:
+            #    print("DKIM signature on the test message failed verification.")
 
-			# Delete the test message.
-			M.store(num, '+FLAGS', '\\Deleted')
-			M.expunge()
+            # Delete the test message.
+            M.store(num, '+FLAGS', '\\Deleted')
+            M.expunge()
 
-			break
+            break
 
-	if found:
-		break
+    if found:
+        break
 
-	print("Test message not present in the inbox yet...")
+    print("Test message not present in the inbox yet...")
 
 M.close()
 M.logout()
diff --git a/tests/test_smtp_server.py b/tests/test_smtp_server.py
index 914c94b2..bb3c69c1 100755
--- a/tests/test_smtp_server.py
+++ b/tests/test_smtp_server.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
-import smtplib, sys
+import smtplib
+import sys
 
 if len(sys.argv) < 3:
         print("Usage: tests/smtp_server.py host email.to email.from")
@@ -16,4 +17,3 @@ server = smtplib.SMTP(host, 25)
 server.set_debuglevel(1)
 server.sendmail(fromaddr, [toaddr], msg)
 server.quit()
-
diff --git a/tools/editconf.py b/tools/editconf.py
index 7bc3d190..286a144f 100755
--- a/tools/editconf.py
+++ b/tools/editconf.py
@@ -18,14 +18,15 @@
 # lines while the lines start with whitespace, e.g.:
 #
 # NAME VAL
-#   UE 
+#   UE
 
-import sys, re
+import sys
+import re
 
 # sanity check
 if len(sys.argv) < 3:
-	print("usage: python3 editconf.py /etc/file.conf [-s] [-w] [-t] NAME=VAL [NAME=VAL ...]")
-	sys.exit(1)
+    print("usage: python3 editconf.py /etc/file.conf [-s] [-w] [-t] NAME=VAL [NAME=VAL ...]")
+    sys.exit(1)
 
 # parse command line arguments
 filename = sys.argv[1]
@@ -37,22 +38,22 @@ comment_char = "#"
 folded_lines = False
 testing = False
 while settings[0][0] == "-" and settings[0] != "--":
-	opt = settings.pop(0)
-	if opt == "-s":
-		# Space is the delimiter
-		delimiter = " "
-		delimiter_re = r"\s+"
-	elif opt == "-w":
-		# Line folding is possible in this file.
-		folded_lines = True
-	elif opt == "-c":
-		# Specifies a different comment character.
-		comment_char = settings.pop(0)
-	elif opt == "-t":
-		testing = True
-	else:
-		print("Invalid option.")
-		sys.exit(1)
+    opt = settings.pop(0)
+    if opt == "-s":
+        # Space is the delimiter
+        delimiter = " "
+        delimiter_re = r"\s+"
+    elif opt == "-w":
+        # Line folding is possible in this file.
+        folded_lines = True
+    elif opt == "-c":
+        # Specifies a different comment character.
+        comment_char = settings.pop(0)
+    elif opt == "-t":
+        testing = True
+    else:
+        print("Invalid option.")
+        sys.exit(1)
 
 # create the new config file in memory
 
@@ -61,67 +62,69 @@ buf = ""
 input_lines = list(open(filename))
 
 while len(input_lines) > 0:
-	line = input_lines.pop(0)
+    line = input_lines.pop(0)
 
-	# If this configuration file uses folded lines, append any folded lines
-	# into our input buffer.
-	if folded_lines and line[0] not in (comment_char, " ", ""):
-		while len(input_lines) > 0 and input_lines[0][0] in " \t":
-			line += input_lines.pop(0)
+    # If this configuration file uses folded lines, append any folded lines
+    # into our input buffer.
+    if folded_lines and line[0] not in (comment_char, " ", ""):
+        while len(input_lines) > 0 and input_lines[0][0] in " \t":
+            line += input_lines.pop(0)
 
-	# See if this line is for any settings passed on the command line.
-	for i in range(len(settings)):
-		# Check that this line contain this setting from the command-line arguments.
-		name, val = settings[i].split("=", 1)
-		m = re.match(
-			   "(\s*)"
-			 + "(" + re.escape(comment_char) + "\s*)?"
-			 + re.escape(name) + delimiter_re + "(.*?)\s*$",
-			 line, re.S)
-		if not m: continue
-		indent, is_comment, existing_val = m.groups()
+    # See if this line is for any settings passed on the command line.
+    for i in range(len(settings)):
+        # Check that this line contains this setting from the command-line arguments.
+        name, val = settings[i].split("=", 1)
+        m = re.match(
+            "(\s*)" +
+            "(" + re.escape(comment_char) + "\s*)?" +
+            re.escape(name) + delimiter_re + "(.*?)\s*$",
+            line, re.S)
+        if not m:
+            continue
+        indent, is_comment, existing_val = m.groups()
+
+        # If this is already the setting, do nothing.
+        if is_comment is None and existing_val == val:
+            # It may be that we've already inserted this setting higher
+            # in the file so check for that first.
+            if i in found:
+                break
+            buf += line
+            found.add(i)
+            break
+
+        # comment-out the existing line (also comment any folded lines)
+        if is_comment is None:
+            buf += comment_char + line.rstrip().replace("\n", "\n" + comment_char) + "\n"
+        else:
+            # the line is already commented, pass it through
+            buf += line
+
+        # if this option oddly appears more than once, don't add the setting again
+        if i in found:
+            break
+
+        # add the new setting
+        buf += indent + name + delimiter + val + "\n"
+
+        # note that we've applied this option
+        found.add(i)
+
+        break
+    else:
+        # If the line did not match any setting names, pass it through.
+        buf += line
 
-		# If this is already the setting, do nothing.
-		if is_comment is None and existing_val == val:
-			# It may be that we've already inserted this setting higher
-			# in the file so check for that first.
-			if i in found: break
-			buf += line
-			found.add(i)
-			break
-		
-		# comment-out the existing line (also comment any folded lines)
-		if is_comment is None:
-			buf += comment_char + line.rstrip().replace("\n", "\n" + comment_char) + "\n"
-		else:
-			# the line is already commented, pass it through
-			buf += line
-		
-		# if this option oddly appears more than once, don't add the setting again
-		if i in found:
-			break
-		
-		# add the new setting
-		buf += indent + name + delimiter + val + "\n"
-		
-		# note that we've applied this option
-		found.add(i)
-		
-		break
-	else:
-		# If did not match any setting names, pass this line through.
-		buf += line
-		
 # Put any settings we didn't see at the end of the file.
 for i in range(len(settings)):
-	if i not in found:
-		name, val = settings[i].split("=", 1)
-		buf += name + delimiter + val + "\n"
+    if i not in found:
+        name, val = settings[i].split("=", 1)
+        buf += name + delimiter + val + "\n"
 
 if not testing:
-	# Write out the new file.
-	with open(filename, "w") as f:
-		f.write(buf)
+    # Write out the new file.
+    with open(filename, "w") as f:
+        f.write(buf)
 else:
-	# Just print the new file to stdout.
-	print(buf)
+    # Just print the new file to stdout.
+    print(buf)
diff --git a/tools/mail.py b/tools/mail.py
index c22c0adc..7c78c227 100755
--- a/tools/mail.py
+++ b/tools/mail.py
@@ -1,124 +1,132 @@
 #!/usr/bin/python3
 
-import sys, getpass, urllib.request, urllib.error, json
+import sys
+import getpass
+import urllib.request
+import urllib.error
+import json
+
 
 def mgmt(cmd, data=None, is_json=False):
-	# The base URL for the management daemon. (Listens on IPv4 only.)
-	mgmt_uri = 'http://127.0.0.1:10222'
+    # The base URL for the management daemon. (Listens on IPv4 only.)
+    mgmt_uri = 'http://127.0.0.1:10222'
 
-	setup_key_auth(mgmt_uri)
+    setup_key_auth(mgmt_uri)
+
+    req = urllib.request.Request(mgmt_uri + cmd, urllib.parse.urlencode(data).encode("utf8") if data else None)
+    try:
+        response = urllib.request.urlopen(req)
+    except urllib.error.HTTPError as e:
+        if e.code == 401:
+            try:
+                print(e.read().decode("utf8"))
+            except:
+                pass
+            print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr)
+        elif hasattr(e, 'read'):
+            print(e.read().decode('utf8'), file=sys.stderr)
+        else:
+            print(e, file=sys.stderr)
+        sys.exit(1)
+    resp = response.read().decode('utf8')
+    if is_json:
+        resp = json.loads(resp)
+    return resp
 
-	req = urllib.request.Request(mgmt_uri + cmd, urllib.parse.urlencode(data).encode("utf8") if data else None)
-	try:
-		response = urllib.request.urlopen(req)
-	except urllib.error.HTTPError as e:
-		if e.code == 401:
-			try:
-				print(e.read().decode("utf8"))
-			except:
-				pass
-			print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr)
-		elif hasattr(e, 'read'):
-			print(e.read().decode('utf8'), file=sys.stderr)
-		else:
-			print(e, file=sys.stderr)
-		sys.exit(1)
-	resp = response.read().decode('utf8')
-	if is_json: resp = json.loads(resp)
-	return resp
 
 def read_password():
-	first  = getpass.getpass('password: ')
-	second = getpass.getpass(' (again): ')
-	while first != second:
-		print('Passwords not the same. Try again.')
-		first  = getpass.getpass('password: ')
-		second = getpass.getpass(' (again): ')
-	return first
+    first = getpass.getpass('password: ')
+    second = getpass.getpass(' (again): ')
+    while first != second:
+        print('Passwords not the same. Try again.')
+        first = getpass.getpass('password: ')
+        second = getpass.getpass(' (again): ')
+    return first
+
 
 def setup_key_auth(mgmt_uri):
-	key = open('/var/lib/mailinabox/api.key').read().strip()
+    key = open('/var/lib/mailinabox/api.key').read().strip()
 
-	auth_handler = urllib.request.HTTPBasicAuthHandler()
-	auth_handler.add_password(
-		realm='Mail-in-a-Box Management Server',
-		uri=mgmt_uri,
-		user=key,
-		passwd='')
-	opener = urllib.request.build_opener(auth_handler)
-	urllib.request.install_opener(opener)
+    auth_handler = urllib.request.HTTPBasicAuthHandler()
+    auth_handler.add_password(
+        realm='Mail-in-a-Box Management Server',
+        uri=mgmt_uri,
+        user=key,
+        passwd='')
+    opener = urllib.request.build_opener(auth_handler)
+    urllib.request.install_opener(opener)
 
 if len(sys.argv) < 2:
-	print("Usage: ")
-	print("  tools/mail.py user  (lists users)")
-	print("  tools/mail.py user add user@domain.com [password]")
-	print("  tools/mail.py user password user@domain.com [password]")
-	print("  tools/mail.py user remove user@domain.com")
-	print("  tools/mail.py user make-admin user@domain.com")
-	print("  tools/mail.py user remove-admin user@domain.com")
-	print("  tools/mail.py user admins (lists admins)")
-	print("  tools/mail.py alias  (lists aliases)")
-	print("  tools/mail.py alias add incoming.name@domain.com sent.to@other.domain.com")
-	print("  tools/mail.py alias add incoming.name@domain.com 'sent.to@other.domain.com, multiple.people@other.domain.com'")
-	print("  tools/mail.py alias remove incoming.name@domain.com")
-	print()
-	print("Removing a mail user does not delete their mail folders on disk. It only prevents IMAP/SMTP login.")
-	print()
+    print("Usage: ")
+    print("  tools/mail.py user  (lists users)")
+    print("  tools/mail.py user add user@domain.com [password]")
+    print("  tools/mail.py user password user@domain.com [password]")
+    print("  tools/mail.py user remove user@domain.com")
+    print("  tools/mail.py user make-admin user@domain.com")
+    print("  tools/mail.py user remove-admin user@domain.com")
+    print("  tools/mail.py user admins (lists admins)")
+    print("  tools/mail.py alias  (lists aliases)")
+    print("  tools/mail.py alias add incoming.name@domain.com sent.to@other.domain.com")
+    print("  tools/mail.py alias add incoming.name@domain.com 'sent.to@other.domain.com, multiple.people@other.domain.com'")
+    print("  tools/mail.py alias remove incoming.name@domain.com")
+    print()
+    print("Removing a mail user does not delete their mail folders on disk. It only prevents IMAP/SMTP login.")
+    print()
 
 elif sys.argv[1] == "user" and len(sys.argv) == 2:
-	# Dump a list of users, one per line. Mark admins with an asterisk.
-	users = mgmt("/mail/users?format=json", is_json=True)
-	for domain in users:
-		for user in domain["users"]:
-			if user['status'] == 'inactive': continue
-			print(user['email'], end='')
-			if "admin" in user['privileges']:
-				print("*", end='')
-			print()
+    # Dump a list of users, one per line. Mark admins with an asterisk.
+    users = mgmt("/mail/users?format=json", is_json=True)
+    for domain in users:
+        for user in domain["users"]:
+            if user['status'] == 'inactive':
+                continue
+            print(user['email'], end='')
+            if "admin" in user['privileges']:
+                print("*", end='')
+            print()
 
 elif sys.argv[1] == "user" and sys.argv[2] in ("add", "password"):
-	if len(sys.argv) < 5:
-		if len(sys.argv) < 4:
-			email = input("email: ")
-		else:
-			email = sys.argv[3]
-		pw = read_password()
-	else:
-		email, pw = sys.argv[3:5]
+    if len(sys.argv) < 5:
+        if len(sys.argv) < 4:
+            email = input("email: ")
+        else:
+            email = sys.argv[3]
+        pw = read_password()
+    else:
+        email, pw = sys.argv[3:5]
 
-	if sys.argv[2] == "add":
-		print(mgmt("/mail/users/add", { "email": email, "password": pw }))
-	elif sys.argv[2] == "password":
-		print(mgmt("/mail/users/password", { "email": email, "password": pw }))
+    if sys.argv[2] == "add":
+        print(mgmt("/mail/users/add", {"email": email, "password": pw}))
+    elif sys.argv[2] == "password":
+        print(mgmt("/mail/users/password", {"email": email, "password": pw}))
 
 elif sys.argv[1] == "user" and sys.argv[2] == "remove" and len(sys.argv) == 4:
-	print(mgmt("/mail/users/remove", { "email": sys.argv[3] }))
+    print(mgmt("/mail/users/remove", {"email": sys.argv[3]}))
 
 elif sys.argv[1] == "user" and sys.argv[2] in ("make-admin", "remove-admin") and len(sys.argv) == 4:
-	if sys.argv[2] == "make-admin":
-		action = "add"
-	else:
-		action = "remove"
-	print(mgmt("/mail/users/privileges/" + action, { "email": sys.argv[3], "privilege": "admin" }))
+    if sys.argv[2] == "make-admin":
+        action = "add"
+    else:
+        action = "remove"
+    print(mgmt("/mail/users/privileges/" + action, {"email": sys.argv[3], "privilege": "admin"}))
 
 elif sys.argv[1] == "user" and sys.argv[2] == "admins":
-	# Dump a list of admin users.
-	users = mgmt("/mail/users?format=json", is_json=True)
-	for domain in users:
-		for user in domain["users"]:
-			if "admin" in user['privileges']:
-				print(user['email'])
+    # Dump a list of admin users.
+    users = mgmt("/mail/users?format=json", is_json=True)
+    for domain in users:
+        for user in domain["users"]:
+            if "admin" in user['privileges']:
+                print(user['email'])
 
 elif sys.argv[1] == "alias" and len(sys.argv) == 2:
-	print(mgmt("/mail/aliases"))
+    print(mgmt("/mail/aliases"))
 
 elif sys.argv[1] == "alias" and sys.argv[2] == "add" and len(sys.argv) == 5:
-	print(mgmt("/mail/aliases/add", { "source": sys.argv[3], "destination": sys.argv[4] }))
+    print(mgmt("/mail/aliases/add", {"source": sys.argv[3], "destination": sys.argv[4]}))
 
 elif sys.argv[1] == "alias" and sys.argv[2] == "remove" and len(sys.argv) == 4:
-	print(mgmt("/mail/aliases/remove", { "source": sys.argv[3] }))
+    print(mgmt("/mail/aliases/remove", {"source": sys.argv[3]}))
 
 else:
-	print("Invalid command-line arguments.")
-	sys.exit(1)
-
+    print("Invalid command-line arguments.")
+    sys.exit(1)
diff --git a/tools/parse-nginx-log-bootstrap-accesses.py b/tools/parse-nginx-log-bootstrap-accesses.py
index 9d5663cf..758aa774 100755
--- a/tools/parse-nginx-log-bootstrap-accesses.py
+++ b/tools/parse-nginx-log-bootstrap-accesses.py
@@ -4,7 +4,11 @@
 # access log to see how many people are installing Mail-in-a-Box each day, by
 # looking at accesses to the bootstrap.sh script.
 
-import re, glob, gzip, os.path, json
+import re
+import glob
+import gzip
+import os.path
+import json
 import dateutil.parser
 
 outfn = "/home/user-data/www/mailinabox.email/install-stats.json"
@@ -15,35 +19,35 @@ accesses = set()
 
 # Scan the current and rotated access logs.
 for fn in glob.glob("/var/log/nginx/access.log*"):
-	# Gunzip if necessary.
-	if fn.endswith(".gz"):
-		f = gzip.open(fn)
-	else:
-		f = open(fn, "rb")
+    # Gunzip if necessary.
+    if fn.endswith(".gz"):
+        f = gzip.open(fn)
+    else:
+        f = open(fn, "rb")
 
-	# Loop through the lines in the access log.
-	with f:
-		for line in f:
-			# Find lines that are GETs on /bootstrap.sh by either curl or wget.
-			m = re.match(rb"(?P<ip>\S+) - - \[(?P<date>.*?)\] \"GET /bootstrap.sh HTTP/.*\" 200 \d+ .* \"(?:curl|wget)", line, re.I)
-			if m:
-				date, time = m.group("date").decode("ascii").split(":", 1)
-				date = dateutil.parser.parse(date).date().isoformat()
-				ip = m.group("ip").decode("ascii")
-				accesses.add( (date, ip) )
+    # Loop through the lines in the access log.
+    with f:
+        for line in f:
+            # Find lines that are GETs on /bootstrap.sh by either curl or wget.
+            m = re.match(rb"(?P<ip>\S+) - - \[(?P<date>.*?)\] \"GET /bootstrap.sh HTTP/.*\" 200 \d+ .* \"(?:curl|wget)", line, re.I)
+            if m:
+                date, time = m.group("date").decode("ascii").split(":", 1)
+                date = dateutil.parser.parse(date).date().isoformat()
+                ip = m.group("ip").decode("ascii")
+                accesses.add((date, ip))
 
 # Aggregate by date.
-by_date = { }
+by_date = {}
 for date, ip in accesses:
-	by_date[date] = by_date.get(date, 0) + 1
+    by_date[date] = by_date.get(date, 0) + 1
 
 # Since logs are rotated, store the statistics permanently in a JSON file.
 # Load in the stats from an existing file.
 if os.path.exists(outfn):
-	existing_data = json.load(open(outfn))
-	for date, count in existing_data:
-		if date not in by_date:
-			by_date[date] = count
+    existing_data = json.load(open(outfn))
+    for date, count in existing_data:
+        if date not in by_date:
+            by_date[date] = count
 
 # Turn into a list rather than a dict structure to make it ordered.
 by_date = sorted(by_date.items())
@@ -53,4 +57,4 @@ by_date.pop(-1)
 
 # Write out.
 with open(outfn, "w") as f:
-	json.dump(by_date, f, sort_keys=True, indent=True)
+    json.dump(by_date, f, sort_keys=True, indent=True)
diff --git a/tools/readable_bash.py b/tools/readable_bash.py
index 36dafb7f..de2597d3 100644
--- a/tools/readable_bash.py
+++ b/tools/readable_bash.py
@@ -3,12 +3,14 @@
 # Generate documentation for how this machine works by
 # parsing our bash scripts!
 
-import cgi, re
+import cgi
+import re
 import markdown
 from modgrammar import *
 
+
 def generate_documentation():
-	print("""<!DOCTYPE html>
+    print("""<!DOCTYPE html>
 <html>
     <head>
         <meta charset="utf-8">
@@ -21,93 +23,93 @@ def generate_documentation():
         <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css">
 
         <style>
-		    @import url(https://fonts.googleapis.com/css?family=Iceland);
-		    @import url(https://fonts.googleapis.com/css?family=Raleway:400,700);
-			@import url(https://fonts.googleapis.com/css?family=Ubuntu:300,500);
-		   	body {
-		    		font-family: Raleway, sans-serif;
-		    		font-size: 16px;
-					color: #555;
-	    	}
-	    	h2, h3 {
-	    		margin-top: .25em;
-	    		margin-bottom: .75em;
-	    	}
-	    	p {
-	    		margin-bottom: 1em;
-	    	}
-		    	.intro p {
-		    		margin: 1.5em 0;
-		    	}
-		    li {
-		    	margin-bottom: .33em;
-		    }
+            @import url(https://fonts.googleapis.com/css?family=Iceland);
+            @import url(https://fonts.googleapis.com/css?family=Raleway:400,700);
+            @import url(https://fonts.googleapis.com/css?family=Ubuntu:300,500);
+               body {
+                    font-family: Raleway, sans-serif;
+                    font-size: 16px;
+                    color: #555;
+            }
+            h2, h3 {
+                margin-top: .25em;
+                margin-bottom: .75em;
+            }
+            p {
+                margin-bottom: 1em;
+            }
+                .intro p {
+                    margin: 1.5em 0;
+                }
+            li {
+                margin-bottom: .33em;
+            }
 
-			.sourcefile {
-	    		padding-top: 1.5em;
-	    		padding-bottom: 1em;
-	    		font-size: 90%;
-	    		text-align: right;
-			}
-				.sourcefile a {
-					color: red;
-				}
+            .sourcefile {
+                padding-top: 1.5em;
+                padding-bottom: 1em;
+                font-size: 90%;
+                text-align: right;
+            }
+                .sourcefile a {
+                    color: red;
+                }
 
-	    	.instructions .row.contd {
-	    		border-top: 1px solid #E0E0E0;
-	    	}
+            .instructions .row.contd {
+                border-top: 1px solid #E0E0E0;
+            }
 
-	    	.prose {
-	    		padding-top: 1em;    	
-	    		padding-bottom: 1em;
-	    	}
-	    	.terminal {
-	    		background-color: #EEE;
-	    		padding-top: 1em;
-	    		padding-bottom: 1em;
-	    	}
+            .prose {
+                padding-top: 1em;
+                padding-bottom: 1em;
+            }
+            .terminal {
+                background-color: #EEE;
+                padding-top: 1em;
+                padding-bottom: 1em;
+            }
 
-	    	ul {
-	    		padding-left: 1.25em;
-	    	}
+            ul {
+                padding-left: 1.25em;
+            }
 
-	    	pre {
-	    		color: black;
-	    		border: 0;
-	    		background: none;
-	    		font-size: 100%;
-	    	}
+            pre {
+                color: black;
+                border: 0;
+                background: none;
+                font-size: 100%;
+            }
 
-	    	div.write-to {
-	    		margin: 0 0 1em .5em;
-	    	}
-	    	div.write-to p {
-	    		padding: .5em;
-	    		margin: 0;
-	    	}
-	    	div.write-to .filename {
-	    		padding: .25em .5em;
-	    		background-color: #666;
-	    		color: white;
-	    		font-family: monospace;
-	    		font-weight: bold;
-	    	}
-	    	div.write-to .filename span {
-	    		font-family: sans-serif;
-	    		font-weight: normal;
-	    	}
-	    	div.write-to pre {
-	    		margin: 0;
-	    		padding: .5em;
-	    		border: 1px solid #999;
-	    		border-radius: 0;
-	    		font-size: 90%;
-	    	}
+            div.write-to {
+                margin: 0 0 1em .5em;
+            }
+            div.write-to p {
+                padding: .5em;
+                margin: 0;
+            }
+            div.write-to .filename {
+                padding: .25em .5em;
+                background-color: #666;
+                color: white;
+                font-family: monospace;
+                font-weight: bold;
+            }
+            div.write-to .filename span {
+                font-family: sans-serif;
+                font-weight: normal;
+            }
+            div.write-to pre {
+                margin: 0;
+                padding: .5em;
+                border: 1px solid #999;
+                border-radius: 0;
+                font-size: 90%;
+            }
 
-	    	pre.shell > div:before {
-	    		content: "$ ";
-	    		color: #666;
-	    	}
+            pre.shell > div:before {
+                content: "$ ";
+                color: #666;
+            }
         </style>
     </head>
     <body>
@@ -123,359 +125,408 @@ def generate_documentation():
     <div class="container instructions">
  """)
 
-	parser = Source.parser()
-	for line in open("setup/start.sh"):
-		try:
-			fn = parser.parse_string(line).filename()
-		except:
-			continue
-		if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"):
-			continue
+    parser = Source.parser()
+    for line in open("setup/start.sh"):
+        try:
+            fn = parser.parse_string(line).filename()
+        except:
+            continue
+        if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"):
+            continue
 
-		import sys
-		print(fn, file=sys.stderr)
+        import sys
+        print(fn, file=sys.stderr)
 
-		print(BashScript.parse(fn))
+        print(BashScript.parse(fn))
 
-	print("""
+    print("""
         <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.1/jquery.min.js"></script>
         <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
         <script>
         $(function() {
-			$('.terminal').each(function() {
-			  $(this).outerHeight( $(this).parent().innerHeight() );
-			});
+            $('.terminal').each(function() {
+              $(this).outerHeight( $(this).parent().innerHeight() );
+            });
         })
-		</script>
+        </script>
     </body>
 </html>
 """)
 
+
 class HashBang(Grammar):
-	grammar = (L('#!'), REST_OF_LINE, EOL)
-	def value(self):
-		return ""
+    grammar = (L('#!'), REST_OF_LINE, EOL)
+
+    def value(self):
+        return ""
+
 
 def strip_indent(s):
-	s = s.replace("\t", "    ")
-	lines = s.split("\n")
-	try:
-		min_indent = min(len(re.match(r"\s*", line).group(0)) for line in lines if len(line) > 0)
-	except ValueError:
-		# No non-empty lines.
-		min_indent = 0
-	lines = [line[min_indent:] for line in lines]
-	return "\n".join(lines)
+    s = s.replace("\t", "    ")
+    lines = s.split("\n")
+    try:
+        min_indent = min(len(re.match(r"\s*", line).group(0)) for line in lines if len(line) > 0)
+    except ValueError:
+        # No non-empty lines.
+        min_indent = 0
+    lines = [line[min_indent:] for line in lines]
+    return "\n".join(lines)
+
 
 class Comment(Grammar):
-	grammar = ONE_OR_MORE(ZERO_OR_MORE(SPACE), L('#'), REST_OF_LINE, EOL)
-	def value(self):
-		if self.string.replace("#", "").strip() == "":
-			return "\n"
-		lines = [x[2].string for x in self[0]]
-		content = "\n".join(lines)
-		content = strip_indent(content)
-		return markdown.markdown(content, output_format="html4") + "\n\n"
+    grammar = ONE_OR_MORE(ZERO_OR_MORE(SPACE), L('#'), REST_OF_LINE, EOL)
+
+    def value(self):
+        if self.string.replace("#", "").strip() == "":
+            return "\n"
+        lines = [x[2].string for x in self[0]]
+        content = "\n".join(lines)
+        content = strip_indent(content)
+        return markdown.markdown(content, output_format="html4") + "\n\n"
 
 FILENAME = WORD('a-z0-9-/.')
 
+
 class Source(Grammar):
-	grammar = ((L('.') | L('source')), L(' '), FILENAME, Comment | EOL)
-	def filename(self):
-		return self[2].string.strip()
-	def value(self):
-		return BashScript.parse(self.filename())
+    grammar = ((L('.') | L('source')), L(' '), FILENAME, Comment | EOL)
+
+    def filename(self):
+        return self[2].string.strip()
+
+    def value(self):
+        return BashScript.parse(self.filename())
+
 
 class CatEOF(Grammar):
-	grammar = (ZERO_OR_MORE(SPACE), L('cat '), L('>') | L('>>'), L(' '), ANY_EXCEPT(WHITESPACE), L(" <<"), OPTIONAL(SPACE), L("EOF"), EOL, REPEAT(ANY, greedy=False), EOL, L("EOF"), EOL)
-	def value(self):
-		content = self[9].string
-		content = re.sub(r"\\([$])", r"\1", content) # un-escape bash-escaped characters
-		return "<div class='write-to'><div class='filename'>%s <span>(%s)</span></div><pre>%s</pre></div>\n" \
-			% (self[4].string,
-			   "overwrite" if ">>" not in self[2].string else "append to",
-			   cgi.escape(content))
+    grammar = (ZERO_OR_MORE(SPACE), L('cat '), L('>') | L('>>'), L(' '), ANY_EXCEPT(WHITESPACE), L(" <<"), OPTIONAL(SPACE), L("EOF"), EOL, REPEAT(ANY, greedy=False), EOL, L("EOF"), EOL)
+
+    def value(self):
+        content = self[9].string
+        # un-escape bash-escaped characters
+        content = re.sub(r"\\([$])", r"\1", content)
+        return "<div class='write-to'><div class='filename'>%s <span>(%s)</span></div><pre>%s</pre></div>\n" \
+            % (self[4].string,
+               "overwrite" if ">>" not in self[2].string else "append to",
+               cgi.escape(content))
+
 
 class HideOutput(Grammar):
-	grammar = (L("hide_output "), REF("BashElement"))
-	def value(self):
-		return self[1].value()
+    grammar = (L("hide_output "), REF("BashElement"))
+
+    def value(self):
+        return self[1].value()
+
 
 class EchoLine(Grammar):
-	grammar = (OPTIONAL(SPACE), L("echo "), REST_OF_LINE, EOL)
-	def value(self):
-		if "|" in self.string  or ">" in self.string:
-			return "<pre class='shell'><div>" + recode_bash(self.string.strip()) + "</div></pre>\n"
-		return ""
+    grammar = (OPTIONAL(SPACE), L("echo "), REST_OF_LINE, EOL)
+
+    def value(self):
+        if "|" in self.string or ">" in self.string:
+            return "<pre class='shell'><div>" + recode_bash(self.string.strip()) + "</div></pre>\n"
+        return ""
+
 
 class EditConf(Grammar):
-	grammar = (
-		L('tools/editconf.py '),
-		FILENAME,
-		SPACE,
-		OPTIONAL((LIST_OF(
-			L("-w") | L("-s") | L("-c ;"),
-			sep=SPACE,
-		), SPACE)),
-		REST_OF_LINE,
-		OPTIONAL(SPACE),
-		EOL
-		)
-	def value(self):
-		conffile = self[1]
-		options = []
-		eq = "="
-		if self[3] and "-s" in self[3].string: eq = " "
-		for opt in re.split("\s+", self[4].string):
-			k, v = opt.split("=", 1)
-			v = re.sub(r"\n+", "", fixup_tokens(v)) # not sure why newlines are getting doubled
-			options.append("%s%s%s" % (k, eq, v))
-		return "<div class='write-to'><div class='filename'>" + self[1].string + " <span>(change settings)</span></div><pre>" + "\n".join(cgi.escape(s) for s in options) + "</pre></div>\n"
+    grammar = (
+        L('tools/editconf.py '),
+        FILENAME,
+        SPACE,
+        OPTIONAL((LIST_OF(
+            L("-w") | L("-s") | L("-c ;"),
+            sep=SPACE,
+        ), SPACE)),
+        REST_OF_LINE,
+        OPTIONAL(SPACE),
+        EOL
+        )
+
+    def value(self):
+        conffile = self[1]
+        options = []
+        eq = "="
+        if self[3] and "-s" in self[3].string:
+            eq = " "
+        for opt in re.split("\s+", self[4].string):
+            k, v = opt.split("=", 1)
+            # not sure why newlines are getting doubled
+            v = re.sub(r"\n+", "", fixup_tokens(v))
+            options.append("%s%s%s" % (k, eq, v))
+        return "<div class='write-to'><div class='filename'>" + self[1].string + " <span>(change settings)</span></div><pre>" + "\n".join(cgi.escape(s) for s in options) + "</pre></div>\n"
+
 
 class CaptureOutput(Grammar):
-	grammar = OPTIONAL(SPACE), WORD("A-Za-z_"), L('=$('), REST_OF_LINE, L(")"), OPTIONAL(L(';')), EOL
-	def value(self):
-		cmd = self[3].string
-		cmd = cmd.replace("; ", "\n")
-		return "<div class='write-to'><div class='filename'>$" + self[1].string + "=</div><pre>" + cgi.escape(cmd) + "</pre></div>\n"
+    grammar = OPTIONAL(SPACE), WORD("A-Za-z_"), L('=$('), REST_OF_LINE, L(")"), OPTIONAL(L(';')), EOL
+
+    def value(self):
+        cmd = self[3].string
+        cmd = cmd.replace("; ", "\n")
+        return "<div class='write-to'><div class='filename'>$" + self[1].string + "=</div><pre>" + cgi.escape(cmd) + "</pre></div>\n"
+
 
 class SedReplace(Grammar):
-	grammar = OPTIONAL(SPACE), L('sed -i "s/'), OPTIONAL(L('^')), ONE_OR_MORE(WORD("-A-Za-z0-9 #=\\{};.*$_!()")), L('/'), ONE_OR_MORE(WORD("-A-Za-z0-9 #=\\{};.*$_!()")), L('/"'), SPACE, FILENAME, EOL
-	def value(self):
-		return "<div class='write-to'><div class='filename'>edit<br>" + self[8].string + "</div><p>replace</p><pre>" + cgi.escape(self[3].string.replace(".*", ". . .")) + "</pre><p>with</p><pre>" + cgi.escape(self[5].string.replace("\\n", "\n").replace("\\t", "\t")) + "</pre></div>\n"
+    grammar = OPTIONAL(SPACE), L('sed -i "s/'), OPTIONAL(L('^')), ONE_OR_MORE(WORD("-A-Za-z0-9 #=\\{};.*$_!()")), L('/'), ONE_OR_MORE(WORD("-A-Za-z0-9 #=\\{};.*$_!()")), L('/"'), SPACE, FILENAME, EOL
+
+    def value(self):
+        return "<div class='write-to'><div class='filename'>edit<br>" + self[8].string + "</div><p>replace</p><pre>" + cgi.escape(self[3].string.replace(".*", ". . .")) + "</pre><p>with</p><pre>" + cgi.escape(self[5].string.replace("\\n", "\n").replace("\\t", "\t")) + "</pre></div>\n"
+
 
 class EchoPipe(Grammar):
-	grammar = OPTIONAL(SPACE), L("echo "), REST_OF_LINE, L(' | '), REST_OF_LINE, EOL
-	def value(self):
-		text = " ".join("\"%s\"" % s for s in self[2].string.split(" "))
-		return "<pre class='shell'><div>echo " + recode_bash(text) + " \<br> | " + recode_bash(self[4].string) + "</div></pre>\n"
+    grammar = OPTIONAL(SPACE), L("echo "), REST_OF_LINE, L(' | '), REST_OF_LINE, EOL
+
+    def value(self):
+        text = " ".join("\"%s\"" % s for s in self[2].string.split(" "))
+        return "<pre class='shell'><div>echo " + recode_bash(text) + " \<br> | " + recode_bash(self[4].string) + "</div></pre>\n"
+
 
 def shell_line(bash):
-	return "<pre class='shell'><div>" + recode_bash(bash.strip()) + "</div></pre>\n"
+    return "<pre class='shell'><div>" + recode_bash(bash.strip()) + "</div></pre>\n"
+
 
 class AptGet(Grammar):
-	grammar = (ZERO_OR_MORE(SPACE), L("apt_install "), REST_OF_LINE, EOL)
-	def value(self):
-		return shell_line("apt-get install -y " + re.sub(r"\s+", " ", self[2].string))
+    grammar = (ZERO_OR_MORE(SPACE), L("apt_install "), REST_OF_LINE, EOL)
+
+    def value(self):
+        return shell_line("apt-get install -y " + re.sub(r"\s+", " ", self[2].string))
+
+
 class UfwAllow(Grammar):
-	grammar = (ZERO_OR_MORE(SPACE), L("ufw_allow "), REST_OF_LINE, EOL)
-	def value(self):
-		return shell_line("ufw allow " + self[2].string)
+    grammar = (ZERO_OR_MORE(SPACE), L("ufw_allow "), REST_OF_LINE, EOL)
+
+    def value(self):
+        return shell_line("ufw allow " + self[2].string)
+
+
 class RestartService(Grammar):
-	grammar = (ZERO_OR_MORE(SPACE), L("restart_service "), REST_OF_LINE, EOL)
-	def value(self):
-		return shell_line("service " + self[2].string + " restart")
+    grammar = (ZERO_OR_MORE(SPACE), L("restart_service "), REST_OF_LINE, EOL)
+
+    def value(self):
+        return shell_line("service " + self[2].string + " restart")
+
 
 class OtherLine(Grammar):
-	grammar = (REST_OF_LINE, EOL)
-	def value(self):
-		if self.string.strip() == "": return ""
-		if "source setup/functions.sh" in self.string: return ""
-		if "source /etc/mailinabox.conf" in self.string: return ""
-		return "<pre class='shell'><div>" + recode_bash(self.string.strip()) + "</div></pre>\n"
+    grammar = (REST_OF_LINE, EOL)
+
+    def value(self):
+        if self.string.strip() == "":
+            return ""
+        if "source setup/functions.sh" in self.string:
+            return ""
+        if "source /etc/mailinabox.conf" in self.string:
+            return ""
+        return "<pre class='shell'><div>" + recode_bash(self.string.strip()) + "</div></pre>\n"
+
 
 class BashElement(Grammar):
-	grammar = Comment | CatEOF | EchoPipe | EchoLine | HideOutput | EditConf | SedReplace | AptGet | UfwAllow | RestartService | OtherLine
-	def value(self):
-		return self[0].value()
+    grammar = Comment | CatEOF | EchoPipe | EchoLine | HideOutput | EditConf | SedReplace | AptGet | UfwAllow | RestartService | OtherLine
+
+    def value(self):
+        return self[0].value()
 
 # Make some special characters to private use Unicode code points.
 bash_special_characters1 = {
-	"\n": "\uE000",
-	" ": "\uE001",
+    "\n": "\uE000",
+    " ": "\uE001",
 }
 bash_special_characters2 = {
-	"$": "\uE010",
+    "$": "\uE010",
 }
 bash_escapes = {
-	"n": "\uE020",
-	"t": "\uE021",
+    "n": "\uE020",
+    "t": "\uE021",
 }
 
+
 def quasitokenize(bashscript):
-	# Make a parse of bash easier by making the tokenization easy.
-	newscript = ""
-	quote_mode = None
-	escape_next = False
-	line_comment = False
-	subshell = 0
-	for c in bashscript:
-		if line_comment:
-			# We're in a comment until the end of the line.
-			newscript += c
-			if c == '\n':
-				line_comment = False
-		elif escape_next:
-			# Previous character was a \. Normally the next character
-			# comes through literally, but escaped newlines are line
-			# continuations and some escapes are for special characters
-			# which we'll recode and then turn back into escapes later.
-			if c == "\n":
-				c = " "
-			elif c in bash_escapes:
-				c = bash_escapes[c]
-			newscript += c
-			escape_next = False
-		elif c == "\\":
-			# Escaping next character.
-			escape_next = True
-		elif quote_mode is None and c in ('"', "'"):
-			# Starting a quoted word.
-			quote_mode = c
-		elif c == quote_mode:
-			# Ending a quoted word.
-			quote_mode = None
-		elif quote_mode is not None and quote_mode != "EOF" and c in bash_special_characters1:
-			# Replace special tokens within quoted words so that they
-			# don't interfere with tokenization later.
-			newscript += bash_special_characters1[c]
-		elif quote_mode is None and c == '#':
-			# Start of a line comment.
-			newscript += c
-			line_comment = True
-		elif quote_mode is None and c == ';' and subshell == 0:
-			# End of a statement.
-			newscript += "\n"
-		elif quote_mode is None and c == '(':
-			# Start of a subshell.
-			newscript += c
-			subshell += 1
-		elif quote_mode is None and c == ')':
-			# End of a subshell.
-			newscript += c
-			subshell -= 1
-		elif quote_mode is None and c == '\t':
-			# Make these just spaces.
-			if newscript[-1] != " ":
-				newscript += " "
-		elif quote_mode is None and c == ' ':
-			# Collapse consecutive spaces.
-			if newscript[-1] != " ":
-				newscript += " "
-		elif c in bash_special_characters2:
-			newscript += bash_special_characters2[c]
-		else:
-			# All other characters.
-			newscript += c
+    # Make a parse of bash easier by making the tokenization easy.
+    newscript = ""
+    quote_mode = None
+    escape_next = False
+    line_comment = False
+    subshell = 0
+    for c in bashscript:
+        if line_comment:
+            # We're in a comment until the end of the line.
+            newscript += c
+            if c == '\n':
+                line_comment = False
+        elif escape_next:
+            # Previous character was a \. Normally the next character
+            # comes through literally, but escaped newlines are line
+            # continuations and some escapes are for special characters
+            # which we'll recode and then turn back into escapes later.
+            if c == "\n":
+                c = " "
+            elif c in bash_escapes:
+                c = bash_escapes[c]
+            newscript += c
+            escape_next = False
+        elif c == "\\":
+            # Escaping next character.
+            escape_next = True
+        elif quote_mode is None and c in ('"', "'"):
+            # Starting a quoted word.
+            quote_mode = c
+        elif c == quote_mode:
+            # Ending a quoted word.
+            quote_mode = None
+        elif quote_mode is not None and quote_mode != "EOF" and c in bash_special_characters1:
+            # Replace special tokens within quoted words so that they
+            # don't interfere with tokenization later.
+            newscript += bash_special_characters1[c]
+        elif quote_mode is None and c == '#':
+            # Start of a line comment.
+            newscript += c
+            line_comment = True
+        elif quote_mode is None and c == ';' and subshell == 0:
+            # End of a statement.
+            newscript += "\n"
+        elif quote_mode is None and c == '(':
+            # Start of a subshell.
+            newscript += c
+            subshell += 1
+        elif quote_mode is None and c == ')':
+            # End of a subshell.
+            newscript += c
+            subshell -= 1
+        elif quote_mode is None and c == '\t':
+            # Make these just spaces.
+            if newscript[-1] != " ":
+                newscript += " "
+        elif quote_mode is None and c == ' ':
+            # Collapse consecutive spaces.
+            if newscript[-1] != " ":
+                newscript += " "
+        elif c in bash_special_characters2:
+            newscript += bash_special_characters2[c]
+        else:
+            # All other characters.
+            newscript += c
 
-		# "<< EOF" escaping.
-		if quote_mode is None and re.search("<<\s*EOF\n$", newscript):
-			quote_mode = "EOF"
-		elif quote_mode == "EOF" and re.search("\nEOF\n$", newscript):
-			quote_mode = None
+        # "<< EOF" escaping.
+        if quote_mode is None and re.search("<<\s*EOF\n$", newscript):
+            quote_mode = "EOF"
+        elif quote_mode == "EOF" and re.search("\nEOF\n$", newscript):
+            quote_mode = None
+
+    return newscript
 
-	return newscript
 
 def recode_bash(s):
-	def requote(tok):
-		tok = tok.replace("\\", "\\\\")
-		for c in bash_special_characters2:
-			tok = tok.replace(c, "\\" + c)
-		tok = fixup_tokens(tok)
-		if " " in tok or '"' in tok:
-			tok = tok.replace("\"", "\\\"")
-			tok = '"' + tok +'"'
-		else:
-			tok = tok.replace("'", "\\'")
-		return tok
-	return cgi.escape(" ".join(requote(tok) for tok in s.split(" ")))
+    def requote(tok):
+        tok = tok.replace("\\", "\\\\")
+        for c in bash_special_characters2:
+            tok = tok.replace(c, "\\" + c)
+        tok = fixup_tokens(tok)
+        if " " in tok or '"' in tok:
+            tok = tok.replace("\"", "\\\"")
+            tok = '"' + tok + '"'
+        else:
+            tok = tok.replace("'", "\\'")
+        return tok
+    return cgi.escape(" ".join(requote(tok) for tok in s.split(" ")))
+
 
 def fixup_tokens(s):
-	for c, enc in bash_special_characters1.items():
-		s = s.replace(enc, c)
-	for c, enc in bash_special_characters2.items():
-		s = s.replace(enc, c)
-	for esc, c in bash_escapes.items():
-		s = s.replace(c, "\\" + esc)
-	return s
+    for c, enc in bash_special_characters1.items():
+        s = s.replace(enc, c)
+    for c, enc in bash_special_characters2.items():
+        s = s.replace(enc, c)
+    for esc, c in bash_escapes.items():
+        s = s.replace(c, "\\" + esc)
+    return s
+
 
 class BashScript(Grammar):
-	grammar = (OPTIONAL(HashBang), REPEAT(BashElement))
-	def value(self):
-		return [line.value() for line in self[1]]
+    grammar = (OPTIONAL(HashBang), REPEAT(BashElement))
 
-	@staticmethod
-	def parse(fn):
-		if fn in ("setup/functions.sh", "/etc/mailinabox.conf"): return ""
-		string = open(fn).read()
+    def value(self):
+        return [line.value() for line in self[1]]
 
-		# tokenize
-		string = re.sub(".* #NODOC\n", "", string)
-		string = re.sub("\n\s*if .*then.*|\n\s*fi|\n\s*else|\n\s*elif .*", "", string)
-		string = quasitokenize(string)
-		string = re.sub("hide_output ", "", string)
+    @staticmethod
+    def parse(fn):
+        if fn in ("setup/functions.sh", "/etc/mailinabox.conf"):
+            return ""
+        string = open(fn).read()
 
-		parser = BashScript.parser()
-		result = parser.parse_string(string)
+        # tokenize
+        string = re.sub(".* #NODOC\n", "", string)
+        string = re.sub("\n\s*if .*then.*|\n\s*fi|\n\s*else|\n\s*elif .*", "", string)
+        string = quasitokenize(string)
+        string = re.sub("hide_output ", "", string)
 
-		v = "<div class='row'><div class='col-xs-12 sourcefile'>view the bash source for the following section at <a href=\"%s\">%s</a></div></div>\n" \
-			 % ("https://github.com/mail-in-a-box/mailinabox/tree/master/" + fn, fn)
+        parser = BashScript.parser()
+        result = parser.parse_string(string)
 
-		mode = 0
-		for item in result.value():
-			if item.strip() == "":
-				pass
-			elif item.startswith("<p") and not item.startswith("<pre"):
-				clz = ""
-				if mode == 2:
-					v += "</div>\n" # col
-					v += "</div>\n" # row
-					mode = 0
-					clz = "contd"
-				if mode == 0:
-					v += "<div class='row %s'>\n" % clz
-					v += "<div class='col-md-6 prose'>\n"
-				v += item
-				mode = 1
-			elif item.startswith("<h"):
-				if mode != 0:
-					v += "</div>\n" # col
-					v += "</div>\n" # row
-				v += "<div class='row'>\n"
-				v += "<div class='col-md-6 header'>\n"
-				v += item
-				v += "</div>\n" # col
-				v += "<div class='col-md-6 terminal'> </div>\n"
-				v += "</div>\n" # row
-				mode = 0
-			else:
-				if mode == 0:
-					v += "<div class='row'>\n"
-					v += "<div class='col-md-offset-6 col-md-6 terminal'>\n"
-				elif mode == 1:
-					v += "</div>\n"
-					v += "<div class='col-md-6 terminal'>\n"
-				mode = 2
-				v += item
+        v = "<div class='row'><div class='col-xs-12 sourcefile'>view the bash source for the following section at <a href=\"%s\">%s</a></div></div>\n" \
+            % ("https://github.com/mail-in-a-box/mailinabox/tree/master/" + fn, fn)
 
-		v += "</div>\n" # col
-		v += "</div>\n" # row
+        mode = 0
+        for item in result.value():
+            if item.strip() == "":
+                pass
+            elif item.startswith("<p") and not item.startswith("<pre"):
+                clz = ""
+                if mode == 2:
+                    v += "</div>\n"  # col
+                    v += "</div>\n"  # row
+                    mode = 0
+                    clz = "contd"
+                if mode == 0:
+                    v += "<div class='row %s'>\n" % clz
+                    v += "<div class='col-md-6 prose'>\n"
+                v += item
+                mode = 1
+            elif item.startswith("<h"):
+                if mode != 0:
+                    v += "</div>\n"  # col
+                    v += "</div>\n"  # row
+                v += "<div class='row'>\n"
+                v += "<div class='col-md-6 header'>\n"
+                v += item
+                v += "</div>\n"  # col
+                v += "<div class='col-md-6 terminal'> </div>\n"
+                v += "</div>\n"  # row
+                mode = 0
+            else:
+                if mode == 0:
+                    v += "<div class='row'>\n"
+                    v += "<div class='col-md-offset-6 col-md-6 terminal'>\n"
+                elif mode == 1:
+                    v += "</div>\n"
+                    v += "<div class='col-md-6 terminal'>\n"
+                mode = 2
+                v += item
 
-		v = fixup_tokens(v)
+        v += "</div>\n"  # col
+        v += "</div>\n"  # row
 
-		v = v.replace("</pre>\n<pre class='shell'>", "")
-		v = re.sub("<pre>([\w\W]*?)</pre>", lambda m : "<pre>" + strip_indent(m.group(1)) + "</pre>", v)
+        v = fixup_tokens(v)
 
-		v = re.sub(r"(\$?)PRIMARY_HOSTNAME", r"<b>box.yourdomain.com</b>", v)
-		v = re.sub(r"\$STORAGE_ROOT", r"<b>$STORE</b>", v)
-		v = re.sub(r"\$CSR_COUNTRY", r"<b>US</b>", v)
-		v = v.replace("`pwd`",  "<code><b>/path/to/mailinabox</b></code>")
+        v = v.replace("</pre>\n<pre class='shell'>", "")
+        v = re.sub("<pre>([\w\W]*?)</pre>", lambda m: "<pre>" + strip_indent(m.group(1)) + "</pre>", v)
+
+        v = re.sub(r"(\$?)PRIMARY_HOSTNAME", r"<b>box.yourdomain.com</b>", v)
+        v = re.sub(r"\$STORAGE_ROOT", r"<b>$STORE</b>", v)
+        v = re.sub(r"\$CSR_COUNTRY", r"<b>US</b>", v)
+        v = v.replace("`pwd`",  "<code><b>/path/to/mailinabox</b></code>")
+
+        return v
 
-		return v
 
 def wrap_lines(text, cols=60):
-	ret = ""
-	words = re.split("(\s+)", text)
-	linelen = 0
-	for w in words:
-		if linelen + len(w) > cols-1:
-			ret += " \\\n"
-			ret += "   "
-			linelen = 0
-		if linelen == 0 and w.strip() == "": continue
-		ret += w
-		linelen += len(w)
-	return ret
+    ret = ""
+    words = re.split("(\s+)", text)
+    linelen = 0
+    for w in words:
+        if linelen + len(w) > cols-1:
+            ret += " \\\n"
+            ret += "   "
+            linelen = 0
+        if linelen == 0 and w.strip() == "":
+            continue
+        ret += w
+        linelen += len(w)
+    return ret
 
 if __name__ == '__main__':
-	generate_documentation()
+    generate_documentation()