diff --git a/management/auth.py b/management/auth.py index c576d01c..873047d5 100644 --- a/management/auth.py +++ b/management/auth.py @@ -1,4 +1,4 @@ -import base64, os, os.path, hmac, json, secrets +import base64, hmac, json, secrets from datetime import timedelta from expiringdict import ExpiringDict @@ -22,7 +22,7 @@ class AuthService: def init_system_api_key(self): """Write an API key to a local file so local processes can use the API""" - with open(self.key_path, 'r') as file: + with open(self.key_path, encoding='utf-8') as file: self.key = file.read() def authenticate(self, request, env, login_only=False, logout=False): @@ -48,11 +48,13 @@ class AuthService: return username, password username, password = parse_http_authorization_basic(request.headers.get('Authorization', '')) - if username in (None, ""): - raise ValueError("Authorization header invalid.") + if username in {None, ""}: + msg = "Authorization header invalid." + raise ValueError(msg) if username.strip() == "" and password.strip() == "": - raise ValueError("No email address, password, session key, or API key provided.") + msg = "No email address, password, session key, or API key provided." + raise ValueError(msg) # If user passed the system API key, grant administrative privs. This key # is not associated with a user. @@ -72,7 +74,8 @@ class AuthService: # If no password was given, but a username was given, we're missing some information. elif password.strip() == "": - raise ValueError("Enter a password.") + msg = "Enter a password." + raise ValueError(msg) else: # The user is trying to log in with a username and a password @@ -114,7 +117,8 @@ class AuthService: ]) except: # Login failed. - raise ValueError("Incorrect email address or password.") + msg = "Incorrect email address or password." + raise ValueError(msg) # If MFA is enabled, check that MFA passes. 
status, hints = validate_auth_mfa(email, request, env) diff --git a/management/backup.py b/management/backup.py index c2ef7676..f352fb41 100755 --- a/management/backup.py +++ b/management/backup.py @@ -7,7 +7,7 @@ # 4) The stopped services are restarted. # 5) STORAGE_ROOT/backup/after-backup is executed if it exists. -import os, os.path, shutil, glob, re, datetime, sys +import os, os.path, re, datetime, sys import dateutil.parser, dateutil.relativedelta, dateutil.tz import rtyaml from exclusiveprocess import Lock @@ -59,7 +59,7 @@ def backup_status(env): "--archive-dir", backup_cache_dir, "--gpg-options", "'--cipher-algo=AES256'", "--log-fd", "1", - ] + get_duplicity_additional_args(env) + [ + *get_duplicity_additional_args(env), get_duplicity_target_url(config) ], get_duplicity_env_vars(env), @@ -69,7 +69,7 @@ def backup_status(env): # destination for the backups or the last backup job terminated unexpectedly. raise Exception("Something is wrong with the backup: " + collection_status) for line in collection_status.split('\n'): - if line.startswith(" full") or line.startswith(" inc"): + if line.startswith((" full", " inc")): backup = parse_line(line) backups[backup["date"]] = backup @@ -185,7 +185,7 @@ def get_passphrase(env): # only needs to be 43 base64-characters to match AES256's key # length of 32 bytes. 
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') - with open(os.path.join(backup_root, 'secret_key.txt')) as f: + with open(os.path.join(backup_root, 'secret_key.txt'), encoding="utf-8") as f: passphrase = f.readline().strip() if len(passphrase) < 43: raise Exception("secret_key.txt's first line is too short!") @@ -226,7 +226,7 @@ def get_duplicity_additional_args(env): port = 22 if port is None: port = 22 - + return [ f"--ssh-options='-i /root/.ssh/id_rsa_miab -p {port}'", f"--rsync-options='-e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p {port} -i /root/.ssh/id_rsa_miab\"'", @@ -257,8 +257,7 @@ def get_duplicity_env_vars(env): return env def get_target_type(config): - protocol = config["target"].split(":")[0] - return protocol + return config["target"].split(":")[0] def perform_backup(full_backup): env = load_environment() @@ -323,8 +322,8 @@ def perform_backup(full_backup): "--exclude", backup_root, "--volsize", "250", "--gpg-options", "'--cipher-algo=AES256'", - "--allow-source-mismatch" - ] + get_duplicity_additional_args(env) + [ + "--allow-source-mismatch", + *get_duplicity_additional_args(env), env["STORAGE_ROOT"], get_duplicity_target_url(config), ], @@ -345,7 +344,7 @@ def perform_backup(full_backup): "--verbosity", "error", "--archive-dir", backup_cache_dir, "--force", - ] + get_duplicity_additional_args(env) + [ + *get_duplicity_additional_args(env), get_duplicity_target_url(config) ], get_duplicity_env_vars(env)) @@ -361,7 +360,7 @@ def perform_backup(full_backup): "--verbosity", "error", "--archive-dir", backup_cache_dir, "--force", - ] + get_duplicity_additional_args(env) + [ + *get_duplicity_additional_args(env), get_duplicity_target_url(config) ], get_duplicity_env_vars(env)) @@ -400,7 +399,7 @@ def run_duplicity_verification(): "--compare-data", "--archive-dir", backup_cache_dir, "--exclude", backup_root, - ] + get_duplicity_additional_args(env) + [ + *get_duplicity_additional_args(env), get_duplicity_target_url(config), 
env["STORAGE_ROOT"], ], get_duplicity_env_vars(env)) @@ -413,9 +412,9 @@ def run_duplicity_restore(args): "/usr/bin/duplicity", "restore", "--archive-dir", backup_cache_dir, - ] + get_duplicity_additional_args(env) + [ - get_duplicity_target_url(config) - ] + args, + *get_duplicity_additional_args(env), + get_duplicity_target_url(config), + *args], get_duplicity_env_vars(env)) def print_duplicity_command(): @@ -427,7 +426,7 @@ def print_duplicity_command(): print(f"export {k}={shlex.quote(v)}") print("duplicity", "{command}", shlex.join([ "--archive-dir", backup_cache_dir, - ] + get_duplicity_additional_args(env) + [ + *get_duplicity_additional_args(env), get_duplicity_target_url(config) ])) @@ -483,21 +482,22 @@ def list_target_files(config): if 'Permission denied (publickey).' in listing: reason = "Invalid user or check you correctly copied the SSH key." elif 'No such file or directory' in listing: - reason = "Provided path {} is invalid.".format(target_path) + reason = f"Provided path {target_path} is invalid." elif 'Network is unreachable' in listing: - reason = "The IP address {} is unreachable.".format(target.hostname) + reason = f"The IP address {target.hostname} is unreachable." elif 'Could not resolve hostname' in listing: - reason = "The hostname {} cannot be resolved.".format(target.hostname) + reason = f"The hostname {target.hostname} cannot be resolved." else: - reason = "Unknown error." \ - "Please check running 'management/backup.py --verify'" \ - "from mailinabox sources to debug the issue." - raise ValueError("Connection to rsync host failed: {}".format(reason)) + reason = ("Unknown error." 
+ "Please check running 'management/backup.py --verify'" + "from mailinabox sources to debug the issue.") + msg = f"Connection to rsync host failed: {reason}" + raise ValueError(msg) elif target.scheme == "s3": import boto3.s3 from botocore.exceptions import ClientError - + # separate bucket from path in target bucket = target.path[1:].split('/')[0] path = '/'.join(target.path[1:].split('/')[1:]) + '/' @@ -507,7 +507,8 @@ def list_target_files(config): path = '' if bucket == "": - raise ValueError("Enter an S3 bucket name.") + msg = "Enter an S3 bucket name." + raise ValueError(msg) # connect to the region & bucket try: @@ -525,7 +526,7 @@ def list_target_files(config): from b2sdk.v1.exception import NonExistentBucket info = InMemoryAccountInfo() b2_api = B2Api(info) - + # Extract information from target b2_application_keyid = target.netloc[:target.netloc.index(':')] b2_application_key = urllib.parse.unquote(target.netloc[target.netloc.index(':')+1:target.netloc.index('@')]) @@ -534,8 +535,9 @@ def list_target_files(config): try: b2_api.authorize_account("production", b2_application_keyid, b2_application_key) bucket = b2_api.get_bucket_by_name(b2_bucket) - except NonExistentBucket as e: - raise ValueError("B2 Bucket does not exist. Please double check your information!") + except NonExistentBucket: + msg = "B2 Bucket does not exist. Please double check your information!" + raise ValueError(msg) return [(key.file_name, key.size) for key, _ in bucket.ls()] else: @@ -556,7 +558,7 @@ def backup_set_custom(env, target, target_user, target_pass, min_age): # Validate. 
try: - if config["target"] not in ("off", "local"): + if config["target"] not in {"off", "local"}: # these aren't supported by the following function, which expects a full url in the target key, # which is what is there except when loading the config prior to saving list_target_files(config) @@ -578,9 +580,9 @@ def get_backup_config(env, for_save=False, for_ui=False): # Merge in anything written to custom.yaml. try: - with open(os.path.join(backup_root, 'custom.yaml'), 'r') as f: + with open(os.path.join(backup_root, 'custom.yaml'), encoding="utf-8") as f: custom_config = rtyaml.load(f) - if not isinstance(custom_config, dict): raise ValueError() # caught below + if not isinstance(custom_config, dict): raise ValueError # caught below config.update(custom_config) except: pass @@ -604,18 +606,17 @@ def get_backup_config(env, for_save=False, for_ui=False): config["target"] = "file://" + config["file_target_directory"] ssh_pub_key = os.path.join('/root', '.ssh', 'id_rsa_miab.pub') if os.path.exists(ssh_pub_key): - with open(ssh_pub_key, 'r') as f: + with open(ssh_pub_key, encoding="utf-8") as f: config["ssh_pub_key"] = f.read() return config def write_backup_config(env, newconfig): backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') - with open(os.path.join(backup_root, 'custom.yaml'), "w") as f: + with open(os.path.join(backup_root, 'custom.yaml'), "w", encoding="utf-8") as f: f.write(rtyaml.dump(newconfig)) if __name__ == "__main__": - import sys if sys.argv[-1] == "--verify": # Run duplicity's verification command to check a) the backup files # are readable, and b) report if they are up to date. @@ -624,7 +625,7 @@ if __name__ == "__main__": elif sys.argv[-1] == "--list": # List the saved backup files. for fn, size in list_target_files(get_backup_config(load_environment())): - print("{}\t{}".format(fn, size)) + print(f"{fn}\t{size}") elif sys.argv[-1] == "--status": # Show backup status. 
diff --git a/management/cli.py b/management/cli.py index b32089c6..70dbe894 100755 --- a/management/cli.py +++ b/management/cli.py @@ -6,7 +6,8 @@ # root API key. This file is readable only by root, so this # tool can only be used as root. -import sys, getpass, urllib.request, urllib.error, json, re, csv +import sys, getpass, urllib.request, urllib.error, json, csv +import contextlib def mgmt(cmd, data=None, is_json=False): # The base URL for the management daemon. (Listens on IPv4 only.) @@ -19,10 +20,8 @@ def mgmt(cmd, data=None, is_json=False): response = urllib.request.urlopen(req) except urllib.error.HTTPError as e: if e.code == 401: - try: + with contextlib.suppress(Exception): print(e.read().decode("utf8")) - except: - pass print("The management daemon refused access. The API key file may be out of sync. Try 'service mailinabox restart'.", file=sys.stderr) elif hasattr(e, 'read'): print(e.read().decode('utf8'), file=sys.stderr) @@ -47,7 +46,7 @@ def read_password(): return first def setup_key_auth(mgmt_uri): - with open('/var/lib/mailinabox/api.key', 'r') as f: + with open('/var/lib/mailinabox/api.key', encoding='utf-8') as f: key = f.read().strip() auth_handler = urllib.request.HTTPBasicAuthHandler() @@ -91,12 +90,9 @@ elif sys.argv[1] == "user" and len(sys.argv) == 2: print("*", end='') print() -elif sys.argv[1] == "user" and sys.argv[2] in ("add", "password"): +elif sys.argv[1] == "user" and sys.argv[2] in {"add", "password"}: if len(sys.argv) < 5: - if len(sys.argv) < 4: - email = input("email: ") - else: - email = sys.argv[3] + email = input('email: ') if len(sys.argv) < 4 else sys.argv[3] pw = read_password() else: email, pw = sys.argv[3:5] @@ -109,11 +105,8 @@ elif sys.argv[1] == "user" and sys.argv[2] in ("add", "password"): elif sys.argv[1] == "user" and sys.argv[2] == "remove" and len(sys.argv) == 4: print(mgmt("/mail/users/remove", { "email": sys.argv[3] })) -elif sys.argv[1] == "user" and sys.argv[2] in ("make-admin", "remove-admin") and 
len(sys.argv) == 4: - if sys.argv[2] == "make-admin": - action = "add" - else: - action = "remove" +elif sys.argv[1] == "user" and sys.argv[2] in {"make-admin", "remove-admin"} and len(sys.argv) == 4: + action = 'add' if sys.argv[2] == 'make-admin' else 'remove' print(mgmt("/mail/users/privileges/" + action, { "email": sys.argv[3], "privilege": "admin" })) elif sys.argv[1] == "user" and sys.argv[2] == "admins": @@ -132,7 +125,7 @@ elif sys.argv[1] == "user" and len(sys.argv) == 5 and sys.argv[2:4] == ["mfa", " for mfa in status["enabled_mfa"]: W.writerow([mfa["id"], mfa["type"], mfa["label"]]) -elif sys.argv[1] == "user" and len(sys.argv) in (5, 6) and sys.argv[2:4] == ["mfa", "disable"]: +elif sys.argv[1] == "user" and len(sys.argv) in {5, 6} and sys.argv[2:4] == ["mfa", "disable"]: # Disable MFA (all or a particular device) for a user. print(mgmt("/mfa/disable", { "user": sys.argv[4], "mfa-id": sys.argv[5] if len(sys.argv) == 6 else None })) diff --git a/management/daemon.py b/management/daemon.py index 47548531..3aa6eed2 100755 --- a/management/daemon.py +++ b/management/daemon.py @@ -11,17 +11,18 @@ # service mailinabox start # when done debugging, start it up again import os, os.path, re, json, time -import multiprocessing.pool, subprocess +import multiprocessing.pool from functools import wraps -from flask import Flask, request, render_template, abort, Response, send_from_directory, make_response +from flask import Flask, request, render_template, Response, send_from_directory, make_response import auth, utils from mailconfig import get_mail_users, get_mail_users_ex, get_admins, add_mail_user, set_mail_password, remove_mail_user from mailconfig import get_mail_user_privileges, add_remove_mail_user_privilege from mailconfig import get_mail_aliases, get_mail_aliases_ex, get_mail_domains, add_mail_alias, remove_mail_alias from mfa import get_public_mfa_state, provision_totp, validate_totp_secret, enable_mfa, disable_mfa +import contextlib env = 
utils.load_environment() @@ -29,14 +30,12 @@ auth_service = auth.AuthService() # We may deploy via a symbolic link, which confuses flask's template finding. me = __file__ -try: +with contextlib.suppress(OSError): me = os.readlink(__file__) -except OSError: - pass # for generating CSRs we need a list of country codes csr_country_codes = [] -with open(os.path.join(os.path.dirname(me), "csr_country_codes.tsv")) as f: +with open(os.path.join(os.path.dirname(me), "csr_country_codes.tsv"), encoding="utf-8") as f: for line in f: if line.strip() == "" or line.startswith("#"): continue code, name = line.strip().split("\t")[0:2] @@ -80,7 +79,7 @@ def authorized_personnel_only(viewfunc): # Not authorized. Return a 401 (send auth) and a prompt to authorize by default. status = 401 headers = { - 'WWW-Authenticate': 'Basic realm="{0}"'.format(auth_service.auth_realm), + 'WWW-Authenticate': f'Basic realm="{auth_service.auth_realm}"', 'X-Reason': error, } @@ -90,7 +89,7 @@ def authorized_personnel_only(viewfunc): status = 403 headers = None - if request.headers.get('Accept') in (None, "", "*/*"): + if request.headers.get('Accept') in {None, "", "*/*"}: # Return plain text output. return Response(error+"\n", status=status, mimetype='text/plain', headers=headers) else: @@ -164,7 +163,7 @@ def login(): "api_key": auth_service.create_session_key(email, env, type='login'), } - app.logger.info("New login session created for {}".format(email)) + app.logger.info(f"New login session created for {email}") # Return. return json_response(resp) @@ -173,8 +172,8 @@ def login(): def logout(): try: email, _ = auth_service.authenticate(request, env, logout=True) - app.logger.info("{} logged out".format(email)) - except ValueError as e: + app.logger.info(f"{email} logged out") + except ValueError: pass finally: return json_response({ "status": "ok" }) @@ -355,9 +354,9 @@ def dns_set_record(qname, rtype="A"): # Get the existing records matching the qname and rtype. 
return dns_get_records(qname, rtype) - elif request.method in ("POST", "PUT"): + elif request.method in {"POST", "PUT"}: # There is a default value for A/AAAA records. - if rtype in ("A", "AAAA") and value == "": + if rtype in {"A", "AAAA"} and value == "": value = request.environ.get("HTTP_X_FORWARDED_FOR") # normally REMOTE_ADDR but we're behind nginx as a reverse proxy # Cannot add empty records. @@ -419,7 +418,7 @@ def ssl_get_status(): { "domain": d["domain"], "status": d["ssl_certificate"][0], - "text": d["ssl_certificate"][1] + ((" " + cant_provision[d["domain"]] if d["domain"] in cant_provision else "")) + "text": d["ssl_certificate"][1] + (" " + cant_provision[d["domain"]] if d["domain"] in cant_provision else "") } for d in domains_status ] # Warn the user about domain names not hosted here because of other settings. @@ -491,7 +490,7 @@ def totp_post_enable(): secret = request.form.get('secret') token = request.form.get('token') label = request.form.get('label') - if type(token) != str: + if not isinstance(token, str): return ("Bad Input", 400) try: validate_totp_secret(secret) @@ -580,8 +579,7 @@ def system_status(): def show_updates(): from status_checks import list_apt_updates return "".join( - "%s (%s)\n" - % (p["package"], p["version"]) + "{} ({})\n".format(p["package"], p["version"]) for p in list_apt_updates()) @app.route('/system/update-packages', methods=["POST"]) @@ -751,14 +749,11 @@ def log_failed_login(request): # During setup we call the management interface directly to determine the user # status. So we can't always use X-Forwarded-For because during setup that header # will not be present. 
- if request.headers.getlist("X-Forwarded-For"): - ip = request.headers.getlist("X-Forwarded-For")[0] - else: - ip = request.remote_addr + ip = request.headers.getlist("X-Forwarded-For")[0] if request.headers.getlist("X-Forwarded-For") else request.remote_addr # We need to add a timestamp to the log message, otherwise /dev/log will eat the "duplicate" # message. - app.logger.warning( "Mail-in-a-Box Management Daemon: Failed login attempt from ip %s - timestamp %s" % (ip, time.time())) + app.logger.warning( f"Mail-in-a-Box Management Daemon: Failed login attempt from ip {ip} - timestamp {time.time()}") # APP diff --git a/management/dns_update.py b/management/dns_update.py index 9a768ea8..597df243 100755 --- a/management/dns_update.py +++ b/management/dns_update.py @@ -4,19 +4,20 @@ # and mail aliases and restarts nsd. ######################################################################## -import sys, os, os.path, urllib.parse, datetime, re, hashlib, base64 +import sys, os, os.path, datetime, re, hashlib, base64 import ipaddress import rtyaml import dns.resolver from utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains from ssl_certificates import get_ssl_certificates, check_certificate +import contextlib # From https://stackoverflow.com/questions/3026957/how-to-validate-a-domain-name-using-regex-php/16491074#16491074 # This regular expression matches domain names according to RFCs, it also accepts fqdn with an leading dot, # underscores, as well as asteriks which are allowed in domain names but not hostnames (i.e. allowed in # DNS but not in URLs), which are common in certain record types like for DKIM. 
-DOMAIN_RE = "^(?!\-)(?:[*][.])?(?:[a-zA-Z\d\-_]{0,62}[a-zA-Z\d_]\.){1,126}(?!\d+)[a-zA-Z\d_]{1,63}(\.?)$" +DOMAIN_RE = r"^(?!\-)(?:[*][.])?(?:[a-zA-Z\d\-_]{0,62}[a-zA-Z\d_]\.){1,126}(?!\d+)[a-zA-Z\d_]{1,63}(\.?)$" def get_dns_domains(env): # Add all domain names in use by email users and mail aliases, any @@ -38,7 +39,7 @@ def get_dns_zones(env): # Exclude domains that are subdomains of other domains we know. Proceed # by looking at shorter domains first. zone_domains = set() - for domain in sorted(domains, key=lambda d : len(d)): + for domain in sorted(domains, key=len): for d in zone_domains: if domain.endswith("." + d): # We found a parent domain already in the list. @@ -48,9 +49,7 @@ def get_dns_zones(env): zone_domains.add(domain) # Make a nice and safe filename for each domain. - zonefiles = [] - for domain in zone_domains: - zonefiles.append([domain, safe_domain_name(domain) + ".txt"]) + zonefiles = [[domain, safe_domain_name(domain) + ".txt"] for domain in zone_domains] # Sort the list so that the order is nice and so that nsd.conf has a # stable order so we don't rewrite the file & restart the service @@ -194,8 +193,7 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True) # User may provide one or more additional nameservers secondary_ns_list = get_secondary_dns(additional_records, mode="NS") \ or ["ns2." + env["PRIMARY_HOSTNAME"]] - for secondary_ns in secondary_ns_list: - records.append((None, "NS", secondary_ns+'.', False)) + records.extend((None, "NS", secondary_ns+'.', False) for secondary_ns in secondary_ns_list) # In PRIMARY_HOSTNAME... @@ -212,8 +210,7 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True) records.append(("_443._tcp", "TLSA", build_tlsa_record(env), "Optional. When DNSSEC is enabled, provides out-of-band HTTPS certificate validation for a few web clients that support it.")) # Add a SSHFP records to help SSH key validation. One per available SSH key on this system. 
- for value in build_sshfp_records(): - records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh.")) + records.extend((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh.") for value in build_sshfp_records()) # Add DNS records for any subdomains of this domain. We should not have a zone for # both a domain and one of its subdomains. @@ -223,7 +220,7 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True) subdomain_qname = subdomain[0:-len("." + domain)] subzone = build_zone(subdomain, domain_properties, additional_records, env, is_zone=False) for child_qname, child_rtype, child_value, child_explanation in subzone: - if child_qname == None: + if child_qname is None: child_qname = subdomain_qname else: child_qname += "." + subdomain_qname @@ -231,10 +228,7 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True) has_rec_base = list(records) # clone current state def has_rec(qname, rtype, prefix=None): - for rec in has_rec_base: - if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)): - return True - return False + return any(rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)) for rec in has_rec_base) # The user may set other records that don't conflict with our settings. # Don't put any TXT records above this line, or it'll prevent any custom TXT records. @@ -262,7 +256,7 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True) has_rec_base = list(records) a_expl = "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. 
The A record must be present but its value does not affect mail delivery." % domain if domain_properties[domain]["auto"]: - if domain.startswith("ns1.") or domain.startswith("ns2."): a_expl = False # omit from 'External DNS' page since this only applies if box is its own DNS server + if domain.startswith(("ns1.", "ns2.")): a_expl = False # omit from 'External DNS' page since this only applies if box is its own DNS server if domain.startswith("www."): a_expl = "Optional. Sets the IP address that %s resolves to so that the box can provide a redirect to the parent domain." % domain if domain.startswith("mta-sts."): a_expl = "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt." if domain.startswith("autoconfig."): a_expl = "Provides email configuration autodiscovery support for Thunderbird Autoconfig." @@ -298,7 +292,7 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True) # Append the DKIM TXT record to the zone as generated by OpenDKIM. # Skip if the user has set a DKIM record already. opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt') - with open(opendkim_record_file) as orf: + with open(opendkim_record_file, encoding="utf-8") as orf: m = re.match(r'(\S+)\s+IN\s+TXT\s+\( ((?:"[^"]+"\s+)+)\)', orf.read(), re.S) val = "".join(re.findall(r'"([^"]+)"', m.group(2))) if not has_rec(m.group(1), "TXT", prefix="v=DKIM1; "): @@ -364,8 +358,8 @@ def build_zone(domain, domain_properties, additional_records, env, is_zone=True) # non-mail domain and also may include qnames from custom DNS records. # Do this once at the end of generating a zone. 
if is_zone: - qnames_with_a = set(qname for (qname, rtype, value, explanation) in records if rtype in ("A", "AAAA")) - qnames_with_mx = set(qname for (qname, rtype, value, explanation) in records if rtype == "MX") + qnames_with_a = {qname for (qname, rtype, value, explanation) in records if rtype in {"A", "AAAA"}} + qnames_with_mx = {qname for (qname, rtype, value, explanation) in records if rtype == "MX"} for qname in qnames_with_a - qnames_with_mx: # Mark this domain as not sending mail with hard-fail SPF and DMARC records. d = (qname+"." if qname else "") + domain @@ -455,14 +449,12 @@ def build_sshfp_records(): # specify that port to sshkeyscan. port = 22 - with open('/etc/ssh/sshd_config', 'r') as f: + with open('/etc/ssh/sshd_config', encoding="utf-8") as f: for line in f: s = line.rstrip().split() if len(s) == 2 and s[0] == 'Port': - try: + with contextlib.suppress(ValueError): port = int(s[1]) - except ValueError: - pass break keys = shell("check_output", ["ssh-keyscan", "-4", "-t", "rsa,dsa,ecdsa,ed25519", "-p", str(port), "localhost"]) @@ -471,7 +463,7 @@ def build_sshfp_records(): for key in keys: if key.strip() == "" or key[0] == "#": continue try: - host, keytype, pubkey = key.split(" ") + _host, keytype, pubkey = key.split(" ") yield "%d %d ( %s )" % ( algorithm_number[keytype], 2, # specifies we are using SHA-256 on next line @@ -516,7 +508,7 @@ $TTL 86400 ; default time to live zone = zone.format(domain=domain, primary_domain=env["PRIMARY_HOSTNAME"]) # Add records. - for subdomain, querytype, value, explanation in records: + for subdomain, querytype, value, _explanation in records: if subdomain: zone += subdomain zone += "\tIN\t" + querytype + "\t" @@ -534,7 +526,7 @@ $TTL 86400 ; default time to live zone += value + "\n" # Append a stable hash of DNSSEC signing keys in a comment. 
- zone += "\n; DNSSEC signing keys hash: {}\n".format(hash_dnssec_keys(domain, env)) + zone += f"\n; DNSSEC signing keys hash: {hash_dnssec_keys(domain, env)}\n" # DNSSEC requires re-signing a zone periodically. That requires # bumping the serial number even if no other records have changed. @@ -550,7 +542,7 @@ $TTL 86400 ; default time to live # We've signed the domain. Check if we are close to the expiration # time of the signature. If so, we'll force a bump of the serial # number so we can re-sign it. - with open(zonefile + ".signed") as f: + with open(zonefile + ".signed", encoding="utf-8") as f: signed_zone = f.read() expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone) if len(expiration_times) == 0: @@ -569,7 +561,7 @@ $TTL 86400 ; default time to live if os.path.exists(zonefile): # If the zone already exists, is different, and has a later serial number, # increment the number. - with open(zonefile) as f: + with open(zonefile, encoding="utf-8") as f: existing_zone = f.read() m = re.search(r"(\d+)\s*;\s*serial number", existing_zone) if m: @@ -593,7 +585,7 @@ $TTL 86400 ; default time to live zone = zone.replace("__SERIAL__", serial) # Write the zone file. - with open(zonefile, "w") as f: + with open(zonefile, "w", encoding="utf-8") as f: f.write(zone) return True # file is updated @@ -606,7 +598,7 @@ def get_dns_zonefile(zone, env): raise ValueError("%s is not a domain name that corresponds to a zone." % zone) nsd_zonefile = "/etc/nsd/zones/" + fn - with open(nsd_zonefile, "r") as f: + with open(nsd_zonefile, encoding="utf-8") as f: return f.read() ######################################################################## @@ -618,11 +610,11 @@ def write_nsd_conf(zonefiles, additional_records, env): # Append the zones. 
for domain, zonefile in zonefiles: - nsdconf += """ + nsdconf += f""" zone: - name: %s - zonefile: %s -""" % (domain, zonefile) + name: {domain} + zonefile: {zonefile} +""" # If custom secondary nameservers have been set, allow zone transfers # and, if not a subnet, notifies to them. @@ -634,13 +626,13 @@ zone: # Check if the file is changing. If it isn't changing, # return False to flag that no change was made. if os.path.exists(nsd_conf_file): - with open(nsd_conf_file) as f: + with open(nsd_conf_file, encoding="utf-8") as f: if f.read() == nsdconf: return False # Write out new contents and return True to signal that # configuration changed. - with open(nsd_conf_file, "w") as f: + with open(nsd_conf_file, "w", encoding="utf-8") as f: f.write(nsdconf) return True @@ -674,9 +666,8 @@ def hash_dnssec_keys(domain, env): keydata = [] for keytype, keyfn in sorted(find_dnssec_signing_keys(domain, env)): oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ".private") - keydata.append(keytype) - keydata.append(keyfn) - with open(oldkeyfn, "r") as fr: + keydata.extend((keytype, keyfn)) + with open(oldkeyfn, encoding="utf-8") as fr: keydata.append( fr.read() ) keydata = "".join(keydata).encode("utf8") return hashlib.sha1(keydata).hexdigest() @@ -704,12 +695,12 @@ def sign_zone(domain, zonefile, env): # Use os.umask and open().write() to securely create a copy that only # we (root) can read. 
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ext) - with open(oldkeyfn, "r") as fr: + with open(oldkeyfn, encoding="utf-8") as fr: keydata = fr.read() keydata = keydata.replace("_domain_", domain) prev_umask = os.umask(0o77) # ensure written file is not world-readable try: - with open(newkeyfn + ext, "w") as fw: + with open(newkeyfn + ext, "w", encoding="utf-8") as fw: fw.write(keydata) finally: os.umask(prev_umask) # other files we write should be world-readable @@ -743,7 +734,7 @@ def sign_zone(domain, zonefile, env): # be used, so we'll pre-generate all for each key. One DS record per line. Only one # needs to actually be deployed at the registrar. We'll select the preferred one # in the status checks. - with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f: + with open("/etc/nsd/zones/" + zonefile + ".ds", "w", encoding="utf-8") as f: for key in ksk_keys: for digest_type in ('1', '2', '4'): rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds", @@ -780,7 +771,7 @@ def write_opendkim_tables(domains, env): # So we must have a separate KeyTable entry for each domain. "SigningTable": "".join( - "*@{domain} {domain}\n".format(domain=domain) + f"*@{domain} {domain}\n" for domain in domains ), @@ -789,7 +780,7 @@ def write_opendkim_tables(domains, env): # signing domain must match the sender's From: domain. "KeyTable": "".join( - "{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file) + f"{domain} {domain}:mail:{opendkim_key_file}\n" for domain in domains ), } @@ -798,12 +789,12 @@ def write_opendkim_tables(domains, env): for filename, content in config.items(): # Don't write the file if it doesn't need an update. if os.path.exists("/etc/opendkim/" + filename): - with open("/etc/opendkim/" + filename) as f: + with open("/etc/opendkim/" + filename, encoding="utf-8") as f: if f.read() == content: continue # The contents needs to change. 
- with open("/etc/opendkim/" + filename, "w") as f: + with open("/etc/opendkim/" + filename, "w", encoding="utf-8") as f: f.write(content) did_update = True @@ -815,9 +806,9 @@ def write_opendkim_tables(domains, env): def get_custom_dns_config(env, only_real_records=False): try: - with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), 'r') as f: + with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), encoding="utf-8") as f: custom_dns = rtyaml.load(f) - if not isinstance(custom_dns, dict): raise ValueError() # caught below + if not isinstance(custom_dns, dict): raise ValueError # caught below except: return [ ] @@ -835,7 +826,7 @@ def get_custom_dns_config(env, only_real_records=False): # No other type of data is allowed. else: - raise ValueError() + raise ValueError for rtype, value2 in values: if isinstance(value2, str): @@ -845,7 +836,7 @@ def get_custom_dns_config(env, only_real_records=False): yield (qname, rtype, value3) # No other type of data is allowed. else: - raise ValueError() + raise ValueError def filter_custom_records(domain, custom_dns_iter): for qname, rtype, value in custom_dns_iter: @@ -861,10 +852,7 @@ def filter_custom_records(domain, custom_dns_iter): # our short form (None => domain, or a relative QNAME) if # domain is not None. if domain is not None: - if qname == domain: - qname = None - else: - qname = qname[0:len(qname)-len("." + domain)] + qname = None if qname == domain else qname[0:len(qname) - len("." + domain)] yield (qname, rtype, value) @@ -900,12 +888,12 @@ def write_custom_dns_config(config, env): # Write. 
config_yaml = rtyaml.dump(dns) - with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f: + with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w", encoding="utf-8") as f: f.write(config_yaml) def set_custom_dns_record(qname, rtype, value, action, env): # validate qname - for zone, fn in get_dns_zones(env): + for zone, _fn in get_dns_zones(env): # It must match a zone apex or be a subdomain of a zone # that we are otherwise hosting. if qname == zone or qname.endswith("."+zone): @@ -919,24 +907,27 @@ def set_custom_dns_record(qname, rtype, value, action, env): rtype = rtype.upper() if value is not None and qname != "_secondary_nameserver": if not re.search(DOMAIN_RE, qname): - raise ValueError("Invalid name.") + msg = "Invalid name." + raise ValueError(msg) - if rtype in ("A", "AAAA"): + if rtype in {"A", "AAAA"}: if value != "local": # "local" is a special flag for us v = ipaddress.ip_address(value) # raises a ValueError if there's a problem if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.") if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.") - elif rtype in ("CNAME", "NS"): + elif rtype in {"CNAME", "NS"}: if rtype == "NS" and qname == zone: - raise ValueError("NS records can only be set for subdomains.") + msg = "NS records can only be set for subdomains." + raise ValueError(msg) # ensure value has a trailing dot if not value.endswith("."): value = value + "." if not re.search(DOMAIN_RE, value): - raise ValueError("Invalid value.") - elif rtype in ("CNAME", "TXT", "SRV", "MX", "SSHFP", "CAA"): + msg = "Invalid value." + raise ValueError(msg) + elif rtype in {"CNAME", "TXT", "SRV", "MX", "SSHFP", "CAA"}: # anything goes pass else: @@ -969,7 +960,7 @@ def set_custom_dns_record(qname, rtype, value, action, env): # Drop this record. 
made_change = True continue - if value == None and (_qname, _rtype) == (qname, rtype): + if value is None and (_qname, _rtype) == (qname, rtype): # Drop all qname-rtype records. made_change = True continue @@ -979,7 +970,7 @@ def set_custom_dns_record(qname, rtype, value, action, env): # Preserve this record. newconfig.append((_qname, _rtype, _value)) - if action in ("add", "set") and needs_add and value is not None: + if action in {"add", "set"} and needs_add and value is not None: newconfig.append((qname, rtype, value)) made_change = True @@ -996,11 +987,11 @@ def get_secondary_dns(custom_dns, mode=None): resolver.lifetime = 10 values = [] - for qname, rtype, value in custom_dns: + for qname, _rtype, value in custom_dns: if qname != '_secondary_nameserver': continue for hostname in value.split(" "): hostname = hostname.strip() - if mode == None: + if mode is None: # Just return the setting. values.append(hostname) continue @@ -1041,24 +1032,24 @@ def set_secondary_dns(hostnames, env): resolver = dns.resolver.get_default_resolver() resolver.timeout = 5 resolver.lifetime = 5 - + for item in hostnames: if not item.startswith("xfr:"): # Resolve hostname. try: - response = resolver.resolve(item, "A") + resolver.resolve(item, "A") except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout): try: - response = resolver.resolve(item, "AAAA") + resolver.resolve(item, "AAAA") except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout): raise ValueError("Could not resolve the IP address of %s." % item) else: # Validate IP address. 
try: if "/" in item[4:]: - v = ipaddress.ip_network(item[4:]) # raises a ValueError if there's a problem + ipaddress.ip_network(item[4:]) # raises a ValueError if there's a problem else: - v = ipaddress.ip_address(item[4:]) # raises a ValueError if there's a problem + ipaddress.ip_address(item[4:]) # raises a ValueError if there's a problem except ValueError: raise ValueError("'%s' is not an IPv4 or IPv6 address or subnet." % item[4:]) @@ -1076,13 +1067,12 @@ def get_custom_dns_records(custom_dns, qname, rtype): for qname1, rtype1, value in custom_dns: if qname1 == qname and rtype1 == rtype: yield value - return None ######################################################################## def build_recommended_dns(env): ret = [] - for (domain, zonefile, records) in build_zones(env): + for (domain, _zonefile, records) in build_zones(env): # remove records that we don't display records = [r for r in records if r[3] is not False] @@ -1091,10 +1081,7 @@ def build_recommended_dns(env): # expand qnames for i in range(len(records)): - if records[i][0] == None: - qname = domain - else: - qname = records[i][0] + "." + domain + qname = domain if records[i][0] is None else records[i][0] + "." 
+ domain records[i] = { "qname": qname, @@ -1113,7 +1100,7 @@ if __name__ == "__main__": if sys.argv[-1] == "--lint": write_custom_dns_config(get_custom_dns_config(env), env) else: - for zone, records in build_recommended_dns(env): + for _zone, records in build_recommended_dns(env): for record in records: print("; " + record['explanation']) print(record['qname'], record['rtype'], record['value'], sep="\t") diff --git a/management/email_administrator.py b/management/email_administrator.py index c87eda40..e5307e32 100755 --- a/management/email_administrator.py +++ b/management/email_administrator.py @@ -37,11 +37,11 @@ msg = MIMEMultipart('alternative') # In Python 3.6: #msg = Message() -msg['From'] = "\"%s\" <%s>" % (env['PRIMARY_HOSTNAME'], admin_addr) +msg['From'] = '"{}" <{}>'.format(env['PRIMARY_HOSTNAME'], admin_addr) msg['To'] = admin_addr -msg['Subject'] = "[%s] %s" % (env['PRIMARY_HOSTNAME'], subject) +msg['Subject'] = "[{}] {}".format(env['PRIMARY_HOSTNAME'], subject) -content_html = '
{}'.format(html.escape(content))
+content_html = f'{html.escape(content)}'
msg.attach(MIMEText(content, 'plain'))
msg.attach(MIMEText(content_html, 'html'))
diff --git a/management/mail_log.py b/management/mail_log.py
index 1a963966..e127af3b 100755
--- a/management/mail_log.py
+++ b/management/mail_log.py
@@ -116,12 +116,11 @@ def scan_mail_log(env):
try:
import mailconfig
collector["known_addresses"] = (set(mailconfig.get_mail_users(env)) |
- set(alias[0] for alias in mailconfig.get_mail_aliases(env)))
+ {alias[0] for alias in mailconfig.get_mail_aliases(env)})
except ImportError:
pass
- print("Scanning logs from {:%Y-%m-%d %H:%M:%S} to {:%Y-%m-%d %H:%M:%S}".format(
- START_DATE, END_DATE)
+ print(f"Scanning logs from {START_DATE:%Y-%m-%d %H:%M:%S} to {END_DATE:%Y-%m-%d %H:%M:%S}"
)
# Scan the lines in the log files until the date goes out of range
@@ -227,7 +226,7 @@ def scan_mail_log(env):
],
sub_data=[
("Protocol and Source", [[
- "{} {}: {} times".format(protocol_name, host, count)
+ f"{protocol_name} {host}: {count} times"
for (protocol_name, host), count
in sorted(u["totals_by_protocol_and_host"].items(), key=lambda kv:-kv[1])
] for u in data.values()])
@@ -303,8 +302,7 @@ def scan_mail_log(env):
for date, sender, message in user_data["blocked"]:
if len(sender) > 64:
sender = sender[:32] + "…" + sender[-32:]
- user_rejects.append("%s - %s " % (date, sender))
- user_rejects.append(" %s" % message)
+ user_rejects.extend((f'{date} - {sender} ', ' %s' % message))
rejects.append(user_rejects)
print_user_table(
@@ -322,7 +320,7 @@ def scan_mail_log(env):
if collector["other-services"] and VERBOSE and False:
print_header("Other services")
print("The following unknown services were found in the log file.")
- print(" ", *sorted(list(collector["other-services"])), sep='\n│ ')
+ print(" ", *sorted(collector["other-services"]), sep='\n│ ')
def scan_mail_log_line(line, collector):
@@ -333,7 +331,7 @@ def scan_mail_log_line(line, collector):
if not m:
return True
- date, system, service, log = m.groups()
+ date, _system, service, log = m.groups()
collector["scan_count"] += 1
# print()
@@ -344,7 +342,7 @@ def scan_mail_log_line(line, collector):
# Replaced the dateutil parser with a less clever way of parsing that is roughly 4 times faster.
# date = dateutil.parser.parse(date)
-
+
# strptime fails on Feb 29 with ValueError: day is out of range for month if correct year is not provided.
# See https://bugs.python.org/issue26460
date = datetime.datetime.strptime(str(NOW.year) + ' ' + date, '%Y %b %d %H:%M:%S')
@@ -376,9 +374,9 @@ def scan_mail_log_line(line, collector):
elif service == "postfix/smtpd":
if SCAN_BLOCKED:
scan_postfix_smtpd_line(date, log, collector)
- elif service in ("postfix/qmgr", "postfix/pickup", "postfix/cleanup", "postfix/scache",
+ elif service in {"postfix/qmgr", "postfix/pickup", "postfix/cleanup", "postfix/scache",
"spampd", "postfix/anvil", "postfix/master", "opendkim", "postfix/lmtp",
- "postfix/tlsmgr", "anvil"):
+ "postfix/tlsmgr", "anvil"}:
# nothing to look at
return True
else:
@@ -392,7 +390,7 @@ def scan_mail_log_line(line, collector):
def scan_postgrey_line(date, log, collector):
""" Scan a postgrey log line and extract interesting data """
- m = re.match("action=(greylist|pass), reason=(.*?), (?:delay=\d+, )?client_name=(.*), "
+ m = re.match(r"action=(greylist|pass), reason=(.*?), (?:delay=\d+, )?client_name=(.*), "
"client_address=(.*), sender=(.*), recipient=(.*)",
log)
@@ -435,36 +433,35 @@ def scan_postfix_smtpd_line(date, log, collector):
return
# only log mail to known recipients
- if user_match(user):
- if collector["known_addresses"] is None or user in collector["known_addresses"]:
- data = collector["rejected"].get(
- user,
- {
- "blocked": [],
- "earliest": None,
- "latest": None,
- }
- )
- # simplify this one
+ if user_match(user) and (collector["known_addresses"] is None or user in collector["known_addresses"]):
+ data = collector["rejected"].get(
+ user,
+ {
+ "blocked": [],
+ "earliest": None,
+ "latest": None,
+ }
+ )
+ # simplify this one
+ m = re.search(
+ r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)", message
+ )
+ if m:
+ message = "ip blocked: " + m.group(2)
+ else:
+ # simplify this one too
m = re.search(
- r"Client host \[(.*?)\] blocked using zen.spamhaus.org; (.*)", message
+ r"Sender address \[.*@(.*)\] blocked using dbl.spamhaus.org; (.*)", message
)
if m:
- message = "ip blocked: " + m.group(2)
- else:
- # simplify this one too
- m = re.search(
- r"Sender address \[.*@(.*)\] blocked using dbl.spamhaus.org; (.*)", message
- )
- if m:
- message = "domain blocked: " + m.group(2)
+ message = "domain blocked: " + m.group(2)
- if data["earliest"] is None:
- data["earliest"] = date
- data["latest"] = date
- data["blocked"].append((date, sender, message))
+ if data["earliest"] is None:
+ data["earliest"] = date
+ data["latest"] = date
+ data["blocked"].append((date, sender, message))
- collector["rejected"][user] = data
+ collector["rejected"][user] = data
def scan_dovecot_login_line(date, log, collector, protocol_name):
@@ -500,7 +497,7 @@ def add_login(user, date, protocol_name, host, collector):
data["totals_by_protocol"][protocol_name] += 1
data["totals_by_protocol_and_host"][(protocol_name, host)] += 1
- if host not in ("127.0.0.1", "::1") or True:
+ if host not in {"127.0.0.1", "::1"} or True:
data["activity-by-hour"][protocol_name][date.hour] += 1
collector["logins"][user] = data
@@ -514,7 +511,7 @@ def scan_postfix_lmtp_line(date, log, collector):
"""
- m = re.match("([A-Z0-9]+): to=<(\S+)>, .* Saved", log)
+ m = re.match(r"([A-Z0-9]+): to=<(\S+)>, .* Saved", log)
if m:
_, user = m.groups()
@@ -550,12 +547,12 @@ def scan_postfix_submission_line(date, log, collector):
"""
# Match both the 'plain' and 'login' sasl methods, since both authentication methods are
- # allowed by Dovecot. Exclude trailing comma after the username when additional fields
+ # allowed by Dovecot. Exclude trailing comma after the username when additional fields
# follow after.
- m = re.match("([A-Z0-9]+): client=(\S+), sasl_method=(PLAIN|LOGIN), sasl_username=(\S+)(? 32 else d[row])
col_left[col] = True
elif isinstance(d[row], datetime.datetime):
- col_str = "{:<20}".format(str(d[row]))
+ col_str = f"{d[row]!s:<20}"
col_left[col] = True
else:
temp = "{:>%s}" % max(5, len(l) + 1, len(str(d[row])) + 1)
@@ -684,7 +679,7 @@ def print_user_table(users, data=None, sub_data=None, activity=None, latest=None
data_accum[col] += d[row]
try:
- if None not in [latest, earliest]:
+ if None not in {latest, earliest}:
vert_pos = len(line)
e = earliest[row]
l = latest[row]
@@ -712,10 +707,7 @@ def print_user_table(users, data=None, sub_data=None, activity=None, latest=None
if sub_data is not None:
for l, d in sub_data:
if d[row]:
- lines.append("┬")
- lines.append("│ %s" % l)
- lines.append("├─%s─" % (len(l) * "─"))
- lines.append("│")
+ lines.extend(('┬', '│ %s' % l, '├─%s─' % (len(l) * '─'), '│'))
max_len = 0
for v in list(d[row]):
lines.append("│ %s" % v)
@@ -740,7 +732,7 @@ def print_user_table(users, data=None, sub_data=None, activity=None, latest=None
else:
header += l.rjust(max(5, len(l) + 1, col_widths[col]))
- if None not in (latest, earliest):
+ if None not in {latest, earliest}:
header += " │ timespan "
lines.insert(0, header.rstrip())
@@ -765,7 +757,7 @@ def print_user_table(users, data=None, sub_data=None, activity=None, latest=None
footer += temp.format(data_accum[row])
try:
- if None not in [latest, earliest]:
+ if None not in {latest, earliest}:
max_l = max(latest)
min_e = min(earliest)
timespan = relativedelta(max_l, min_e)
@@ -844,7 +836,7 @@ if __name__ == "__main__":
END_DATE = args.enddate
if args.timespan == 'today':
args.timespan = 'day'
- print("Setting end date to {}".format(END_DATE))
+ print(f"Setting end date to {END_DATE}")
START_DATE = END_DATE - TIME_DELTAS[args.timespan]
diff --git a/management/mailconfig.py b/management/mailconfig.py
index 2fcb9703..39ca1b6e 100755
--- a/management/mailconfig.py
+++ b/management/mailconfig.py
@@ -9,7 +9,7 @@
# Python 3 in setup/questions.sh to validate the email
# address entered by the user.
-import subprocess, shutil, os, sqlite3, re
+import os, sqlite3, re
import utils
from email_validator import validate_email as validate_email_, EmailNotValidError
import idna
@@ -86,10 +86,7 @@ def prettify_idn_email_address(email):
def is_dcv_address(email):
email = email.lower()
- for localpart in ("admin", "administrator", "postmaster", "hostmaster", "webmaster", "abuse"):
- if email.startswith(localpart+"@") or email.startswith(localpart+"+"):
- return True
- return False
+ return any(email.startswith((localpart + "@", localpart + "+")) for localpart in ("admin", "administrator", "postmaster", "hostmaster", "webmaster", "abuse"))
def open_database(env, with_connection=False):
conn = sqlite3.connect(env["STORAGE_ROOT"] + "/mail/users.sqlite")
@@ -192,8 +189,7 @@ def get_mail_aliases(env):
aliases = { row[0]: row for row in c.fetchall() } # make dict
# put in a canonical order: sort by domain, then by email address lexicographically
- aliases = [ aliases[address] for address in utils.sort_email_addresses(aliases.keys(), env) ]
- return aliases
+ return [ aliases[address] for address in utils.sort_email_addresses(aliases.keys(), env) ]
def get_mail_aliases_ex(env):
# Returns a complex data structure of all mail aliases, similar
@@ -225,7 +221,7 @@ def get_mail_aliases_ex(env):
domain = get_domain(address)
# add to list
- if not domain in domains:
+ if domain not in domains:
domains[domain] = {
"domain": domain,
"aliases": [],
@@ -477,10 +473,7 @@ def add_mail_alias(address, forwards_to, permitted_senders, env, update_if_exist
forwards_to = ",".join(validated_forwards_to)
- if len(validated_permitted_senders) == 0:
- permitted_senders = None
- else:
- permitted_senders = ",".join(validated_permitted_senders)
+ permitted_senders = None if len(validated_permitted_senders) == 0 else ",".join(validated_permitted_senders)
conn, c = open_database(env, with_connection=True)
try:
@@ -498,6 +491,7 @@ def add_mail_alias(address, forwards_to, permitted_senders, env, update_if_exist
if do_kick:
# Update things in case any new domains are added.
return kick(env, return_status)
+ return None
def remove_mail_alias(address, env, do_kick=True):
# convert Unicode domain to IDNA
@@ -513,10 +507,11 @@ def remove_mail_alias(address, env, do_kick=True):
if do_kick:
# Update things in case any domains are removed.
return kick(env, "alias removed")
+ return None
def add_auto_aliases(aliases, env):
conn, c = open_database(env, with_connection=True)
- c.execute("DELETE FROM auto_aliases");
+ c.execute("DELETE FROM auto_aliases")
for source, destination in aliases.items():
c.execute("INSERT INTO auto_aliases (source, destination) VALUES (?, ?)", (source, destination))
conn.commit()
@@ -586,14 +581,14 @@ def kick(env, mail_result=None):
# Remove auto-generated postmaster/admin/abuse aliases from the main aliases table.
# They are now stored in the auto_aliases table.
- for address, forwards_to, permitted_senders, auto in get_mail_aliases(env):
+ for address, forwards_to, _permitted_senders, auto in get_mail_aliases(env):
user, domain = address.split("@")
- if user in ("postmaster", "admin", "abuse") \
+ if user in {"postmaster", "admin", "abuse"} \
and address not in required_aliases \
and forwards_to == get_system_administrator(env) \
and not auto:
remove_mail_alias(address, env, do_kick=False)
- results.append("removed alias %s (was to %s; domain no longer used for email)\n" % (address, forwards_to))
+ results.append(f"removed alias {address} (was to {forwards_to}; domain no longer used for email)\n")
# Update DNS and nginx in case any domains are added/removed.
@@ -608,9 +603,11 @@ def kick(env, mail_result=None):
def validate_password(pw):
# validate password
if pw.strip() == "":
- raise ValueError("No password provided.")
+ msg = "No password provided."
+ raise ValueError(msg)
if len(pw) < 8:
- raise ValueError("Passwords must be at least eight characters.")
+ msg = "Passwords must be at least eight characters."
+ raise ValueError(msg)
if __name__ == "__main__":
import sys
diff --git a/management/mfa.py b/management/mfa.py
index 32eb5183..6b56ad86 100644
--- a/management/mfa.py
+++ b/management/mfa.py
@@ -41,9 +41,11 @@ def enable_mfa(email, type, secret, token, label, env):
# Sanity check with the provide current token.
totp = pyotp.TOTP(secret)
if not totp.verify(token, valid_window=1):
- raise ValueError("Invalid token.")
+ msg = "Invalid token."
+ raise ValueError(msg)
else:
- raise ValueError("Invalid MFA type.")
+ msg = "Invalid MFA type."
+ raise ValueError(msg)
conn, c = open_database(env, with_connection=True)
c.execute('INSERT INTO mfa (user_id, type, secret, label) VALUES (?, ?, ?, ?)', (get_user_id(email, c), type, secret, label))
@@ -66,10 +68,12 @@ def disable_mfa(email, mfa_id, env):
return c.rowcount > 0
def validate_totp_secret(secret):
- if type(secret) != str or secret.strip() == "":
- raise ValueError("No secret provided.")
+ if not isinstance(secret, str) or secret.strip() == "":
+ msg = "No secret provided."
+ raise ValueError(msg)
if len(secret) != 32:
- raise ValueError("Secret should be a 32 characters base32 string")
+ msg = "Secret should be a 32 characters base32 string"
+ raise ValueError(msg)
def provision_totp(email, env):
# Make a new secret.
diff --git a/management/ssl_certificates.py b/management/ssl_certificates.py
index 19f0266a..ab0cea4d 100755
--- a/management/ssl_certificates.py
+++ b/management/ssl_certificates.py
@@ -4,7 +4,8 @@
import os, os.path, re, shutil, subprocess, tempfile
from utils import shell, safe_domain_name, sort_domains
-import idna
+import functools
+import operator
# SELECTING SSL CERTIFICATES FOR USE IN WEB
@@ -83,9 +84,8 @@ def get_ssl_certificates(env):
for domain in cert_domains:
# The primary hostname can only use a certificate mapped
# to the system private key.
- if domain == env['PRIMARY_HOSTNAME']:
- if cert["private_key"]["filename"] != os.path.join(env['STORAGE_ROOT'], 'ssl', 'ssl_private_key.pem'):
- continue
+ if domain == env['PRIMARY_HOSTNAME'] and cert["private_key"]["filename"] != os.path.join(env['STORAGE_ROOT'], 'ssl', 'ssl_private_key.pem'):
+ continue
domains.setdefault(domain, []).append(cert)
@@ -150,13 +150,12 @@ def get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=False
"certificate_object": load_pem(load_cert_chain(ssl_certificate)[0]),
}
- if use_main_cert:
- if domain == env['PRIMARY_HOSTNAME']:
- # The primary domain must use the server certificate because
- # it is hard-coded in some service configuration files.
- return system_certificate
+ if use_main_cert and domain == env['PRIMARY_HOSTNAME']:
+ # The primary domain must use the server certificate because
+ # it is hard-coded in some service configuration files.
+ return system_certificate
- wildcard_domain = re.sub("^[^\.]+", "*", domain)
+ wildcard_domain = re.sub(r"^[^\.]+", "*", domain)
if domain in ssl_certificates:
return ssl_certificates[domain]
elif wildcard_domain in ssl_certificates:
@@ -212,7 +211,7 @@ def get_certificates_to_provision(env, limit_domains=None, show_valid_certs=True
if not value: continue # IPv6 is not configured
response = query_dns(domain, rtype)
if response != normalize_ip(value):
- bad_dns.append("%s (%s)" % (response, rtype))
+ bad_dns.append(f"{response} ({rtype})")
if bad_dns:
domains_cant_provision[domain] = "The domain name does not resolve to this machine: " \
@@ -265,11 +264,11 @@ def provision_certificates(env, limit_domains):
# primary domain listed in each certificate.
from dns_update import get_dns_zones
certs = { }
- for zone, zonefile in get_dns_zones(env):
+ for zone, _zonefile in get_dns_zones(env):
certs[zone] = [[]]
for domain in sort_domains(domains, env):
# Does the domain end with any domain we've seen so far.
- for parent in certs.keys():
+ for parent in certs:
if domain.endswith("." + parent):
# Add this to the parent's list of domains.
# Start a new group if the list already has
@@ -286,7 +285,7 @@ def provision_certificates(env, limit_domains):
# Flatten to a list of lists of domains (from a mapping). Remove empty
# lists (zones with no domains that need certs).
- certs = sum(certs.values(), [])
+ certs = functools.reduce(operator.iadd, certs.values(), [])
certs = [_ for _ in certs if len(_) > 0]
# Prepare to provision.
@@ -414,7 +413,7 @@ def create_csr(domain, ssl_key, country_code, env):
"openssl", "req", "-new",
"-key", ssl_key,
"-sha256",
- "-subj", "/C=%s/CN=%s" % (country_code, domain)])
+ "-subj", f"/C={country_code}/CN={domain}"])
def install_cert(domain, ssl_cert, ssl_chain, env, raw=False):
# Write the combined cert+chain to a temporary path and validate that it is OK.
@@ -450,8 +449,8 @@ def install_cert_copy_file(fn, env):
from cryptography.hazmat.primitives import hashes
from binascii import hexlify
cert = load_pem(load_cert_chain(fn)[0])
- all_domains, cn = get_certificate_domains(cert)
- path = "%s-%s-%s.pem" % (
+ _all_domains, cn = get_certificate_domains(cert)
+ path = "{}-{}-{}.pem".format(
safe_domain_name(cn), # common name, which should be filename safe because it is IDNA-encoded, but in case of a malformed cert make sure it's ok to use as a filename
cert.not_valid_after.date().isoformat().replace("-", ""), # expiration date
hexlify(cert.fingerprint(hashes.SHA256())).decode("ascii")[0:8], # fingerprint prefix
@@ -522,12 +521,12 @@ def check_certificate(domain, ssl_certificate, ssl_private_key, warn_if_expiring
# First check that the domain name is one of the names allowed by
# the certificate.
if domain is not None:
- certificate_names, cert_primary_name = get_certificate_domains(cert)
+ certificate_names, _cert_primary_name = get_certificate_domains(cert)
# Check that the domain appears among the acceptable names, or a wildcard
# form of the domain name (which is a stricter check than the specs but
# should work in normal cases).
- wildcard_domain = re.sub("^[^\.]+", "*", domain)
+ wildcard_domain = re.sub(r"^[^\.]+", "*", domain)
if domain not in certificate_names and wildcard_domain not in certificate_names:
return ("The certificate is for the wrong domain name. It is for %s."
% ", ".join(sorted(certificate_names)), None)
@@ -538,7 +537,7 @@ def check_certificate(domain, ssl_certificate, ssl_private_key, warn_if_expiring
with open(ssl_private_key, 'rb') as f:
priv_key = load_pem(f.read())
except ValueError as e:
- return ("The private key file %s is not a private key file: %s" % (ssl_private_key, str(e)), None)
+ return (f"The private key file {ssl_private_key} is not a private key file: {e!s}", None)
if not isinstance(priv_key, RSAPrivateKey):
return ("The private key file %s is not a private key file." % ssl_private_key, None)
@@ -566,7 +565,7 @@ def check_certificate(domain, ssl_certificate, ssl_private_key, warn_if_expiring
import datetime
now = datetime.datetime.utcnow()
if not(cert.not_valid_before <= now <= cert.not_valid_after):
- return ("The certificate has expired or is not yet valid. It is valid from %s to %s." % (cert.not_valid_before, cert.not_valid_after), None)
+ return (f"The certificate has expired or is not yet valid. It is valid from {cert.not_valid_before} to {cert.not_valid_after}.", None)
# Next validate that the certificate is valid. This checks whether the certificate
# is self-signed, that the chain of trust makes sense, that it is signed by a CA
@@ -625,7 +624,8 @@ def load_cert_chain(pemfile):
pem = f.read() + b"\n" # ensure trailing newline
pemblocks = re.findall(re_pem, pem)
if len(pemblocks) == 0:
- raise ValueError("File does not contain valid PEM data.")
+ msg = "File does not contain valid PEM data."
+ raise ValueError(msg)
return pemblocks
def load_pem(pem):
@@ -636,9 +636,10 @@ def load_pem(pem):
from cryptography.hazmat.backends import default_backend
pem_type = re.match(b"-+BEGIN (.*?)-+[\r\n]", pem)
if pem_type is None:
- raise ValueError("File is not a valid PEM-formatted file.")
+ msg = "File is not a valid PEM-formatted file."
+ raise ValueError(msg)
pem_type = pem_type.group(1)
- if pem_type in (b"RSA PRIVATE KEY", b"PRIVATE KEY"):
+ if pem_type in {b"RSA PRIVATE KEY", b"PRIVATE KEY"}:
return serialization.load_pem_private_key(pem, password=None, backend=default_backend())
if pem_type == b"CERTIFICATE":
return load_pem_x509_certificate(pem, default_backend())
diff --git a/management/status_checks.py b/management/status_checks.py
index e8faf6fb..cd3e9f28 100755
--- a/management/status_checks.py
+++ b/management/status_checks.py
@@ -4,11 +4,10 @@
# TLS certificates have been signed, etc., and if not tells the user
# what to do next.
-import sys, os, os.path, re, subprocess, datetime, multiprocessing.pool
+import sys, os, os.path, re, datetime, multiprocessing.pool
import asyncio
import dns.reversename, dns.resolver
-import dateutil.parser, dateutil.tz
import idna
import psutil
import postfix_mta_sts_resolver.resolver
@@ -89,7 +88,7 @@ def run_services_checks(env, output, pool):
all_running = True
fatal = False
ret = pool.starmap(check_service, ((i, service, env) for i, service in enumerate(get_services())), chunksize=1)
- for i, running, fatal2, output2 in sorted(ret):
+ for _i, running, fatal2, output2 in sorted(ret):
if output2 is None: continue # skip check (e.g. no port was set, e.g. no sshd)
all_running = all_running and running
fatal = fatal or fatal2
@@ -125,7 +124,7 @@ def check_service(i, service, env):
try:
s.connect((ip, service["port"]))
return True
- except OSError as e:
+ except OSError:
# timed out or some other odd error
return False
finally:
@@ -152,18 +151,17 @@ def check_service(i, service, env):
output.print_error("%s is not running (port %d)." % (service['name'], service['port']))
# Why is nginx not running?
- if not running and service["port"] in (80, 443):
+ if not running and service["port"] in {80, 443}:
output.print_line(shell('check_output', ['nginx', '-t'], capture_stderr=True, trap=True)[1].strip())
+ # Service should be running locally.
+ elif try_connect("127.0.0.1"):
+ running = True
else:
- # Service should be running locally.
- if try_connect("127.0.0.1"):
- running = True
- else:
- output.print_error("%s is not running (port %d)." % (service['name'], service['port']))
+ output.print_error("%s is not running (port %d)." % (service['name'], service['port']))
# Flag if local DNS is not running.
- if not running and service["port"] == 53 and service["public"] == False:
+ if not running and service["port"] == 53 and service["public"] is False:
fatal = True
return (i, running, fatal, output)
@@ -195,7 +193,7 @@ def check_ufw(env, output):
for service in get_services():
if service["public"] and not is_port_allowed(ufw, service["port"]):
not_allowed_ports += 1
- output.print_error("Port %s (%s) should be allowed in the firewall, please re-run the setup." % (service["port"], service["name"]))
+ output.print_error("Port {} ({}) should be allowed in the firewall, please re-run the setup.".format(service["port"], service["name"]))
if not_allowed_ports == 0:
output.print_ok("Firewall is active.")
@@ -213,10 +211,10 @@ def check_ssh_password(env, output):
# the configuration file.
if not os.path.exists("/etc/ssh/sshd_config"):
return
- with open("/etc/ssh/sshd_config", "r") as f:
+ with open("/etc/ssh/sshd_config", encoding="utf-8") as f:
sshd = f.read()
- if re.search("\nPasswordAuthentication\s+yes", sshd) \
- or not re.search("\nPasswordAuthentication\s+no", sshd):
+ if re.search("\nPasswordAuthentication\\s+yes", sshd) \
+ or not re.search("\nPasswordAuthentication\\s+no", sshd):
output.print_error("""The SSH server on this machine permits password-based login. A more secure
way to log in is using a public key. Add your SSH public key to $HOME/.ssh/authorized_keys, check
that you can log in without a password, set the option 'PasswordAuthentication no' in
@@ -237,7 +235,7 @@ def check_software_updates(env, output):
else:
output.print_error("There are %d software packages that can be updated." % len(pkgs))
for p in pkgs:
- output.print_line("%s (%s)" % (p["package"], p["version"]))
+ output.print_line("{} ({})".format(p["package"], p["version"]))
def check_system_aliases(env, output):
# Check that the administrator alias exists since that's where all
@@ -269,8 +267,7 @@ def check_free_disk_space(rounded_values, env, output):
except:
backup_cache_count = 0
if backup_cache_count > 1:
- output.print_warning("The backup cache directory {} has more than one backup target cache. Consider clearing this directory to save disk space."
- .format(backup_cache_path))
+ output.print_warning(f"The backup cache directory {backup_cache_path} has more than one backup target cache. Consider clearing this directory to save disk space.")
def check_free_memory(rounded_values, env, output):
# Check free memory.
@@ -296,7 +293,7 @@ def run_network_checks(env, output):
# Stop if we cannot make an outbound connection on port 25. Many residential
# networks block outbound port 25 to prevent their network from sending spam.
# See if we can reach one of Google's MTAs with a 5-second timeout.
- code, ret = shell("check_call", ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"], trap=True)
+ _code, ret = shell("check_call", ["/bin/nc", "-z", "-w5", "aspmx.l.google.com", "25"], trap=True)
if ret == 0:
output.print_ok("Outbound mail (SMTP port 25) is not blocked.")
else:
@@ -318,9 +315,8 @@ def run_network_checks(env, output):
elif zen == "[Not Set]":
output.print_warning("Could not connect to zen.spamhaus.org. We could not determine whether your server's IP address is blacklisted. Please try again later.")
else:
- output.print_error("""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
- which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
- % (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))
+ output.print_error("""The IP address of this machine {} is listed in the Spamhaus Block List (code {}),
+ which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/{}.""".format(env['PUBLIC_IP'], zen, env['PUBLIC_IP']))
def run_domain_checks(rounded_time, env, output, pool, domains_to_check=None):
# Get the list of domains we handle mail for.
@@ -341,7 +337,7 @@ def run_domain_checks(rounded_time, env, output, pool, domains_to_check=None):
domains_to_check = [
d for d in domains_to_check
if not (
- d.split(".", 1)[0] in ("www", "autoconfig", "autodiscover", "mta-sts")
+ d.split(".", 1)[0] in {"www", "autoconfig", "autodiscover", "mta-sts"}
and len(d.split(".", 1)) == 2
and d.split(".", 1)[1] in domains_to_check
)
@@ -423,10 +419,9 @@ def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
# If a DS record is set on the zone containing this domain, check DNSSEC now.
has_dnssec = False
for zone in dns_domains:
- if zone == domain or domain.endswith("." + zone):
- if query_dns(zone, "DS", nxdomain=None) is not None:
- has_dnssec = True
- check_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True)
+ if (zone == domain or domain.endswith("." + zone)) and query_dns(zone, "DS", nxdomain=None) is not None:
+ has_dnssec = True
+ check_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True)
ip = query_dns(domain, "A")
ns_ips = query_dns("ns1." + domain, "A") + '/' + query_dns("ns2." + domain, "A")
@@ -438,44 +433,41 @@ def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
# the nameserver, are reporting the right info --- but if the glue is incorrect this
# will probably fail.
if ns_ips == env['PUBLIC_IP'] + '/' + env['PUBLIC_IP']:
- output.print_ok("Nameserver glue records are correct at registrar. [ns1/ns2.%s ↦ %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
+ output.print_ok("Nameserver glue records are correct at registrar. [ns1/ns2.{} ↦ {}]".format(env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
elif ip == env['PUBLIC_IP']:
# The NS records are not what we expect, but the domain resolves correctly, so
# the user may have set up external DNS. List this discrepancy as a warning.
- output.print_warning("""Nameserver glue records (ns1.%s and ns2.%s) should be configured at your domain name
- registrar as having the IP address of this box (%s). They currently report addresses of %s. If you have set up External DNS, this may be OK."""
- % (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))
+ output.print_warning("""Nameserver glue records (ns1.{} and ns2.{}) should be configured at your domain name
+ registrar as having the IP address of this box ({}). They currently report addresses of {}. If you have set up External DNS, this may be OK.""".format(env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))
else:
- output.print_error("""Nameserver glue records are incorrect. The ns1.%s and ns2.%s nameservers must be configured at your domain name
- registrar as having the IP address %s. They currently report addresses of %s. It may take several hours for
- public DNS to update after a change."""
- % (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))
+ output.print_error("""Nameserver glue records are incorrect. The ns1.{} and ns2.{} nameservers must be configured at your domain name
+ registrar as having the IP address {}. They currently report addresses of {}. It may take several hours for
+ public DNS to update after a change.""".format(env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))
# Check that PRIMARY_HOSTNAME resolves to PUBLIC_IP[V6] in public DNS.
ipv6 = query_dns(domain, "AAAA") if env.get("PUBLIC_IPV6") else None
if ip == env['PUBLIC_IP'] and not (ipv6 and env['PUBLIC_IPV6'] and ipv6 != normalize_ip(env['PUBLIC_IPV6'])):
- output.print_ok("Domain resolves to box's IP address. [%s ↦ %s]" % (env['PRIMARY_HOSTNAME'], my_ips))
+ output.print_ok("Domain resolves to box's IP address. [{} ↦ {}]".format(env['PRIMARY_HOSTNAME'], my_ips))
else:
- output.print_error("""This domain must resolve to your box's IP address (%s) in public DNS but it currently resolves
- to %s. It may take several hours for public DNS to update after a change. This problem may result from other
- issues listed above."""
- % (my_ips, ip + ((" / " + ipv6) if ipv6 is not None else "")))
+ output.print_error("""This domain must resolve to your box's IP address ({}) in public DNS but it currently resolves
+ to {}. It may take several hours for public DNS to update after a change. This problem may result from other
+ issues listed above.""".format(my_ips, ip + ((" / " + ipv6) if ipv6 is not None else "")))
# Check reverse DNS matches the PRIMARY_HOSTNAME. Note that it might not be
# a DNS zone if it is a subdomain of another domain we have a zone for.
existing_rdns_v4 = query_dns(dns.reversename.from_address(env['PUBLIC_IP']), "PTR")
existing_rdns_v6 = query_dns(dns.reversename.from_address(env['PUBLIC_IPV6']), "PTR") if env.get("PUBLIC_IPV6") else None
- if existing_rdns_v4 == domain and existing_rdns_v6 in (None, domain):
- output.print_ok("Reverse DNS is set correctly at ISP. [%s ↦ %s]" % (my_ips, env['PRIMARY_HOSTNAME']))
+ if existing_rdns_v4 == domain and existing_rdns_v6 in {None, domain}:
+ output.print_ok("Reverse DNS is set correctly at ISP. [{} ↦ {}]".format(my_ips, env['PRIMARY_HOSTNAME']))
elif existing_rdns_v4 == existing_rdns_v6 or existing_rdns_v6 is None:
- output.print_error("""Your box's reverse DNS is currently %s, but it should be %s. Your ISP or cloud provider will have instructions
- on setting up reverse DNS for your box.""" % (existing_rdns_v4, domain) )
+ output.print_error(f"""Your box's reverse DNS is currently {existing_rdns_v4}, but it should be {domain}. Your ISP or cloud provider will have instructions
+ on setting up reverse DNS for your box.""" )
else:
- output.print_error("""Your box's reverse DNS is currently %s (IPv4) and %s (IPv6), but it should be %s. Your ISP or cloud provider will have instructions
- on setting up reverse DNS for your box.""" % (existing_rdns_v4, existing_rdns_v6, domain) )
+ output.print_error(f"""Your box's reverse DNS is currently {existing_rdns_v4} (IPv4) and {existing_rdns_v6} (IPv6), but it should be {domain}. Your ISP or cloud provider will have instructions
+ on setting up reverse DNS for your box.""" )
# Check the TLSA record.
tlsa_qname = "_25._tcp." + domain
@@ -489,18 +481,17 @@ def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
# since TLSA shouldn't be used without DNSSEC.
output.print_warning("""The DANE TLSA record for incoming mail is not set. This is optional.""")
else:
- output.print_error("""The DANE TLSA record for incoming mail (%s) is not correct. It is '%s' but it should be '%s'.
- It may take several hours for public DNS to update after a change."""
- % (tlsa_qname, tlsa25, tlsa25_expected))
+ output.print_error(f"""The DANE TLSA record for incoming mail ({tlsa_qname}) is not correct. It is '{tlsa25}' but it should be '{tlsa25_expected}'.
+ It may take several hours for public DNS to update after a change.""")
# Check that the hostmaster@ email address exists.
check_alias_exists("Hostmaster contact address", "hostmaster@" + domain, env, output)
def check_alias_exists(alias_name, alias, env, output):
- mail_aliases = dict([(address, receivers) for address, receivers, *_ in get_mail_aliases(env)])
+ mail_aliases = {address: receivers for address, receivers, *_ in get_mail_aliases(env)}
if alias in mail_aliases:
if mail_aliases[alias]:
- output.print_ok("%s exists as a mail alias. [%s ↦ %s]" % (alias_name, alias, mail_aliases[alias]))
+ output.print_ok(f"{alias_name} exists as a mail alias. [{alias} ↦ {mail_aliases[alias]}]")
else:
output.print_error("""You must set the destination of the mail alias for %s to direct email to you or another administrator.""" % alias)
else:
@@ -526,7 +517,7 @@ def check_dns_zone(domain, env, output, dns_zonefiles):
secondary_ns = custom_secondary_ns or ["ns2." + env['PRIMARY_HOSTNAME']]
existing_ns = query_dns(domain, "NS")
- correct_ns = "; ".join(sorted(["ns1." + env['PRIMARY_HOSTNAME']] + secondary_ns))
+ correct_ns = "; ".join(sorted(["ns1." + env["PRIMARY_HOSTNAME"], *secondary_ns]))
ip = query_dns(domain, "A")
probably_external_dns = False
@@ -535,14 +526,12 @@ def check_dns_zone(domain, env, output, dns_zonefiles):
output.print_ok("Nameservers are set correctly at registrar. [%s]" % correct_ns)
elif ip == correct_ip:
# The domain resolves correctly, so maybe the user is using External DNS.
- output.print_warning("""The nameservers set on this domain at your domain name registrar should be %s. They are currently %s.
- If you are using External DNS, this may be OK."""
- % (correct_ns, existing_ns) )
+ output.print_warning(f"""The nameservers set on this domain at your domain name registrar should be {correct_ns}. They are currently {existing_ns}.
+ If you are using External DNS, this may be OK.""" )
probably_external_dns = True
else:
- output.print_error("""The nameservers set on this domain are incorrect. They are currently %s. Use your domain name registrar's
- control panel to set the nameservers to %s."""
- % (existing_ns, correct_ns) )
+ output.print_error(f"""The nameservers set on this domain are incorrect. They are currently {existing_ns}. Use your domain name registrar's
+ control panel to set the nameservers to {correct_ns}.""" )
# Check that each custom secondary nameserver resolves the IP address.
@@ -563,7 +552,7 @@ def check_dns_zone(domain, env, output, dns_zonefiles):
elif ip is None:
output.print_error("Secondary nameserver %s is not configured to resolve this domain." % ns)
else:
- output.print_error("Secondary nameserver %s is not configured correctly. (It resolved this domain as %s. It should be %s.)" % (ns, ip, correct_ip))
+ output.print_error(f"Secondary nameserver {ns} is not configured correctly. (It resolved this domain as {ip}. It should be {correct_ip}.)")
def check_dns_zone_suggestions(domain, env, output, dns_zonefiles, domains_with_a_records):
# Warn if a custom DNS record is preventing this or the automatic www redirect from
@@ -592,7 +581,7 @@ def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
expected_ds_records = { }
ds_file = '/etc/nsd/zones/' + dns_zonefiles[domain] + '.ds'
if not os.path.exists(ds_file): return # Domain is in our database but DNS has not yet been updated.
- with open(ds_file) as f:
+ with open(ds_file, encoding="utf-8") as f:
for rr_ds in f:
rr_ds = rr_ds.rstrip()
ds_keytag, ds_alg, ds_digalg, ds_digest = rr_ds.split("\t")[4].split(" ")
@@ -601,7 +590,7 @@ def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
# record that we suggest using is for the KSK (and that's how the DS records were generated).
# We'll also give the nice name for the key algorithm.
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
- with open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key'), 'r') as f:
+ with open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key'), encoding="utf-8") as f:
dnsssec_pubkey = f.read().split("\t")[3].split(" ")[3]
expected_ds_records[ (ds_keytag, ds_alg, ds_digalg, ds_digest) ] = {
@@ -634,10 +623,10 @@ def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
#
# But it may not be preferred. Only algorithm 13 is preferred. Warn if any of the
# matched zones uses a different algorithm.
- if set(r[1] for r in matched_ds) == { '13' } and set(r[2] for r in matched_ds) <= { '2', '4' }: # all are alg 13 and digest type 2 or 4
+ if {r[1] for r in matched_ds} == { '13' } and {r[2] for r in matched_ds} <= { '2', '4' }: # all are alg 13 and digest type 2 or 4
output.print_ok("DNSSEC 'DS' record is set correctly at registrar.")
return
- elif len([r for r in matched_ds if r[1] == '13' and r[2] in ( '2', '4' )]) > 0: # some but not all are alg 13
+ elif len([r for r in matched_ds if r[1] == '13' and r[2] in { '2', '4' }]) > 0: # some but not all are alg 13
output.print_ok("DNSSEC 'DS' record is set correctly at registrar. (Records using algorithm other than ECDSAP256SHA256 and digest types other than SHA-256/384 should be removed.)")
return
else: # no record uses alg 13
@@ -669,8 +658,8 @@ def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
output.print_line("----------")
output.print_line("Key Tag: " + ds_suggestion['keytag'])
output.print_line("Key Flags: KSK / 257")
- output.print_line("Algorithm: %s / %s" % (ds_suggestion['alg'], ds_suggestion['alg_name']))
- output.print_line("Digest Type: %s / %s" % (ds_suggestion['digalg'], ds_suggestion['digalg_name']))
+ output.print_line("Algorithm: {} / {}".format(ds_suggestion['alg'], ds_suggestion['alg_name']))
+ output.print_line("Digest Type: {} / {}".format(ds_suggestion['digalg'], ds_suggestion['digalg_name']))
output.print_line("Digest: " + ds_suggestion['digest'])
output.print_line("Public Key: ")
output.print_line(ds_suggestion['pubkey'], monospace=True)
@@ -681,7 +670,7 @@ def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
output.print_line("")
output.print_line("The DS record is currently set to:")
for rr in sorted(ds):
- output.print_line("Key Tag: {0}, Algorithm: {1}, Digest Type: {2}, Digest: {3}".format(*rr))
+ output.print_line("Key Tag: {}, Algorithm: {}, Digest Type: {}, Digest: {}".format(*rr))
def check_mail_domain(domain, env, output):
# Check the MX record.
@@ -689,21 +678,19 @@ def check_mail_domain(domain, env, output):
recommended_mx = "10 " + env['PRIMARY_HOSTNAME']
mx = query_dns(domain, "MX", nxdomain=None)
- if mx is None:
- mxhost = None
- elif mx == "[timeout]":
+ if mx is None or mx == "[timeout]":
mxhost = None
else:
# query_dns returns a semicolon-delimited list
# of priority-host pairs.
mxhost = mx.split('; ')[0].split(' ')[1]
- if mxhost == None:
+ if mxhost is None:
# A missing MX record is okay on the primary hostname because
# the primary hostname's A record (the MX fallback) is... itself,
# which is what we want the MX to be.
if domain == env['PRIMARY_HOSTNAME']:
- output.print_ok("Domain's email is directed to this domain. [%s has no MX record, which is ok]" % (domain,))
+ output.print_ok(f"Domain's email is directed to this domain. [{domain} has no MX record, which is ok]")
# And a missing MX record is okay on other domains if the A record
# matches the A record of the PRIMARY_HOSTNAME. Actually this will
@@ -711,17 +698,17 @@ def check_mail_domain(domain, env, output):
else:
domain_a = query_dns(domain, "A", nxdomain=None)
primary_a = query_dns(env['PRIMARY_HOSTNAME'], "A", nxdomain=None)
- if domain_a != None and domain_a == primary_a:
- output.print_ok("Domain's email is directed to this domain. [%s has no MX record but its A record is OK]" % (domain,))
+ if domain_a is not None and domain_a == primary_a:
+ output.print_ok(f"Domain's email is directed to this domain. [{domain} has no MX record but its A record is OK]")
else:
- output.print_error("""This domain's DNS MX record is not set. It should be '%s'. Mail will not
+ output.print_error(f"""This domain's DNS MX record is not set. It should be '{recommended_mx}'. Mail will not
be delivered to this box. It may take several hours for public DNS to update after a
- change. This problem may result from other issues listed here.""" % (recommended_mx,))
+ change. This problem may result from other issues listed here.""")
elif mxhost == env['PRIMARY_HOSTNAME']:
- good_news = "Domain's email is directed to this domain. [%s ↦ %s]" % (domain, mx)
+ good_news = f"Domain's email is directed to this domain. [{domain} ↦ {mx}]"
if mx != recommended_mx:
- good_news += " This configuration is non-standard. The recommended configuration is '%s'." % (recommended_mx,)
+ good_news += f" This configuration is non-standard. The recommended configuration is '{recommended_mx}'."
output.print_ok(good_news)
# Check MTA-STS policy.
@@ -732,14 +719,14 @@ def check_mail_domain(domain, env, output):
if policy[1].get("mx") == [env['PRIMARY_HOSTNAME']] and policy[1].get("mode") == "enforce": # policy[0] is the policyid
output.print_ok("MTA-STS policy is present.")
else:
- output.print_error("MTA-STS policy is present but has unexpected settings. [{}]".format(policy[1]))
+ output.print_error(f"MTA-STS policy is present but has unexpected settings. [{policy[1]}]")
else:
- output.print_error("MTA-STS policy is missing: {}".format(valid))
+ output.print_error(f"MTA-STS policy is missing: {valid}")
else:
- output.print_error("""This domain's DNS MX record is incorrect. It is currently set to '%s' but should be '%s'. Mail will not
+ output.print_error(f"""This domain's DNS MX record is incorrect. It is currently set to '{mx}' but should be '{recommended_mx}'. Mail will not
be delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from
- other issues listed here.""" % (mx, recommended_mx))
+ other issues listed here.""")
# Check that the postmaster@ email address exists. Not required if the domain has a
# catch-all address or domain alias.
@@ -753,13 +740,13 @@ def check_mail_domain(domain, env, output):
if dbl is None:
output.print_ok("Domain is not blacklisted by dbl.spamhaus.org.")
elif dbl == "[timeout]":
- output.print_warning("Connection to dbl.spamhaus.org timed out. We could not determine whether the domain {} is blacklisted. Please try again later.".format(domain))
+ output.print_warning(f"Connection to dbl.spamhaus.org timed out. We could not determine whether the domain {domain} is blacklisted. Please try again later.")
elif dbl == "[Not Set]":
- output.print_warning("Could not connect to dbl.spamhaus.org. We could not determine whether the domain {} is blacklisted. Please try again later.".format(domain))
+ output.print_warning(f"Could not connect to dbl.spamhaus.org. We could not determine whether the domain {domain} is blacklisted. Please try again later.")
else:
- output.print_error("""This domain is listed in the Spamhaus Domain Block List (code %s),
+ output.print_error(f"""This domain is listed in the Spamhaus Domain Block List (code {dbl}),
which may prevent recipients from receiving your mail.
- See http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/%s.""" % (dbl, domain))
+ See http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/{domain}.""")
def check_web_domain(domain, rounded_time, ssl_certificates, env, output):
# See if the domain's A record resolves to our PUBLIC_IP. This is already checked
@@ -773,13 +760,13 @@ def check_web_domain(domain, rounded_time, ssl_certificates, env, output):
if value == normalize_ip(expected):
ok_values.append(value)
else:
- output.print_error("""This domain should resolve to your box's IP address (%s %s) if you would like the box to serve
- webmail or a website on this domain. The domain currently resolves to %s in public DNS. It may take several hours for
- public DNS to update after a change. This problem may result from other issues listed here.""" % (rtype, expected, value))
+ output.print_error(f"""This domain should resolve to your box's IP address ({rtype} {expected}) if you would like the box to serve
+ webmail or a website on this domain. The domain currently resolves to {value} in public DNS. It may take several hours for
+ public DNS to update after a change. This problem may result from other issues listed here.""")
return
# If both A and AAAA are correct...
- output.print_ok("Domain resolves to this box's IP address. [%s ↦ %s]" % (domain, '; '.join(ok_values)))
+ output.print_ok("Domain resolves to this box's IP address. [{} ↦ {}]".format(domain, '; '.join(ok_values)))
# We need a TLS certificate for PRIMARY_HOSTNAME because that's where the
@@ -826,7 +813,7 @@ def query_dns(qname, rtype, nxdomain='[Not Set]', at=None, as_list=False):
# be expressed in equivalent string forms. Canonicalize the form before
# returning them. The caller should normalize any IP addresses the result
# of this method is compared with.
- if rtype in ("A", "AAAA"):
+ if rtype in {"A", "AAAA"}:
response = [normalize_ip(str(r)) for r in response]
if as_list:
@@ -842,7 +829,7 @@ def check_ssl_cert(domain, rounded_time, ssl_certificates, env, output):
# Check that TLS certificate is signed.
# Skip the check if the A record is not pointed here.
- if query_dns(domain, "A", None) not in (env['PUBLIC_IP'], None): return
+ if query_dns(domain, "A", None) not in {env['PUBLIC_IP'], None}: return
# Where is the certificate file stored?
tls_cert = get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=True)
@@ -916,18 +903,16 @@ def what_version_is_this(env):
# Git may not be installed and Mail-in-a-Box may not have been cloned from github,
# so this function may raise all sorts of exceptions.
miab_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- tag = shell("check_output", ["/usr/bin/git", "describe", "--always", "--abbrev=0"], env={"GIT_DIR": os.path.join(miab_dir, '.git')}).strip()
- return tag
+ return shell("check_output", ["/usr/bin/git", "describe", "--always", "--abbrev=0"], env={"GIT_DIR": os.path.join(miab_dir, '.git')}).strip()
def get_latest_miab_version():
# This pings https://mailinabox.email/setup.sh and extracts the tag named in
# the script to determine the current product version.
from urllib.request import urlopen, HTTPError, URLError
- from socket import timeout
try:
return re.search(b'TAG=(.*)', urlopen("https://mailinabox.email/setup.sh?ping=1", timeout=5).read()).group(1).decode("utf8")
- except (HTTPError, URLError, timeout):
+ except (TimeoutError, HTTPError, URLError):
return None
def check_miab_version(env, output):
@@ -948,8 +933,7 @@ def check_miab_version(env, output):
elif latest_ver is None:
output.print_error("Latest Mail-in-a-Box version could not be determined. You are running version %s." % this_ver)
else:
- output.print_error("A new version of Mail-in-a-Box is available. You are running version %s. The latest version is %s. For upgrade instructions, see https://mailinabox.email. "
- % (this_ver, latest_ver))
+ output.print_error(f"A new version of Mail-in-a-Box is available. You are running version {this_ver}. The latest version is {latest_ver}. For upgrade instructions, see https://mailinabox.email. ")
def run_and_output_changes(env, pool):
import json
@@ -964,7 +948,7 @@ def run_and_output_changes(env, pool):
# Load previously saved status checks.
cache_fn = "/var/cache/mailinabox/status_checks.json"
if os.path.exists(cache_fn):
- with open(cache_fn, 'r') as f:
+ with open(cache_fn, encoding="utf-8") as f:
try:
prev = json.load(f)
except json.JSONDecodeError:
@@ -1003,14 +987,14 @@ def run_and_output_changes(env, pool):
out.add_heading(category + " -- Previously:")
elif op == "delete":
out.add_heading(category + " -- Removed")
- if op in ("replace", "delete"):
+ if op in {"replace", "delete"}:
BufferedOutput(with_lines=prev_lines[i1:i2]).playback(out)
if op == "replace":
out.add_heading(category + " -- Currently:")
elif op == "insert":
out.add_heading(category + " -- Added")
- if op in ("replace", "insert"):
+ if op in {"replace", "insert"}:
BufferedOutput(with_lines=cur_lines[j1:j2]).playback(out)
for category, prev_lines in prev_status.items():
@@ -1020,7 +1004,7 @@ def run_and_output_changes(env, pool):
# Store the current status checks output for next time.
os.makedirs(os.path.dirname(cache_fn), exist_ok=True)
- with open(cache_fn, "w") as f:
+ with open(cache_fn, "w", encoding="utf-8") as f:
json.dump(cur.buf, f, indent=True)
def normalize_ip(ip):
@@ -1054,8 +1038,8 @@ class FileOutput:
def print_block(self, message, first_line=" "):
print(first_line, end='', file=self.buf)
- message = re.sub("\n\s*", " ", message)
- words = re.split("(\s+)", message)
+ message = re.sub("\n\\s*", " ", message)
+ words = re.split(r"(\s+)", message)
linelen = 0
for w in words:
if self.width and (linelen + len(w) > self.width-1-len(first_line)):
@@ -1094,9 +1078,9 @@ class ConsoleOutput(FileOutput):
class BufferedOutput:
# Record all of the instance method calls so we can play them back later.
def __init__(self, with_lines=None):
- self.buf = [] if not with_lines else with_lines
+ self.buf = with_lines if with_lines else []
def __getattr__(self, attr):
- if attr not in ("add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"):
+ if attr not in {"add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"}:
raise AttributeError
# Return a function that just records the call & arguments to our buffer.
def w(*args, **kwargs):
diff --git a/management/utils.py b/management/utils.py
index b5ca7e59..929544d1 100644
--- a/management/utils.py
+++ b/management/utils.py
@@ -14,31 +14,31 @@ def load_env_vars_from_file(fn):
# Load settings from a KEY=VALUE file.
import collections
env = collections.OrderedDict()
- with open(fn, 'r') as f:
+ with open(fn, encoding="utf-8") as f:
for line in f:
env.setdefault(*line.strip().split("=", 1))
return env
def save_environment(env):
- with open("/etc/mailinabox.conf", "w") as f:
+ with open("/etc/mailinabox.conf", "w", encoding="utf-8") as f:
for k, v in env.items():
- f.write("%s=%s\n" % (k, v))
+ f.write(f"{k}={v}\n")
# THE SETTINGS FILE AT STORAGE_ROOT/settings.yaml.
def write_settings(config, env):
import rtyaml
fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
- with open(fn, "w") as f:
+ with open(fn, "w", encoding="utf-8") as f:
f.write(rtyaml.dump(config))
def load_settings(env):
import rtyaml
fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
try:
- with open(fn, "r") as f:
+ with open(fn, encoding="utf-8") as f:
config = rtyaml.load(f)
- if not isinstance(config, dict): raise ValueError() # caught below
+ if not isinstance(config, dict): raise ValueError # caught below
return config
except:
return { }
@@ -59,7 +59,7 @@ def sort_domains(domain_names, env):
# from shortest to longest since zones are always shorter than their
# subdomains.
zones = { }
- for domain in sorted(domain_names, key=lambda d : len(d)):
+ for domain in sorted(domain_names, key=len):
for z in zones.values():
if domain.endswith("." + z):
# We found a parent domain already in the list.
@@ -81,7 +81,7 @@ def sort_domains(domain_names, env):
))
# Now sort the domain names that fall within each zone.
- domain_names = sorted(domain_names,
+ return sorted(domain_names,
key = lambda d : (
# First by zone.
zone_domains.index(zones[d]),
@@ -95,25 +95,26 @@ def sort_domains(domain_names, env):
# Then in right-to-left lexicographic order of the .-separated parts of the name.
list(reversed(d.split("."))),
))
-
- return domain_names
+
def sort_email_addresses(email_addresses, env):
email_addresses = set(email_addresses)
- domains = set(email.split("@", 1)[1] for email in email_addresses if "@" in email)
+ domains = {email.split("@", 1)[1] for email in email_addresses if "@" in email}
ret = []
for domain in sort_domains(domains, env):
- domain_emails = set(email for email in email_addresses if email.endswith("@" + domain))
+ domain_emails = {email for email in email_addresses if email.endswith("@" + domain)}
ret.extend(sorted(domain_emails))
email_addresses -= domain_emails
ret.extend(sorted(email_addresses)) # whatever is left
return ret
-def shell(method, cmd_args, env={}, capture_stderr=False, return_bytes=False, trap=False, input=None):
+def shell(method, cmd_args, env=None, capture_stderr=False, return_bytes=False, trap=False, input=None):
# A safe way to execute processes.
# Some processes like apt-get require being given a sane PATH.
import subprocess
+ if env is None:
+ env = {}
env.update({ "PATH": "/sbin:/bin:/usr/sbin:/usr/bin" })
kwargs = {
'env': env,
@@ -149,7 +150,7 @@ def du(path):
# soft and hard links.
total_size = 0
seen = set()
- for dirpath, dirnames, filenames in os.walk(path):
+ for dirpath, _dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
try:
diff --git a/management/web_update.py b/management/web_update.py
index e23bb2d8..a0f0f799 100644
--- a/management/web_update.py
+++ b/management/web_update.py
@@ -22,17 +22,17 @@ def get_web_domains(env, include_www_redirects=True, include_auto=True, exclude_
# Add 'www.' subdomains that we want to provide default redirects
# to the main domain for. We'll add 'www.' to any DNS zones, i.e.
# the topmost of each domain we serve.
- domains |= set('www.' + zone for zone, zonefile in get_dns_zones(env))
+ domains |= {'www.' + zone for zone, zonefile in get_dns_zones(env)}
if include_auto:
# Add Autoconfiguration domains for domains that there are user accounts at:
# 'autoconfig.' for Mozilla Thunderbird auto setup.
# 'autodiscover.' for ActiveSync autodiscovery (Z-Push).
- domains |= set('autoconfig.' + maildomain for maildomain in get_mail_domains(env, users_only=True))
- domains |= set('autodiscover.' + maildomain for maildomain in get_mail_domains(env, users_only=True))
+ domains |= {'autoconfig.' + maildomain for maildomain in get_mail_domains(env, users_only=True)}
+ domains |= {'autodiscover.' + maildomain for maildomain in get_mail_domains(env, users_only=True)}
# 'mta-sts.' for MTA-STS support for all domains that have email addresses.
- domains |= set('mta-sts.' + maildomain for maildomain in get_mail_domains(env))
+ domains |= {'mta-sts.' + maildomain for maildomain in get_mail_domains(env)}
if exclude_dns_elsewhere:
# ...Unless the domain has an A/AAAA record that maps it to a different
@@ -45,15 +45,14 @@ def get_web_domains(env, include_www_redirects=True, include_auto=True, exclude_
domains.add(env['PRIMARY_HOSTNAME'])
# Sort the list so the nginx conf gets written in a stable order.
- domains = sort_domains(domains, env)
+ return sort_domains(domains, env)
- return domains
def get_domains_with_a_records(env):
domains = set()
dns = get_custom_dns_config(env)
for domain, rtype, value in dns:
- if rtype == "CNAME" or (rtype in ("A", "AAAA") and value not in ("local", env['PUBLIC_IP'])):
+ if rtype == "CNAME" or (rtype in {"A", "AAAA"} and value not in {"local", env['PUBLIC_IP']}):
domains.add(domain)
return domains
@@ -63,7 +62,7 @@ def get_web_domains_with_root_overrides(env):
root_overrides = { }
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
if os.path.exists(nginx_conf_custom_fn):
- with open(nginx_conf_custom_fn, 'r') as f:
+ with open(nginx_conf_custom_fn, encoding='utf-8') as f:
custom_settings = rtyaml.load(f)
for domain, settings in custom_settings.items():
for type, value in [('redirect', settings.get('redirects', {}).get('/')),
@@ -78,7 +77,7 @@ def do_web_update(env):
# Helper for reading config files and templates
def read_conf(conf_fn):
- with open(os.path.join(os.path.dirname(__file__), "../conf", conf_fn), "r") as f:
+ with open(os.path.join(os.path.dirname(__file__), "../conf", conf_fn), encoding='utf-8') as f:
return f.read()
# Build an nginx configuration file.
@@ -113,12 +112,12 @@ def do_web_update(env):
# Did the file change? If not, don't bother writing & restarting nginx.
nginx_conf_fn = "/etc/nginx/conf.d/local.conf"
if os.path.exists(nginx_conf_fn):
- with open(nginx_conf_fn) as f:
+ with open(nginx_conf_fn, encoding='utf-8') as f:
if f.read() == nginx_conf:
return ""
# Save the file.
- with open(nginx_conf_fn, "w") as f:
+ with open(nginx_conf_fn, "w", encoding='utf-8') as f:
f.write(nginx_conf)
# Kick nginx. Since this might be called from the web admin
@@ -150,13 +149,13 @@ def make_domain_config(domain, templates, ssl_certificates, env):
with open(filepath, 'rb') as f:
sha1.update(f.read())
return sha1.hexdigest()
- nginx_conf_extra += "\t# ssl files sha1: %s / %s\n" % (hashfile(tls_cert["private-key"]), hashfile(tls_cert["certificate"]))
+ nginx_conf_extra += "\t# ssl files sha1: {} / {}\n".format(hashfile(tls_cert["private-key"]), hashfile(tls_cert["certificate"]))
# Add in any user customizations in YAML format.
hsts = "yes"
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
if os.path.exists(nginx_conf_custom_fn):
- with open(nginx_conf_custom_fn, 'r') as f:
+ with open(nginx_conf_custom_fn, encoding='utf-8') as f:
yaml = rtyaml.load(f)
if domain in yaml:
yaml = yaml[domain]
@@ -196,16 +195,16 @@ def make_domain_config(domain, templates, ssl_certificates, env):
nginx_conf_extra += "\n\t\talias %s;" % alias
nginx_conf_extra += "\n\t}\n"
for path, url in yaml.get("redirects", {}).items():
- nginx_conf_extra += "\trewrite %s %s permanent;\n" % (path, url)
+ nginx_conf_extra += f"\trewrite {path} {url} permanent;\n"
# override the HSTS directive type
hsts = yaml.get("hsts", hsts)
# Add the HSTS header.
if hsts == "yes":
- nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=15768000\" always;\n"
+ nginx_conf_extra += '\tadd_header Strict-Transport-Security "max-age=15768000" always;\n'
elif hsts == "preload":
- nginx_conf_extra += "\tadd_header Strict-Transport-Security \"max-age=15768000; includeSubDomains; preload\" always;\n"
+ nginx_conf_extra += '\tadd_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload" always;\n'
# Add in any user customizations in the includes/ folder.
nginx_conf_custom_include = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(domain) + ".conf")
@@ -216,7 +215,7 @@ def make_domain_config(domain, templates, ssl_certificates, env):
# Combine the pieces. Iteratively place each template into the "# ADDITIONAL DIRECTIVES HERE" placeholder
# of the previous template.
nginx_conf = "# ADDITIONAL DIRECTIVES HERE\n"
- for t in templates + [nginx_conf_extra]:
+ for t in [*templates, nginx_conf_extra]:
nginx_conf = re.sub("[ \t]*# ADDITIONAL DIRECTIVES HERE *\n", t, nginx_conf)
# Replace substitution strings in the template & return.
@@ -225,9 +224,8 @@ def make_domain_config(domain, templates, ssl_certificates, env):
nginx_conf = nginx_conf.replace("$ROOT", root)
nginx_conf = nginx_conf.replace("$SSL_KEY", tls_cert["private-key"])
nginx_conf = nginx_conf.replace("$SSL_CERTIFICATE", tls_cert["certificate"])
- nginx_conf = nginx_conf.replace("$REDIRECT_DOMAIN", re.sub(r"^www\.", "", domain)) # for default www redirects to parent domain
+ return nginx_conf.replace("$REDIRECT_DOMAIN", re.sub(r"^www\.", "", domain)) # for default www redirects to parent domain
- return nginx_conf
def get_web_root(domain, env, test_exists=True):
# Try STORAGE_ROOT/web/domain_name if it exists, but fall back to STORAGE_ROOT/web/default.
diff --git a/management/wsgi.py b/management/wsgi.py
index 86cf3af4..1d5a37b8 100644
--- a/management/wsgi.py
+++ b/management/wsgi.py
@@ -1,7 +1,7 @@
from daemon import app
-import auth, utils
+import utils
app.logger.addHandler(utils.create_syslog_handler())
if __name__ == "__main__":
- app.run(port=10222)
\ No newline at end of file
+ app.run(port=10222)
diff --git a/setup/migrate.py b/setup/migrate.py
index 9065cf40..066e0e03 100755
--- a/setup/migrate.py
+++ b/setup/migrate.py
@@ -9,6 +9,7 @@ import sys, os, os.path, glob, re, shutil
sys.path.insert(0, 'management')
from utils import load_environment, save_environment, shell
+import contextlib
def migration_1(env):
# Re-arrange where we store SSL certificates. There was a typo also.
@@ -31,10 +32,8 @@ def migration_1(env):
move_file(sslfn, domain_name, file_type)
# Move the old domains directory if it is now empty.
- try:
+ with contextlib.suppress(Exception):
os.rmdir(os.path.join( env["STORAGE_ROOT"], 'ssl/domains'))
- except:
- pass
def migration_2(env):
# Delete the .dovecot_sieve script everywhere. This was formerly a copy of our spam -> Spam
@@ -168,7 +167,7 @@ def migration_12(env):
dropcmd = "DROP TABLE %s" % table
c.execute(dropcmd)
except:
- print("Failed to drop table", table, e)
+ print("Failed to drop table", table)
# Save.
conn.commit()
conn.close()
@@ -212,8 +211,8 @@ def run_migrations():
migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')
migration_id = None
if os.path.exists(migration_id_file):
- with open(migration_id_file) as f:
- migration_id = f.read().strip();
+ with open(migration_id_file, encoding='utf-8') as f:
+ migration_id = f.read().strip()
if migration_id is None:
# Load the legacy location of the migration ID. We'll drop support
@@ -222,7 +221,7 @@ def run_migrations():
if migration_id is None:
print()
- print("%s file doesn't exists. Skipping migration..." % (migration_id_file,))
+ print(f"{migration_id_file} file doesn't exist. Skipping migration...")
return
ourver = int(migration_id)
@@ -253,7 +252,7 @@ def run_migrations():
# Write out our current version now. Do this sooner rather than later
# in case of any problems.
- with open(migration_id_file, "w") as f:
+ with open(migration_id_file, "w", encoding='utf-8') as f:
f.write(str(ourver) + "\n")
# Delete the legacy location of this field.
diff --git a/tests/fail2ban.py b/tests/fail2ban.py
index cb55c51f..dbab874a 100644
--- a/tests/fail2ban.py
+++ b/tests/fail2ban.py
@@ -6,12 +6,12 @@
# try to log in to.
######################################################################
-import sys, os, time, functools
+import sys, os, time
# parse command line
if len(sys.argv) != 4:
- print("Usage: tests/fail2ban.py \"ssh user@hostname\" hostname owncloud_user")
+ print('Usage: tests/fail2ban.py "ssh user@hostname" hostname owncloud_user')
sys.exit(1)
ssh_command, hostname, owncloud_user = sys.argv[1:4]
@@ -24,7 +24,6 @@ socket.setdefaulttimeout(10)
class IsBlocked(Exception):
"""Tests raise this exception when it appears that a fail2ban
jail is in effect, i.e. on a connection refused error."""
- pass
def smtp_test():
import smtplib
@@ -33,13 +32,14 @@ def smtp_test():
server = smtplib.SMTP(hostname, 587)
except ConnectionRefusedError:
# looks like fail2ban worked
- raise IsBlocked()
+ raise IsBlocked
server.starttls()
server.ehlo_or_helo_if_needed()
try:
server.login("fakeuser", "fakepassword")
- raise Exception("authentication didn't fail")
+ msg = "authentication didn't fail"
+ raise Exception(msg)
except smtplib.SMTPAuthenticationError:
# athentication should fail
pass
@@ -57,11 +57,12 @@ def imap_test():
M = imaplib.IMAP4_SSL(hostname)
except ConnectionRefusedError:
# looks like fail2ban worked
- raise IsBlocked()
+ raise IsBlocked
try:
M.login("fakeuser", "fakepassword")
- raise Exception("authentication didn't fail")
+ msg = "authentication didn't fail"
+ raise Exception(msg)
except imaplib.IMAP4.error:
# authentication should fail
pass
@@ -75,17 +76,18 @@ def pop_test():
M = poplib.POP3_SSL(hostname)
except ConnectionRefusedError:
# looks like fail2ban worked
- raise IsBlocked()
+ raise IsBlocked
try:
M.user('fakeuser')
try:
M.pass_('fakepassword')
- except poplib.error_proto as e:
+ except poplib.error_proto:
# Authentication should fail.
M = None # don't .quit()
return
M.list()
- raise Exception("authentication didn't fail")
+ msg = "authentication didn't fail"
+ raise Exception(msg)
finally:
if M:
M.quit()
@@ -99,11 +101,12 @@ def managesieve_test():
M = imaplib.IMAP4(hostname, 4190)
except ConnectionRefusedError:
# looks like fail2ban worked
- raise IsBlocked()
+ raise IsBlocked
try:
M.login("fakeuser", "fakepassword")
- raise Exception("authentication didn't fail")
+ msg = "authentication didn't fail"
+ raise Exception(msg)
except imaplib.IMAP4.error:
# authentication should fail
pass
@@ -129,17 +132,17 @@ def http_test(url, expected_status, postdata=None, qsargs=None, auth=None):
headers={'User-Agent': 'Mail-in-a-Box fail2ban tester'},
timeout=8,
verify=False) # don't bother with HTTPS validation, it may not be configured yet
- except requests.exceptions.ConnectTimeout as e:
- raise IsBlocked()
+ except requests.exceptions.ConnectTimeout:
+ raise IsBlocked
except requests.exceptions.ConnectionError as e:
if "Connection refused" in str(e):
- raise IsBlocked()
+ raise IsBlocked
raise # some other unexpected condition
# return response status code
if r.status_code != expected_status:
r.raise_for_status() # anything but 200
- raise IOError("Got unexpected status code %s." % r.status_code)
+ raise OSError(f"Got unexpected status code {r.status_code}.")
# define how to run a test
@@ -149,7 +152,7 @@ def restart_fail2ban_service(final=False):
if not final:
# Stop recidive jails during testing.
command += " && sudo fail2ban-client stop recidive"
- os.system("%s \"%s\"" % (ssh_command, command))
+ os.system(f'{ssh_command} "{command}"')
def testfunc_runner(i, testfunc, *args):
print(i+1, end=" ", flush=True)
@@ -163,7 +166,6 @@ def run_test(testfunc, args, count, within_seconds, parallel):
# run testfunc sequentially and still get to count requests within
# the required time. So we split the requests across threads.
- import requests.exceptions
from multiprocessing import Pool
restart_fail2ban_service()
@@ -179,7 +181,7 @@ def run_test(testfunc, args, count, within_seconds, parallel):
# Distribute the requests across the pool.
asyncresults = []
for i in range(count):
- ar = p.apply_async(testfunc_runner, [i, testfunc] + list(args))
+ ar = p.apply_async(testfunc_runner, [i, testfunc, *list(args)])
asyncresults.append(ar)
# Wait for all runs to finish.
diff --git a/tests/test_dns.py b/tests/test_dns.py
index 25c64bf1..53fdb545 100755
--- a/tests/test_dns.py
+++ b/tests/test_dns.py
@@ -7,7 +7,7 @@
# where ipaddr is the IP address of your Mail-in-a-Box
# and hostname is the domain name to check the DNS for.
-import sys, re, difflib
+import sys, re
import dns.reversename, dns.resolver
if len(sys.argv) < 3:
@@ -27,10 +27,10 @@ def test(server, description):
("ns2." + primary_hostname, "A", ipaddr),
("www." + hostname, "A", ipaddr),
(hostname, "MX", "10 " + primary_hostname + "."),
- (hostname, "TXT", "\"v=spf1 mx -all\""),
- ("mail._domainkey." + hostname, "TXT", "\"v=DKIM1; k=rsa; s=email; \" \"p=__KEY__\""),
+ (hostname, "TXT", '"v=spf1 mx -all"'),
+ ("mail._domainkey." + hostname, "TXT", '"v=DKIM1; k=rsa; s=email; " "p=__KEY__"'),
#("_adsp._domainkey." + hostname, "TXT", "\"dkim=all\""),
- ("_dmarc." + hostname, "TXT", "\"v=DMARC1; p=quarantine;\""),
+ ("_dmarc." + hostname, "TXT", '"v=DMARC1; p=quarantine;"'),
]
return test2(tests, server, description)
@@ -59,7 +59,7 @@ def test2(tests, server, description):
response = ["[no value]"]
response = ";".join(str(r) for r in response)
response = re.sub(r"(\"p=).*(\")", r"\1__KEY__\2", response) # normalize DKIM key
- response = response.replace("\"\" ", "") # normalize TXT records (DNSSEC signing inserts empty text string components)
+ response = response.replace('"" ', "") # normalize TXT records (DNSSEC signing inserts empty text string components)
# is it right?
if response == expected_answer:
@@ -98,7 +98,7 @@ else:
# And if that's OK, also check reverse DNS (the PTR record).
if not test_ptr("8.8.8.8", "Google Public DNS (Reverse DNS)"):
print ()
- print ("The reverse DNS for %s is not correct. Consult your ISP for how to set the reverse DNS (also called the PTR record) for %s to %s." % (hostname, hostname, ipaddr))
+ print (f"The reverse DNS for {hostname} is not correct. Consult your ISP for how to set the reverse DNS (also called the PTR record) for {hostname} to {ipaddr}.")
sys.exit(1)
else:
print ("And the reverse DNS for the domain is correct.")
diff --git a/tests/test_mail.py b/tests/test_mail.py
index 312f3332..64ed3679 100755
--- a/tests/test_mail.py
+++ b/tests/test_mail.py
@@ -30,15 +30,11 @@ print("IMAP login is OK.")
# Attempt to send a mail to ourself.
mailsubject = "Mail-in-a-Box Automated Test Message " + uuid.uuid4().hex
emailto = emailaddress
-msg = """From: {emailaddress}
+msg = f"""From: {emailaddress}
To: {emailto}
-Subject: {subject}
+Subject: {mailsubject}
-This is a test message. It should be automatically deleted by the test script.""".format(
- emailaddress=emailaddress,
- emailto=emailto,
- subject=mailsubject,
- )
+This is a test message. It should be automatically deleted by the test script."""
# Connect to the server on the SMTP submission TLS port.
server = smtplib.SMTP_SSL(host)
diff --git a/tests/test_smtp_server.py b/tests/test_smtp_server.py
index 914c94b2..246f4ecc 100755
--- a/tests/test_smtp_server.py
+++ b/tests/test_smtp_server.py
@@ -6,11 +6,11 @@ if len(sys.argv) < 3:
sys.exit(1)
host, toaddr, fromaddr = sys.argv[1:4]
-msg = """From: %s
-To: %s
+msg = f"""From: {fromaddr}
+To: {toaddr}
Subject: SMTP server test
-This is a test message.""" % (fromaddr, toaddr)
+This is a test message."""
server = smtplib.SMTP(host, 25)
server.set_debuglevel(1)
diff --git a/tests/tls.py b/tests/tls.py
index e06ddcc9..8795f36a 100644
--- a/tests/tls.py
+++ b/tests/tls.py
@@ -88,14 +88,14 @@ def sslyze(opts, port, ok_ciphers):
try:
# Execute SSLyze.
- out = subprocess.check_output([SSLYZE] + common_opts + opts + [connection_string])
+ out = subprocess.check_output([SSLYZE, *common_opts, *opts, connection_string])
out = out.decode("utf8")
# Trim output to make better for storing in git.
if "SCAN RESULTS FOR" not in out:
# Failed. Just output the error.
- out = re.sub("[\w\W]*CHECKING HOST\(S\) AVAILABILITY\n\s*-+\n", "", out) # chop off header that shows the host we queried
- out = re.sub("[\w\W]*SCAN RESULTS FOR.*\n\s*-+\n", "", out) # chop off header that shows the host we queried
+ out = re.sub("[\\w\\W]*CHECKING HOST\\(S\\) AVAILABILITY\n\\s*-+\n", "", out) # chop off header that shows the host we queried
+ out = re.sub("[\\w\\W]*SCAN RESULTS FOR.*\n\\s*-+\n", "", out) # chop off header that shows the host we queried
out = re.sub("SCAN COMPLETED IN .*", "", out)
out = out.rstrip(" \n-") + "\n"
@@ -105,8 +105,8 @@ def sslyze(opts, port, ok_ciphers):
# Pull out the accepted ciphers list for each SSL/TLS protocol
# version outputted.
accepted_ciphers = set()
- for ciphers in re.findall(" Accepted:([\w\W]*?)\n *\n", out):
- accepted_ciphers |= set(re.findall("\n\s*(\S*)", ciphers))
+ for ciphers in re.findall(" Accepted:([\\w\\W]*?)\n *\n", out):
+ accepted_ciphers |= set(re.findall("\n\\s*(\\S*)", ciphers))
# Compare to what Mozilla recommends, for a given modernness-level.
print(" Should Not Offer: " + (", ".join(sorted(accepted_ciphers-set(ok_ciphers))) or "(none -- good)"))
@@ -142,7 +142,7 @@ for cipher in csv.DictReader(io.StringIO(urllib.request.urlopen("https://raw.git
client_compatibility = json.loads(urllib.request.urlopen("https://raw.githubusercontent.com/mail-in-a-box/user-agent-tls-capabilities/master/clients.json").read().decode("utf8"))
cipher_clients = { }
for client in client_compatibility:
- if len(set(client['protocols']) & set(["TLS 1.0", "TLS 1.1", "TLS 1.2"])) == 0: continue # does not support TLS
+ if len(set(client['protocols']) & {"TLS 1.0", "TLS 1.1", "TLS 1.2"}) == 0: continue # does not support TLS
for cipher in client['ciphers']:
cipher_clients.setdefault(cipher_names.get(cipher), set()).add("/".join(x for x in [client['client']['name'], client['client']['version'], client['client']['platform']] if x))
diff --git a/tools/editconf.py b/tools/editconf.py
index ec112b02..0438695b 100755
--- a/tools/editconf.py
+++ b/tools/editconf.py
@@ -24,7 +24,7 @@
# lines while the lines start with whitespace, e.g.:
#
# NAME VAL
-# UE
+# UE
import sys, re
@@ -76,7 +76,7 @@ for setting in settings:
found = set()
buf = ""
-with open(filename, "r") as f:
+with open(filename, encoding="utf-8") as f:
input_lines = list(f)
while len(input_lines) > 0:
@@ -84,7 +84,7 @@ while len(input_lines) > 0:
# If this configuration file uses folded lines, append any folded lines
# into our input buffer.
- if folded_lines and line[0] not in (comment_char, " ", ""):
+ if folded_lines and line[0] not in {comment_char, " ", ""}:
while len(input_lines) > 0 and input_lines[0][0] in " \t":
line += input_lines.pop(0)
@@ -93,9 +93,9 @@ while len(input_lines) > 0:
# Check if this line contain this setting from the command-line arguments.
name, val = settings[i].split("=", 1)
m = re.match(
- "(\s*)"
- + "(" + re.escape(comment_char) + "\s*)?"
- + re.escape(name) + delimiter_re + "(.*?)\s*$",
+ r"(\s*)"
+ "(" + re.escape(comment_char) + r"\s*)?"
+ + re.escape(name) + delimiter_re + r"(.*?)\s*$",
line, re.S)
if not m: continue
indent, is_comment, existing_val = m.groups()
@@ -110,30 +110,30 @@ while len(input_lines) > 0:
buf += line
found.add(i)
break
-
+
# comment-out the existing line (also comment any folded lines)
if is_comment is None:
buf += comment_char + line.rstrip().replace("\n", "\n" + comment_char) + "\n"
else:
# the line is already commented, pass it through
buf += line
-
+
# if this option already is set don't add the setting again,
# or if we're clearing the setting with -e, don't add it
if (i in found) or (not val and erase_setting):
break
-
+
# add the new setting
buf += indent + name + delimiter + val + "\n"
-
+
# note that we've applied this option
found.add(i)
-
+
break
else:
# If did not match any setting names, pass this line through.
buf += line
-
+
# Put any settings we didn't see at the end of the file,
# except settings being cleared.
for i in range(len(settings)):
@@ -144,7 +144,7 @@ for i in range(len(settings)):
if not testing:
# Write out the new file.
- with open(filename, "w") as f:
+ with open(filename, "w", encoding="utf-8") as f:
f.write(buf)
else:
# Just print the new file to stdout.
diff --git a/tools/parse-nginx-log-bootstrap-accesses.py b/tools/parse-nginx-log-bootstrap-accesses.py
index c61ed79e..8eb74dec 100755
--- a/tools/parse-nginx-log-bootstrap-accesses.py
+++ b/tools/parse-nginx-log-bootstrap-accesses.py
@@ -38,7 +38,7 @@ for date, ip in accesses:
# Since logs are rotated, store the statistics permanently in a JSON file.
# Load in the stats from an existing file.
if os.path.exists(outfn):
- with open(outfn, "r") as f:
+ with open(outfn, encoding="utf-8") as f:
existing_data = json.load(f)
for date, count in existing_data:
if date not in by_date:
@@ -51,5 +51,5 @@ by_date = sorted(by_date.items())
by_date.pop(-1)
# Write out.
-with open(outfn, "w") as f:
+with open(outfn, "w", encoding="utf-8") as f:
json.dump(by_date, f, sort_keys=True, indent=True)
diff --git a/tools/readable_bash.py b/tools/readable_bash.py
index 3cb5908c..66f6196c 100644
--- a/tools/readable_bash.py
+++ b/tools/readable_bash.py
@@ -124,7 +124,7 @@ def generate_documentation():
""")
parser = Source.parser()
- with open("setup/start.sh", "r") as start_file:
+ with open("setup/start.sh", encoding="utf-8") as start_file:
for line in start_file:
try:
fn = parser.parse_string(line).filename()