diff --git a/CHANGELOG.md b/CHANGELOG.md index a7d548ff..eb4c0b60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,46 @@ CHANGELOG ========= +In Development +-------------- + +* Migrate to the ECDSAP256SHA256 DNSSEC algorithm. If a DS record is set for any of your domain names that have DNS hosted on your box, you will be prompted by status checks to update the DS record. +* Roundcube's login cookie is updated to use a new encryption algorithm (AES-256-CBC instead of DES-EDE-CBC). + +v0.53a (May 8, 2021) +-------------------- + +The download URL for Z-Push has been revised because the old URL stopped working. + +v0.53 (April 12, 2021) +---------------------- + +Software updates: + +* Upgraded Roundcube to version 1.4.11 addressing a security issue, and its desktop notifications plugin. +* Upgraded Z-Push (for Exchange/ActiveSync) to version 2.6.2. + +Control panel: + +* Backblaze B2 is now a supported backup protocol. +* Fixed an issue in the daily mail reports. +* Sort the Custom DNS by zone and qname, and add an option to go back to the old sort order (creation order). + +Mail: + +* Enable sending DMARC failure reports to senders that request them. + +Setup: + +* Fixed error when upgrading from Nextcloud 13. + v0.52 (January 31, 2021) ------------------------ Software updates: * Upgraded Roundcube to version 1.4.10. -* Upgraded zpush to 2.6.1. +* Upgraded Z-Push to 2.6.1. Mail: diff --git a/README.md b/README.md index 02445a20..e08312fa 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ Clone this repository and checkout the tag corresponding to the most recent rele $ git clone https://github.com/mail-in-a-box/mailinabox $ cd mailinabox - $ git checkout v0.52 + $ git checkout v0.53a Begin the installation. 
diff --git a/management/daemon.py b/management/daemon.py index a0cfefa6..8490ee44 100755 --- a/management/daemon.py +++ b/management/daemon.py @@ -277,17 +277,50 @@ def dns_set_secondary_nameserver(): @app.route('/dns/custom') @authorized_personnel_only def dns_get_records(qname=None, rtype=None): - from dns_update import get_custom_dns_config - return json_response([ - { - "qname": r[0], - "rtype": r[1], - "value": r[2], - } - for r in get_custom_dns_config(env) - if r[0] != "_secondary_nameserver" - and (not qname or r[0] == qname) - and (not rtype or r[1] == rtype) ]) + # Get the current set of custom DNS records. + from dns_update import get_custom_dns_config, get_dns_zones + records = get_custom_dns_config(env, only_real_records=True) + + # Filter per the arguments for the more complex GET routes below. + records = [r for r in records + if (not qname or r[0] == qname) + and (not rtype or r[1] == rtype) ] + + # Make a better data structure. + records = [ + { + "qname": r[0], + "rtype": r[1], + "value": r[2], + "sort-order": { }, + } for r in records ] + + # To help with grouping by zone in qname sorting, label each record with which zone it is in. + # There's an inconsistency in how we handle zones in get_dns_zones and in sort_domains, so + # do this first before sorting the domains within the zones. + zones = utils.sort_domains([z[0] for z in get_dns_zones(env)], env) + for r in records: + for z in zones: + if r["qname"] == z or r["qname"].endswith("." + z): + r["zone"] = z + break + + # Add sorting information. The 'created' order follows the order in the YAML file on disk, + # which tracks the order entries were added in the control panel since we append to the end. + # The 'qname' sort order sorts by our standard domain name sort (by zone then by qname), + # then by rtype, and last by the original order in the YAML file (since sorting by value + # may not make sense, unless we parse IP addresses, for example). 
+ for i, r in enumerate(records): + r["sort-order"]["created"] = i + domain_sort_order = utils.sort_domains([r["qname"] for r in records], env) + for i, r in enumerate(sorted(records, key = lambda r : ( + zones.index(r["zone"]), + domain_sort_order.index(r["qname"]), + r["rtype"]))): + r["sort-order"]["qname"] = i + + # Return. + return json_response(records) @app.route('/dns/custom/', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/dns/custom//', methods=['GET', 'POST', 'PUT', 'DELETE']) diff --git a/management/dns_update.py b/management/dns_update.py index 781fb1dc..c595bc3b 100755 --- a/management/dns_update.py +++ b/management/dns_update.py @@ -127,6 +127,10 @@ def build_zones(env): from web_update import get_web_domains www_redirect_domains = set(get_web_domains(env)) - set(get_web_domains(env, include_www_redirects=False)) + # For MTA-STS, we'll need to check if the PRIMARY_HOSTNAME certificate is + # signed and valid. Check that now rather than repeatedly for each domain. + env["-primary-hostname-certificate-is-valid"] = is_domain_cert_signed_and_valid(env["PRIMARY_HOSTNAME"], env) + # Build DNS records for each zone. for domain, zonefile in zonefiles: # Build the records to put in the zone. @@ -322,24 +326,11 @@ def build_zone(domain, all_domains, additional_records, www_redirect_domains, en # certificate in use is not valid (e.g. because it is self-signed and a valid certificate has not # yet been provisioned). Since we cannot provision a certificate without A/AAAA records, we # always set them --- only the TXT records depend on there being valid certificates. - mta_sts_enabled = False mta_sts_records = [ ("mta-sts", "A", env["PUBLIC_IP"], "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt."), ("mta-sts", "AAAA", env.get('PUBLIC_IPV6'), "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt."), ] - if domain in get_mail_domains(env): - # Check that PRIMARY_HOSTNAME and the mta_sts domain both have valid certificates. 
- for d in (env['PRIMARY_HOSTNAME'], "mta-sts." + domain): - cert = get_ssl_certificates(env).get(d) - if not cert: - break # no certificate provisioned for this domain - cert_status = check_certificate(d, cert['certificate'], cert['private-key']) - if cert_status[0] != 'OK': - break # certificate is not valid - else: - # 'break' was not encountered above, so both domains are good - mta_sts_enabled = True - if mta_sts_enabled: + if domain in get_mail_domains(env) and env["-primary-hostname-certificate-is-valid"] and is_domain_cert_signed_and_valid("mta-sts." + domain, env): # Compute an up-to-32-character hash of the policy file. We'll take a SHA-1 hash of the policy # file (20 bytes) and encode it as base-64 (28 bytes, using alphanumeric alternate characters # instead of '+' and '/' which are not allowed in an MTA-STS policy id) but then just take its @@ -365,6 +356,13 @@ def build_zone(domain, all_domains, additional_records, www_redirect_domains, en return records +def is_domain_cert_signed_and_valid(domain, env): + cert = get_ssl_certificates(env).get(domain) + if not cert: return False # no certificate provisioned + cert_status = check_certificate(domain, cert['certificate'], cert['private-key']) + print(domain, cert_status) + return cert_status[0] == 'OK' + ######################################################################## def build_tlsa_record(env): @@ -429,6 +427,7 @@ def build_sshfp_records(): # to the zone file (that trigger bumping the serial number). However, # if SSH has been configured to listen on a nonstandard port, we must # specify that port to sshkeyscan. 
+ port = 22 with open('/etc/ssh/sshd_config', 'r') as f: for line in f: @@ -439,8 +438,11 @@ def build_sshfp_records(): except ValueError: pass break + keys = shell("check_output", ["ssh-keyscan", "-t", "rsa,dsa,ecdsa,ed25519", "-p", str(port), "localhost"]) - for key in sorted(keys.split("\n")): + keys = sorted(keys.split("\n")) + + for key in keys: if key.strip() == "" or key[0] == "#": continue try: host, keytype, pubkey = key.split(" ") @@ -460,13 +462,16 @@ def write_nsd_zone(domain, zonefile, records, env, force): # On the $ORIGIN line, there's typically a ';' comment at the end explaining # what the $ORIGIN line does. Any further data after the domain confuses # ldns-signzone, however. It used to say '; default zone domain'. - + # # The SOA contact address for all of the domains on this system is hostmaster # @ the PRIMARY_HOSTNAME. Hopefully that's legit. - + # # For the refresh through TTL fields, a good reference is: # http://www.peerwisdom.org/2013/05/15/dns-understanding-the-soa-record/ - + # + # A hash of the available DNSSEC keys are added in a comment so that when + # the keys change we force a re-generation of the zone which triggers + # re-signing it. zone = """ $ORIGIN {domain}. @@ -502,6 +507,9 @@ $TTL 86400 ; default time to live value = v2 zone += value + "\n" + # Append a stable hash of DNSSEC signing keys in a comment. + zone += "\n; DNSSEC signing keys hash: {}\n".format(hash_dnssec_keys(domain, env)) + # DNSSEC requires re-signing a zone periodically. That requires # bumping the serial number even if no other records have changed. # We don't see the DNSSEC records yet, so we have to figure out @@ -612,53 +620,77 @@ zone: ######################################################################## -def dnssec_choose_algo(domain, env): - if '.' in domain and domain.rsplit('.')[-1] in \ - ("email", "guide", "fund", "be", "lv"): - # At GoDaddy, RSASHA256 is the only algorithm supported - # for .email and .guide. 
- # A variety of algorithms are supported for .fund. This - # is preferred. - # Gandi tells me that .be does not support RSASHA1-NSEC3-SHA1 - # Nic.lv does not support RSASHA1-NSEC3-SHA1 for .lv tld's - return "RSASHA256" +def find_dnssec_signing_keys(domain, env): + # For key that we generated (one per algorithm)... + d = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec') + keyconfs = [f for f in os.listdir(d) if f.endswith(".conf")] + for keyconf in keyconfs: + # Load the file holding the KSK and ZSK key filenames. + keyconf_fn = os.path.join(d, keyconf) + keyinfo = load_env_vars_from_file(keyconf_fn) - # For any domain we were able to sign before, don't change the algorithm - # on existing users. We'll probably want to migrate to SHA256 later. - return "RSASHA1-NSEC3-SHA1" + # Skip this key if the conf file has a setting named DOMAINS, + # holding a comma-separated list of domain names, and if this + # domain is not in the list. This allows easily disabling a + # key by setting "DOMAINS=" or "DOMAINS=none", other than + # deleting the key's .conf file, which might result in the key + # being regenerated next upgrade. Keys should be disabled if + # they are not needed to reduce the DNSSEC query response size. + if "DOMAINS" in keyinfo and domain not in [dd.strip() for dd in keyinfo["DOMAINS"].split(",")]: + continue + + for keytype in ("KSK", "ZSK"): + yield keytype, keyinfo[keytype] + +def hash_dnssec_keys(domain, env): + # Create a stable (by sorting the items) hash of all of the private keys + # that will be used to sign this domain. 
+ keydata = [] + for keytype, keyfn in sorted(find_dnssec_signing_keys(domain, env)): + oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ".private") + keydata.append(keytype) + keydata.append(keyfn) + with open(oldkeyfn, "r") as fr: + keydata.append( fr.read() ) + keydata = "".join(keydata).encode("utf8") + return hashlib.sha1(keydata).hexdigest() def sign_zone(domain, zonefile, env): - algo = dnssec_choose_algo(domain, env) - dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo)) + # Sign the zone with all of the keys that were generated during + # setup so that the user can choose which to use in their DS record at + # their registrar, and also to support migration to newer algorithms. - # In order to use the same keys for all domains, we have to generate - # a new .key file with a DNSSEC record for the specific domain. We - # can reuse the same key, but it won't validate without a DNSSEC - # record specifically for the domain. + # In order to use the key files generated at setup which are for + # the domain _domain_, we have to re-write the files and place + # the actual domain name in it, so that ldns-signzone works. # - # Copy the .key and .private files to /tmp to patch them up. - # - # Use os.umask and open().write() to securely create a copy that only - # we (root) can read. - files_to_kill = [] - for key in ("KSK", "ZSK"): - if dnssec_keys.get(key, "").strip() == "": raise Exception("DNSSEC is not properly set up.") - oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys[key]) - newkeyfn = '/tmp/' + dnssec_keys[key].replace("_domain_", domain) - dnssec_keys[key] = newkeyfn + # Patch each key, storing the patched version in /tmp for now. + # Each key has a .key and .private file. Collect a list of filenames + # for all of the keys (and separately just the key-signing keys). 
+ all_keys = [] + ksk_keys = [] + for keytype, keyfn in find_dnssec_signing_keys(domain, env): + newkeyfn = '/tmp/' + keyfn.replace("_domain_", domain) + for ext in (".private", ".key"): - if not os.path.exists(oldkeyfn + ext): raise Exception("DNSSEC is not properly set up.") - with open(oldkeyfn + ext, "r") as fr: + # Copy the .key and .private files to /tmp to patch them up. + # + # Use os.umask and open().write() to securely create a copy that only + # we (root) can read. + oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ext) + with open(oldkeyfn, "r") as fr: keydata = fr.read() - keydata = keydata.replace("_domain_", domain) # trick ldns-signkey into letting our generic key be used by this zone - fn = newkeyfn + ext + keydata = keydata.replace("_domain_", domain) prev_umask = os.umask(0o77) # ensure written file is not world-readable try: - with open(fn, "w") as fw: + with open(newkeyfn + ext, "w") as fw: fw.write(keydata) finally: os.umask(prev_umask) # other files we write should be world-readable - files_to_kill.append(fn) + + # Put the patched key filename base (without extension) into the list of keys we'll sign with. + all_keys.append(newkeyfn) + if keytype == "KSK": ksk_keys.append(newkeyfn) # Do the signing. expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d") @@ -671,32 +703,34 @@ def sign_zone(domain, zonefile, env): # zonefile to sign "/etc/nsd/zones/" + zonefile, - + ] # keys to sign with (order doesn't matter -- it'll figure it out) - dnssec_keys["KSK"], - dnssec_keys["ZSK"], - ]) + + all_keys + ) # Create a DS record based on the patched-up key files. The DS record is specific to the # zone being signed, so we can't use the .ds files generated when we created the keys. # The DS record points to the KSK only. Write this next to the zone file so we can # get it later to give to the user with instructions on what to do with it. 
# - # We want to be able to validate DS records too, but multiple forms may be valid depending - # on the digest type. So we'll write all (both) valid records. Only one DS record should - # actually be deployed. Preferebly the first. + # Generate a DS record for each key. There are also several possible hash algorithms that may + # be used, so we'll pre-generate all for each key. One DS record per line. Only one + # needs to actually be deployed at the registrar. We'll select the preferred one + # in the status checks. with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f: - for digest_type in ('2', '1'): - rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds", - "-n", # output to stdout - "-" + digest_type, # 1=SHA1, 2=SHA256 - dnssec_keys["KSK"] + ".key" - ]) - f.write(rr_ds) + for key in ksk_keys: + for digest_type in ('1', '2', '4'): + rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds", + "-n", # output to stdout + "-" + digest_type, # 1=SHA1, 2=SHA256, 4=SHA384 + key + ".key" + ]) + f.write(rr_ds) - # Remove our temporary file. - for fn in files_to_kill: - os.unlink(fn) + # Remove the temporary patched key files. + for fn in all_keys: + os.unlink(fn + ".private") + os.unlink(fn + ".key") ######################################################################## @@ -753,7 +787,7 @@ def write_opendkim_tables(domains, env): ######################################################################## -def get_custom_dns_config(env): +def get_custom_dns_config(env, only_real_records=False): try: custom_dns = rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'))) if not isinstance(custom_dns, dict): raise ValueError() # caught below @@ -761,6 +795,8 @@ def get_custom_dns_config(env): return [ ] for qname, value in custom_dns.items(): + if qname == "_secondary_nameserver" and only_real_records: continue # skip fake record + # Short form. Mapping a domain name to a string is short-hand # for creating A records. 
if isinstance(value, str): diff --git a/management/mail_log.py b/management/mail_log.py index 9e08df77..1626f820 100755 --- a/management/mail_log.py +++ b/management/mail_log.py @@ -44,9 +44,8 @@ TIME_DELTAS = OrderedDict([ ('today', datetime.datetime.now() - datetime.datetime.now().replace(hour=0, minute=0, second=0)) ]) -# Start date > end date! -START_DATE = datetime.datetime.now() -END_DATE = None +END_DATE = NOW = datetime.datetime.now() +START_DATE = None VERBOSE = False @@ -121,7 +120,7 @@ def scan_mail_log(env): pass print("Scanning logs from {:%Y-%m-%d %H:%M:%S} to {:%Y-%m-%d %H:%M:%S}".format( - END_DATE, START_DATE) + START_DATE, END_DATE) ) # Scan the lines in the log files until the date goes out of range @@ -253,7 +252,7 @@ def scan_mail_log(env): if collector["postgrey"]: msg = "Greylisted Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}" - print_header(msg.format(END_DATE, START_DATE)) + print_header(msg.format(START_DATE, END_DATE)) print(textwrap.fill( "The following mail was greylisted, meaning the emails were temporarily rejected. " @@ -291,7 +290,7 @@ def scan_mail_log(env): if collector["rejected"]: msg = "Blocked Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}" - print_header(msg.format(END_DATE, START_DATE)) + print_header(msg.format(START_DATE, END_DATE)) data = OrderedDict(sorted(collector["rejected"].items(), key=email_sort)) @@ -344,20 +343,20 @@ def scan_mail_log_line(line, collector): # Replaced the dateutil parser for a less clever way of parser that is roughly 4 times faster. # date = dateutil.parser.parse(date) - - # date = datetime.datetime.strptime(date, '%b %d %H:%M:%S') - # date = date.replace(START_DATE.year) - - # strptime fails on Feb 29 if correct year is not provided. 
See https://bugs.python.org/issue26460 - date = datetime.datetime.strptime(str(START_DATE.year) + ' ' + date, '%Y %b %d %H:%M:%S') - # print("date:", date) + + # strptime fails on Feb 29 with ValueError: day is out of range for month if correct year is not provided. + # See https://bugs.python.org/issue26460 + date = datetime.datetime.strptime(str(NOW.year) + ' ' + date, '%Y %b %d %H:%M:%S') + # if log date in future, step back a year + if date > NOW: + date = date.replace(year = NOW.year - 1) + #print("date:", date) # Check if the found date is within the time span we are scanning - # END_DATE < START_DATE - if date > START_DATE: + if date > END_DATE: # Don't process, and halt return False - elif date < END_DATE: + elif date < START_DATE: # Don't process, but continue return True @@ -606,7 +605,7 @@ def email_sort(email): def valid_date(string): - """ Validate the given date string fetched from the --startdate argument """ + """ Validate the given date string fetched from the --enddate argument """ try: date = dateutil.parser.parse(string) except ValueError: @@ -820,12 +819,14 @@ if __name__ == "__main__": parser.add_argument("-t", "--timespan", choices=TIME_DELTAS.keys(), default='today', metavar='