mirror of https://github.com/mail-in-a-box/mailinabox.git synced 2024-11-23 02:27:05 +00:00

Merge branch 'main' into patch-1

Scott Sievert 2021-05-08 16:24:51 -05:00 committed by GitHub
commit 8311669378
26 changed files with 440 additions and 260 deletions

View File

@ -1,13 +1,46 @@
CHANGELOG
=========
In Development
--------------
* Migrate to the ECDSAP256SHA256 DNSSEC algorithm. If a DS record is set for any of your domain names that have DNS hosted on your box, you will be prompted by status checks to update the DS record.
* Roundcube's login cookie is updated to use a new encryption algorithm (AES-256-CBC instead of DES-EDE-CBC).
v0.53a (May 8, 2021)
--------------------
The download URL for Z-Push has been revised because the old URL stopped working.
v0.53 (April 12, 2021)
----------------------
Software updates:
* Upgraded Roundcube to version 1.4.11, addressing a security issue, and upgraded its desktop notifications plugin.
* Upgraded Z-Push (for Exchange/ActiveSync) to version 2.6.2.
Control panel:
* Backblaze B2 is now a supported backup protocol.
* Fixed an issue in the daily mail reports.
* Sort the Custom DNS by zone and qname, and add an option to go back to the old sort order (creation order).
Mail:
* Enable sending DMARC failure reports to senders that request them.
Setup:
* Fixed error when upgrading from Nextcloud 13.
v0.52 (January 31, 2021)
------------------------
Software updates:
* Upgraded Roundcube to version 1.4.10.
* Upgraded zpush to 2.6.1.
* Upgraded Z-Push to 2.6.1.
Mail:

View File

@ -58,7 +58,7 @@ Clone this repository and checkout the tag corresponding to the most recent rele
$ git clone https://github.com/mail-in-a-box/mailinabox
$ cd mailinabox
$ git checkout v0.52
$ git checkout v0.53a
Begin the installation.

View File

@ -277,17 +277,50 @@ def dns_set_secondary_nameserver():
@app.route('/dns/custom')
@authorized_personnel_only
def dns_get_records(qname=None, rtype=None):
from dns_update import get_custom_dns_config
return json_response([
{
"qname": r[0],
"rtype": r[1],
"value": r[2],
}
for r in get_custom_dns_config(env)
if r[0] != "_secondary_nameserver"
and (not qname or r[0] == qname)
and (not rtype or r[1] == rtype) ])
# Get the current set of custom DNS records.
from dns_update import get_custom_dns_config, get_dns_zones
records = get_custom_dns_config(env, only_real_records=True)
# Filter per the arguments for the more complex GET routes below.
records = [r for r in records
if (not qname or r[0] == qname)
and (not rtype or r[1] == rtype) ]
# Make a better data structure.
records = [
{
"qname": r[0],
"rtype": r[1],
"value": r[2],
"sort-order": { },
} for r in records ]
# To help with grouping by zone in qname sorting, label each record with which zone it is in.
# There's an inconsistency in how we handle zones in get_dns_zones and in sort_domains, so
# do this first before sorting the domains within the zones.
zones = utils.sort_domains([z[0] for z in get_dns_zones(env)], env)
for r in records:
for z in zones:
if r["qname"] == z or r["qname"].endswith("." + z):
r["zone"] = z
break
# Add sorting information. The 'created' order follows the order in the YAML file on disk,
# which tracks the order entries were added in the control panel since we append to the end.
# The 'qname' sort order sorts by our standard domain name sort (by zone then by qname),
# then by rtype, and last by the original order in the YAML file (since sorting by value
# may not make sense, unless we parse IP addresses, for example).
for i, r in enumerate(records):
r["sort-order"]["created"] = i
domain_sort_order = utils.sort_domains([r["qname"] for r in records], env)
for i, r in enumerate(sorted(records, key = lambda r : (
zones.index(r["zone"]),
domain_sort_order.index(r["qname"]),
r["rtype"]))):
r["sort-order"]["qname"] = i
# Return.
return json_response(records)
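For illustration, the JSON this route now returns might look like the following (domain names and values are hypothetical; each record carries its containing zone, when one matches, plus both sort positions computed above):
example_response = [
    {"qname": "example.com", "rtype": "A", "value": "203.0.113.1",
     "zone": "example.com", "sort-order": {"created": 0, "qname": 0}},
    {"qname": "www.example.com", "rtype": "CNAME", "value": "example.com.",
     "zone": "example.com", "sort-order": {"created": 1, "qname": 1}},
]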
@app.route('/dns/custom/<qname>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@app.route('/dns/custom/<qname>/<rtype>', methods=['GET', 'POST', 'PUT', 'DELETE'])

View File

@ -127,6 +127,10 @@ def build_zones(env):
from web_update import get_web_domains
www_redirect_domains = set(get_web_domains(env)) - set(get_web_domains(env, include_www_redirects=False))
# For MTA-STS, we'll need to check if the PRIMARY_HOSTNAME certificate is
# signed and valid. Check that now rather than repeatedly for each domain.
env["-primary-hostname-certificate-is-valid"] = is_domain_cert_signed_and_valid(env["PRIMARY_HOSTNAME"], env)
# Build DNS records for each zone.
for domain, zonefile in zonefiles:
# Build the records to put in the zone.
@ -322,24 +326,11 @@ def build_zone(domain, all_domains, additional_records, www_redirect_domains, en
# certificate in use is not valid (e.g. because it is self-signed and a valid certificate has not
# yet been provisioned). Since we cannot provision a certificate without A/AAAA records, we
# always set them --- only the TXT records depend on there being valid certificates.
mta_sts_enabled = False
mta_sts_records = [
("mta-sts", "A", env["PUBLIC_IP"], "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt."),
("mta-sts", "AAAA", env.get('PUBLIC_IPV6'), "Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt."),
]
if domain in get_mail_domains(env):
# Check that PRIMARY_HOSTNAME and the mta_sts domain both have valid certificates.
for d in (env['PRIMARY_HOSTNAME'], "mta-sts." + domain):
cert = get_ssl_certificates(env).get(d)
if not cert:
break # no certificate provisioned for this domain
cert_status = check_certificate(d, cert['certificate'], cert['private-key'])
if cert_status[0] != 'OK':
break # certificate is not valid
else:
# 'break' was not encountered above, so both domains are good
mta_sts_enabled = True
if mta_sts_enabled:
if domain in get_mail_domains(env) and env["-primary-hostname-certificate-is-valid"] and is_domain_cert_signed_and_valid("mta-sts." + domain, env):
# Compute an up-to-32-character hash of the policy file. We'll take a SHA-1 hash of the policy
# file (20 bytes) and encode it as base-64 (28 bytes, using alphanumeric alternate characters
# instead of '+' and '/' which are not allowed in an MTA-STS policy id) but then just take its
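A minimal sketch of how such a policy id could be computed; the helper name and the exact 20-character truncation are assumptions, since the diff is cut off before the actual code:
import base64, hashlib
def mta_sts_policy_id(policy_text):
    # SHA-1 of the policy file (20 bytes), base64-encoded with alphanumeric
    # substitutes for '+' and '/' (which are not allowed in an MTA-STS policy id),
    # then truncated so the id stays within the 32-character limit.
    digest = hashlib.sha1(policy_text.encode("utf-8")).digest()
    return base64.b64encode(digest, altchars=b"AA").decode("ascii")[:20]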
@ -365,6 +356,13 @@ def build_zone(domain, all_domains, additional_records, www_redirect_domains, en
return records
def is_domain_cert_signed_and_valid(domain, env):
cert = get_ssl_certificates(env).get(domain)
if not cert: return False # no certificate provisioned
cert_status = check_certificate(domain, cert['certificate'], cert['private-key'])
print(domain, cert_status)
return cert_status[0] == 'OK'
########################################################################
def build_tlsa_record(env):
@ -429,6 +427,7 @@ def build_sshfp_records():
# to the zone file (that trigger bumping the serial number). However,
# if SSH has been configured to listen on a nonstandard port, we must
# specify that port to sshkeyscan.
port = 22
with open('/etc/ssh/sshd_config', 'r') as f:
for line in f:
@ -439,8 +438,11 @@ def build_sshfp_records():
except ValueError:
pass
break
keys = shell("check_output", ["ssh-keyscan", "-t", "rsa,dsa,ecdsa,ed25519", "-p", str(port), "localhost"])
for key in sorted(keys.split("\n")):
keys = sorted(keys.split("\n"))
for key in keys:
if key.strip() == "" or key[0] == "#": continue
try:
host, keytype, pubkey = key.split(" ")
@ -460,13 +462,16 @@ def write_nsd_zone(domain, zonefile, records, env, force):
# On the $ORIGIN line, there's typically a ';' comment at the end explaining
# what the $ORIGIN line does. Any further data after the domain confuses
# ldns-signzone, however. It used to say '; default zone domain'.
#
# The SOA contact address for all of the domains on this system is hostmaster
# @ the PRIMARY_HOSTNAME. Hopefully that's legit.
#
# For the refresh through TTL fields, a good reference is:
# http://www.peerwisdom.org/2013/05/15/dns-understanding-the-soa-record/
#
# A hash of the available DNSSEC keys is added in a comment so that when
# the keys change we force a re-generation of the zone which triggers
# re-signing it.
zone = """
$ORIGIN {domain}.
@ -502,6 +507,9 @@ $TTL 86400 ; default time to live
value = v2
zone += value + "\n"
# Append a stable hash of DNSSEC signing keys in a comment.
zone += "\n; DNSSEC signing keys hash: {}\n".format(hash_dnssec_keys(domain, env))
# DNSSEC requires re-signing a zone periodically. That requires
# bumping the serial number even if no other records have changed.
# We don't see the DNSSEC records yet, so we have to figure out
@ -612,53 +620,77 @@ zone:
########################################################################
def dnssec_choose_algo(domain, env):
if '.' in domain and domain.rsplit('.')[-1] in \
("email", "guide", "fund", "be", "lv"):
# At GoDaddy, RSASHA256 is the only algorithm supported
# for .email and .guide.
# A variety of algorithms are supported for .fund. This
# is preferred.
# Gandi tells me that .be does not support RSASHA1-NSEC3-SHA1
# Nic.lv does not support RSASHA1-NSEC3-SHA1 for .lv tld's
return "RSASHA256"
def find_dnssec_signing_keys(domain, env):
# For key that we generated (one per algorithm)...
d = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec')
keyconfs = [f for f in os.listdir(d) if f.endswith(".conf")]
for keyconf in keyconfs:
# Load the file holding the KSK and ZSK key filenames.
keyconf_fn = os.path.join(d, keyconf)
keyinfo = load_env_vars_from_file(keyconf_fn)
# For any domain we were able to sign before, don't change the algorithm
# on existing users. We'll probably want to migrate to SHA256 later.
return "RSASHA1-NSEC3-SHA1"
# Skip this key if the conf file has a setting named DOMAINS,
# holding a comma-separated list of domain names, and if this
# domain is not in the list. This allows easily disabling a
# key by setting "DOMAINS=" or "DOMAINS=none", other than
# deleting the key's .conf file, which might result in the key
# being regenerated next upgrade. Keys should be disabled if
# they are not needed to reduce the DNSSEC query response size.
if "DOMAINS" in keyinfo and domain not in [dd.strip() for dd in keyinfo["DOMAINS"].split(",")]:
continue
for keytype in ("KSK", "ZSK"):
yield keytype, keyinfo[keytype]
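For reference, the per-algorithm .conf files read above (e.g. $STORAGE_ROOT/dns/dnssec/ECDSAP256SHA256.conf) are simple KEY=value files; a hypothetical example, with made-up key filenames and the optional DOMAINS line described above, might be:
    KSK=K_domain_.+013+12345
    ZSK=K_domain_.+013+54321
    DOMAINS=example.com,example.net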
def hash_dnssec_keys(domain, env):
# Create a stable (by sorting the items) hash of all of the private keys
# that will be used to sign this domain.
keydata = []
for keytype, keyfn in sorted(find_dnssec_signing_keys(domain, env)):
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ".private")
keydata.append(keytype)
keydata.append(keyfn)
with open(oldkeyfn, "r") as fr:
keydata.append( fr.read() )
keydata = "".join(keydata).encode("utf8")
return hashlib.sha1(keydata).hexdigest()
def sign_zone(domain, zonefile, env):
algo = dnssec_choose_algo(domain, env)
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))
# Sign the zone with all of the keys that were generated during
# setup so that the user can choose which to use in their DS record at
# their registrar, and also to support migration to newer algorithms.
# In order to use the same keys for all domains, we have to generate
# a new .key file with a DNSSEC record for the specific domain. We
# can reuse the same key, but it won't validate without a DNSSEC
# record specifically for the domain.
# In order to use the key files generated at setup which are for
# the domain _domain_, we have to re-write the files and place
# the actual domain name in it, so that ldns-signzone works.
#
# Copy the .key and .private files to /tmp to patch them up.
#
# Use os.umask and open().write() to securely create a copy that only
# we (root) can read.
files_to_kill = []
for key in ("KSK", "ZSK"):
if dnssec_keys.get(key, "").strip() == "": raise Exception("DNSSEC is not properly set up.")
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys[key])
newkeyfn = '/tmp/' + dnssec_keys[key].replace("_domain_", domain)
dnssec_keys[key] = newkeyfn
# Patch each key, storing the patched version in /tmp for now.
# Each key has a .key and .private file. Collect a list of filenames
# for all of the keys (and separately just the key-signing keys).
all_keys = []
ksk_keys = []
for keytype, keyfn in find_dnssec_signing_keys(domain, env):
newkeyfn = '/tmp/' + keyfn.replace("_domain_", domain)
for ext in (".private", ".key"):
if not os.path.exists(oldkeyfn + ext): raise Exception("DNSSEC is not properly set up.")
with open(oldkeyfn + ext, "r") as fr:
# Copy the .key and .private files to /tmp to patch them up.
#
# Use os.umask and open().write() to securely create a copy that only
# we (root) can read.
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ext)
with open(oldkeyfn, "r") as fr:
keydata = fr.read()
keydata = keydata.replace("_domain_", domain) # trick ldns-signkey into letting our generic key be used by this zone
fn = newkeyfn + ext
keydata = keydata.replace("_domain_", domain)
prev_umask = os.umask(0o77) # ensure written file is not world-readable
try:
with open(fn, "w") as fw:
with open(newkeyfn + ext, "w") as fw:
fw.write(keydata)
finally:
os.umask(prev_umask) # other files we write should be world-readable
files_to_kill.append(fn)
# Put the patched key filename base (without extension) into the list of keys we'll sign with.
all_keys.append(newkeyfn)
if keytype == "KSK": ksk_keys.append(newkeyfn)
# Do the signing.
expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d")
@ -671,32 +703,34 @@ def sign_zone(domain, zonefile, env):
# zonefile to sign
"/etc/nsd/zones/" + zonefile,
]
# keys to sign with (order doesn't matter -- it'll figure it out)
dnssec_keys["KSK"],
dnssec_keys["ZSK"],
])
+ all_keys
)
# Create a DS record based on the patched-up key files. The DS record is specific to the
# zone being signed, so we can't use the .ds files generated when we created the keys.
# The DS record points to the KSK only. Write this next to the zone file so we can
# get it later to give to the user with instructions on what to do with it.
#
# We want to be able to validate DS records too, but multiple forms may be valid depending
# on the digest type. So we'll write all (both) valid records. Only one DS record should
# actually be deployed. Preferably the first.
# Generate a DS record for each key. There are also several possible hash algorithms that may
# be used, so we'll pre-generate all for each key. One DS record per line. Only one
# needs to actually be deployed at the registrar. We'll select the preferred one
# in the status checks.
with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
for digest_type in ('2', '1'):
rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds",
"-n", # output to stdout
"-" + digest_type, # 1=SHA1, 2=SHA256
dnssec_keys["KSK"] + ".key"
])
f.write(rr_ds)
for key in ksk_keys:
for digest_type in ('1', '2', '4'):
rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds",
"-n", # output to stdout
"-" + digest_type, # 1=SHA1, 2=SHA256, 4=SHA384
key + ".key"
])
f.write(rr_ds)
# Remove our temporary file.
for fn in files_to_kill:
os.unlink(fn)
# Remove the temporary patched key files.
for fn in all_keys:
os.unlink(fn + ".private")
os.unlink(fn + ".key")
########################################################################
@ -753,7 +787,7 @@ def write_opendkim_tables(domains, env):
########################################################################
def get_custom_dns_config(env):
def get_custom_dns_config(env, only_real_records=False):
try:
custom_dns = rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
if not isinstance(custom_dns, dict): raise ValueError() # caught below
@ -761,6 +795,8 @@ def get_custom_dns_config(env):
return [ ]
for qname, value in custom_dns.items():
if qname == "_secondary_nameserver" and only_real_records: continue # skip fake record
# Short form. Mapping a domain name to a string is short-hand
# for creating A records.
if isinstance(value, str):

View File

@ -44,9 +44,8 @@ TIME_DELTAS = OrderedDict([
('today', datetime.datetime.now() - datetime.datetime.now().replace(hour=0, minute=0, second=0))
])
# Start date > end date!
START_DATE = datetime.datetime.now()
END_DATE = None
END_DATE = NOW = datetime.datetime.now()
START_DATE = None
VERBOSE = False
@ -121,7 +120,7 @@ def scan_mail_log(env):
pass
print("Scanning logs from {:%Y-%m-%d %H:%M:%S} to {:%Y-%m-%d %H:%M:%S}".format(
END_DATE, START_DATE)
START_DATE, END_DATE)
)
# Scan the lines in the log files until the date goes out of range
@ -253,7 +252,7 @@ def scan_mail_log(env):
if collector["postgrey"]:
msg = "Greylisted Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}"
print_header(msg.format(END_DATE, START_DATE))
print_header(msg.format(START_DATE, END_DATE))
print(textwrap.fill(
"The following mail was greylisted, meaning the emails were temporarily rejected. "
@ -291,7 +290,7 @@ def scan_mail_log(env):
if collector["rejected"]:
msg = "Blocked Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}"
print_header(msg.format(END_DATE, START_DATE))
print_header(msg.format(START_DATE, END_DATE))
data = OrderedDict(sorted(collector["rejected"].items(), key=email_sort))
@ -345,19 +344,19 @@ def scan_mail_log_line(line, collector):
# Replaced the dateutil parser with a less clever way of parsing that is roughly 4 times faster.
# date = dateutil.parser.parse(date)
# date = datetime.datetime.strptime(date, '%b %d %H:%M:%S')
# date = date.replace(START_DATE.year)
# strptime fails on Feb 29 if correct year is not provided. See https://bugs.python.org/issue26460
date = datetime.datetime.strptime(str(START_DATE.year) + ' ' + date, '%Y %b %d %H:%M:%S')
# print("date:", date)
# strptime fails on Feb 29 with ValueError: day is out of range for month if correct year is not provided.
# See https://bugs.python.org/issue26460
date = datetime.datetime.strptime(str(NOW.year) + ' ' + date, '%Y %b %d %H:%M:%S')
# if log date in future, step back a year
if date > NOW:
date = date.replace(year = NOW.year - 1)
#print("date:", date)
# Check if the found date is within the time span we are scanning
# END_DATE < START_DATE
if date > START_DATE:
if date > END_DATE:
# Don't process, and halt
return False
elif date < END_DATE:
elif date < START_DATE:
# Don't process, but continue
return True
@ -606,7 +605,7 @@ def email_sort(email):
def valid_date(string):
""" Validate the given date string fetched from the --startdate argument """
""" Validate the given date string fetched from the --enddate argument """
try:
date = dateutil.parser.parse(string)
except ValueError:
@ -820,12 +819,14 @@ if __name__ == "__main__":
parser.add_argument("-t", "--timespan", choices=TIME_DELTAS.keys(), default='today',
metavar='<time span>',
help="Time span to scan, going back from the start date. Possible values: "
help="Time span to scan, going back from the end date. Possible values: "
"{}. Defaults to 'today'.".format(", ".join(list(TIME_DELTAS.keys()))))
parser.add_argument("-d", "--startdate", action="store", dest="startdate",
type=valid_date, metavar='<start date>',
help="Date and time to start scanning the log file from. If no date is "
"provided, scanning will start from the current date and time.")
# keep the --startdate arg for backward compatibility
parser.add_argument("-d", "--enddate", "--startdate", action="store", dest="enddate",
type=valid_date, metavar='<end date>',
help="Date and time to end scanning the log file. If no date is "
"provided, scanning will end at the current date and time. "
"Alias --startdate is for compatibility.")
parser.add_argument("-u", "--users", action="store", dest="users",
metavar='<email1,email2,email...>',
help="Comma separated list of (partial) email addresses to filter the "
@ -837,13 +838,13 @@ if __name__ == "__main__":
args = parser.parse_args()
if args.startdate is not None:
START_DATE = args.startdate
if args.enddate is not None:
END_DATE = args.enddate
if args.timespan == 'today':
args.timespan = 'day'
print("Setting start date to {}".format(START_DATE))
print("Setting end date to {}".format(END_DATE))
END_DATE = START_DATE - TIME_DELTAS[args.timespan]
START_DATE = END_DATE - TIME_DELTAS[args.timespan]
VERBOSE = args.verbose
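To make the renamed variables concrete, here is a sketch of the resulting scan window, assuming a hypothetical one-week timespan entry and an explicit --enddate:
import datetime
END_DATE = datetime.datetime(2021, 5, 8)             # from --enddate (defaults to now)
START_DATE = END_DATE - datetime.timedelta(weeks=1)  # END_DATE - TIME_DELTAS[timespan]
# scan_mail_log_line() keeps lines whose parsed date falls between START_DATE and
# END_DATE, halting the scan once a line is newer than END_DATE.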

View File

@ -42,7 +42,7 @@ def get_services():
{ "name": "HTTPS Web (nginx)", "port": 443, "public": True, },
]
def run_checks(rounded_values, env, output, pool):
def run_checks(rounded_values, env, output, pool, domains_to_check=None):
# run systems checks
output.add_heading("System")
@ -63,7 +63,7 @@ def run_checks(rounded_values, env, output, pool):
# perform other checks asynchronously
run_network_checks(env, output)
run_domain_checks(rounded_values, env, output, pool)
run_domain_checks(rounded_values, env, output, pool, domains_to_check=domains_to_check)
def get_ssh_port():
# Returns ssh port
@ -300,7 +300,7 @@ def run_network_checks(env, output):
which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
% (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))
def run_domain_checks(rounded_time, env, output, pool):
def run_domain_checks(rounded_time, env, output, pool, domains_to_check=None):
# Get the list of domains we handle mail for.
mail_domains = get_mail_domains(env)
@ -311,7 +311,8 @@ def run_domain_checks(rounded_time, env, output, pool):
# Get the list of domains we serve HTTPS for.
web_domains = set(get_web_domains(env))
domains_to_check = mail_domains | dns_domains | web_domains
if domains_to_check is None:
domains_to_check = mail_domains | dns_domains | web_domains
# Remove "www", "autoconfig", "autodiscover", and "mta-sts" subdomains, which we group with their parent,
# if their parent is in the domains to check list.
@ -557,62 +558,104 @@ def check_dns_zone_suggestions(domain, env, output, dns_zonefiles, domains_with_
def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
# See if the domain has a DS record set at the registrar. The DS record may have
# several forms. We have to be prepared to check for any valid record. We've
# pre-generated all of the valid digests --- read them in.
# See if the domain has a DS record set at the registrar. The DS record must
# match one of the keys that we've used to sign the zone. It may use one of
# several hashing algorithms. We've pre-generated all possible valid DS
# records, although some will be preferred.
alg_name_map = { '7': 'RSASHA1-NSEC3-SHA1', '8': 'RSASHA256', '13': 'ECDSAP256SHA256' }
digalg_name_map = { '1': 'SHA-1', '2': 'SHA-256', '4': 'SHA-384' }
# Read in the pre-generated DS records
expected_ds_records = { }
ds_file = '/etc/nsd/zones/' + dns_zonefiles[domain] + '.ds'
if not os.path.exists(ds_file): return # Domain is in our database but DNS has not yet been updated.
ds_correct = open(ds_file).read().strip().split("\n")
digests = { }
for rr_ds in ds_correct:
ds_keytag, ds_alg, ds_digalg, ds_digest = rr_ds.split("\t")[4].split(" ")
digests[ds_digalg] = ds_digest
with open(ds_file) as f:
for rr_ds in f:
rr_ds = rr_ds.rstrip()
ds_keytag, ds_alg, ds_digalg, ds_digest = rr_ds.split("\t")[4].split(" ")
# Some registrars may want the public key so they can compute the digest. The DS
# record that we suggest using is for the KSK (and that's how the DS records were generated).
alg_name_map = { '7': 'RSASHA1-NSEC3-SHA1', '8': 'RSASHA256' }
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
dnsssec_pubkey = open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key')).read().split("\t")[3].split(" ")[3]
# Some registrars may want the public key so they can compute the digest. The DS
# record that we suggest using is for the KSK (and that's how the DS records were generated).
# We'll also give the nice name for the key algorithm.
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
dnsssec_pubkey = open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key')).read().split("\t")[3].split(" ")[3]
expected_ds_records[ (ds_keytag, ds_alg, ds_digalg, ds_digest) ] = {
"record": rr_ds,
"keytag": ds_keytag,
"alg": ds_alg,
"alg_name": alg_name_map[ds_alg],
"digalg": ds_digalg,
"digalg_name": digalg_name_map[ds_digalg],
"digest": ds_digest,
"pubkey": dnsssec_pubkey,
}
# Query public DNS for the DS record at the registrar.
ds = query_dns(domain, "DS", nxdomain=None)
ds_looks_valid = ds and len(ds.split(" ")) == 4
if ds_looks_valid: ds = ds.split(" ")
if ds_looks_valid and ds[0] == ds_keytag and ds[1] == ds_alg and ds[3] == digests.get(ds[2]):
if is_checking_primary: return
output.print_ok("DNSSEC 'DS' record is set correctly at registrar.")
ds = query_dns(domain, "DS", nxdomain=None, as_list=True)
if ds is None or isinstance(ds, str): ds = []
# There may be more than one record, so we get the result as a list.
# Filter out records that don't look valid, just in case, and split
# each record on spaces.
ds = [tuple(str(rr).split(" ")) for rr in ds if len(str(rr).split(" ")) == 4]
if len(ds) == 0:
output.print_warning("""This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC. See below for instructions.""")
else:
if ds == None:
if is_checking_primary: return
output.print_warning("""This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC.
To set a DS record, you must follow the instructions provided by your domain name registrar and provide to them this information:""")
matched_ds = set(ds) & set(expected_ds_records)
if matched_ds:
# At least one DS record matches one that corresponds with one of the ways we signed
# the zone, so it is valid.
#
# But it may not be preferred. Only algorithm 13 is preferred. Warn if any of the
# matched zones uses a different algorithm.
if set(r[1] for r in matched_ds) == { '13' }: # all are alg 13
output.print_ok("DNSSEC 'DS' record is set correctly at registrar.")
return
elif '13' in set(r[1] for r in matched_ds): # some but not all are alg 13
output.print_ok("DNSSEC 'DS' record is set correctly at registrar. (Records using algorithm other than ECDSAP256SHA256 should be removed.)")
return
else: # no record uses alg 13
output.print_warning("DNSSEC 'DS' record set at registrar is valid but should be updated to ECDSAP256SHA256 (see below).")
else:
if is_checking_primary:
output.print_error("""The DNSSEC 'DS' record for %s is incorrect. See further details below.""" % domain)
return
output.print_error("""This domain's DNSSEC DS record is incorrect. The chain of trust is broken between the public DNS system
and this machine's DNS server. It may take several hours for public DNS to update after a change. If you did not recently
make a change (and are not using this box for DNS aka external DNS), you must resolve this immediately by following the
instructions provided by your domain name registrar and provide to them this information:""")
make a change (and are not using this box for DNS aka external DNS), you must resolve this immediately (see below).""")
output.print_line("""Follow the instructions provided by your domain name registrar to set a DS record.
Registrars support different sorts of DS records. Use the first option that works:""")
preferred_ds_order = [(7, 1), (7, 2), (8, 4), (13, 4), (8, 1), (8, 2), (13, 1), (13, 2)] # low to high
def preferred_ds_order_func(ds_suggestion):
k = (int(ds_suggestion['alg']), int(ds_suggestion['digalg']))
if k in preferred_ds_order:
return preferred_ds_order.index(k)
return -1 # index before first item
output.print_line("")
for i, ds_suggestion in enumerate(sorted(expected_ds_records.values(), key=preferred_ds_order_func, reverse=True)):
output.print_line("")
output.print_line("Key Tag: " + ds_keytag + ("" if not ds_looks_valid or ds[0] == ds_keytag else " (Got '%s')" % ds[0]))
output.print_line("Option " + str(i+1) + ":")
output.print_line("----------")
output.print_line("Key Tag: " + ds_suggestion['keytag'])
output.print_line("Key Flags: KSK")
output.print_line(
("Algorithm: %s / %s" % (ds_alg, alg_name_map[ds_alg]))
+ ("" if not ds_looks_valid or ds[1] == ds_alg else " (Got '%s')" % ds[1]))
# see http://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml
output.print_line("Digest Type: 2 / SHA-256")
# http://www.ietf.org/assignments/ds-rr-types/ds-rr-types.xml
output.print_line("Digest: " + digests['2'])
if ds_looks_valid and ds[3] != digests.get(ds[2]):
output.print_line("(Got digest type %s and digest %s which do not match.)" % (ds[2], ds[3]))
output.print_line("Algorithm: %s / %s" % (ds_suggestion['alg'], ds_suggestion['alg_name']))
output.print_line("Digest Type: %s / %s" % (ds_suggestion['digalg'], ds_suggestion['digalg_name']))
output.print_line("Digest: " + ds_suggestion['digest'])
output.print_line("Public Key: ")
output.print_line(dnsssec_pubkey, monospace=True)
output.print_line(ds_suggestion['pubkey'], monospace=True)
output.print_line("")
output.print_line("Bulk/Record Format:")
output.print_line("" + ds_correct[0])
output.print_line(ds_suggestion['record'], monospace=True)
if len(ds) > 0:
output.print_line("")
output.print_line("This DS record is not relevant when using external DNS.")
output.print_line("The above DS record is not relevant when using external DNS. The DS record is currently set to:")
output.print_line("")
for rr in ds:
output.print_line("Key Tag: {0}, Algorithm: {1}, Digest Type: {2}, Digest: {3}".format(*rr))
def check_mail_domain(domain, env, output):
# Check the MX record.
@ -622,6 +665,8 @@ def check_mail_domain(domain, env, output):
if mx is None:
mxhost = None
elif mx == "[timeout]":
mxhost = None
else:
# query_dns returns a semicolon-delimited list
# of priority-host pairs.
@ -714,7 +759,7 @@ def check_web_domain(domain, rounded_time, ssl_certificates, env, output):
# website for also needs a signed certificate.
check_ssl_cert(domain, rounded_time, ssl_certificates, env, output)
def query_dns(qname, rtype, nxdomain='[Not Set]', at=None):
def query_dns(qname, rtype, nxdomain='[Not Set]', at=None, as_list=False):
# Make the qname absolute by appending a period. Without this, dns.resolver.query
# will fall back a failed lookup to a second query with this machine's hostname
# appended. This has been causing some false-positive Spamhaus reports. The
@ -751,6 +796,9 @@ def query_dns(qname, rtype, nxdomain='[Not Set]', at=None):
if rtype in ("A", "AAAA"):
response = [normalize_ip(str(r)) for r in response]
if as_list:
return response
# There may be multiple answers; concatenate the response. Remove trailing
# periods from responses since that's how qnames are encoded in DNS but is
# confusing for us. The order of the answers doesn't matter, so sort so we
@ -1051,3 +1099,7 @@ if __name__ == "__main__":
elif sys.argv[1] == "--version":
print(what_version_is_this(env))
elif sys.argv[1] == "--only":
with multiprocessing.pool.Pool(processes=10) as pool:
run_checks(False, env, ConsoleOutput(), pool, domains_to_check=sys.argv[2:])
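Presumably this new entry point is invoked as something like "status_checks.py --only example.com other.example" (the script path is an assumption; only the --only flag and the trailing domain arguments appear in this diff), running the usual checks restricted to the listed domains.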

View File

@ -153,8 +153,8 @@ function show_aliases() {
function(r) {
$('#alias_table tbody').html("");
for (var i = 0; i < r.length; i++) {
var hdr = $("<tr><td colspan='3'><h4/></td></tr>");
hdr.find('h4').text(r[i].domain);
var hdr = $("<tr><th colspan='4' style='background-color: #EEE'></th></tr>");
hdr.find('th').text(r[i].domain);
$('#alias_table tbody').append(hdr);
for (var k = 0; k < r[i].aliases.length; k++) {

View File

@ -57,7 +57,13 @@
</div>
</form>
<table id="custom-dns-current" class="table" style="width: auto; display: none">
<div style="text-align: right; font-size: 90%; margin-top: 1em;">
sort by:
<a href="#" onclick="window.miab_custom_dns_data_sort_order='qname'; show_current_custom_dns_update_after_sort(); return false;">domain name</a>
|
<a href="#" onclick="window.miab_custom_dns_data_sort_order='created'; show_current_custom_dns_update_after_sort(); return false;">created</a>
</div>
<table id="custom-dns-current" class="table" style="width: auto; display: none; margin-top: 0;">
<thead>
<th>Domain Name</th>
<th>Record Type</th>
@ -192,36 +198,38 @@ function show_current_custom_dns() {
$('#custom-dns-current').fadeIn();
else
$('#custom-dns-current').fadeOut();
window.miab_custom_dns_data = data;
show_current_custom_dns_update_after_sort();
});
}
var reverse_fqdn = function(el) {
el.qname = el.qname.split('.').reverse().join('.');
return el;
}
var sort = function(a, b) {
if(a.qname === b.qname) {
if(a.rtype === b.rtype) {
return a.value > b.value ? 1 : -1;
}
return a.rtype > b.rtype ? 1 : -1;
}
return a.qname > b.qname ? 1 : -1;
}
function show_current_custom_dns_update_after_sort() {
var data = window.miab_custom_dns_data;
var sort_key = window.miab_custom_dns_data_sort_order || "qname";
data = data.map(reverse_fqdn).sort(sort).map(reverse_fqdn);
data.sort(function(a, b) { return a["sort-order"][sort_key] - b["sort-order"][sort_key] });
$('#custom-dns-current').find("tbody").text('');
var tbody = $('#custom-dns-current').find("tbody");
tbody.text('');
var last_zone = null;
for (var i = 0; i < data.length; i++) {
if (sort_key == "qname" && data[i].zone != last_zone) {
var r = $("<tr><th colspan=4 style='background-color: #EEE'></th></tr>");
r.find("th").text(data[i].zone);
tbody.append(r);
last_zone = data[i].zone;
}
var tr = $("<tr/>");
$('#custom-dns-current').find("tbody").append(tr);
tbody.append(tr);
tr.attr('data-qname', data[i].qname);
tr.attr('data-rtype', data[i].rtype);
tr.attr('data-value', data[i].value);
tr.append($('<td class="long"/>').text(data[i].qname));
tr.append($('<td/>').text(data[i].rtype));
tr.append($('<td class="long"/>').text(data[i].value));
tr.append($('<td class="long" style="max-width: 40em"/>').text(data[i].value));
tr.append($('<td>[<a href="#" onclick="return delete_custom_dns_record(this)">delete</a>]</td>'));
}
});
}
function delete_custom_dns_record(elem) {

View File

@ -5,7 +5,7 @@
<h2>Backup Status</h2>
<p>The box makes an incremental backup each night. By default the backup is stored on the machine itself, but you can also have it stored on Amazon S3.</p>
<p>The box makes an incremental backup each night. By default the backup is stored on the machine itself, but you can also store it on S3-compatible services like Amazon Web Services (AWS).</p>
<h3>Configuration</h3>
@ -17,7 +17,8 @@
<option value="off">Nowhere (Disable Backups)</option>
<option value="local">{{hostname}}</option>
<option value="rsync">rsync</option>
<option value="s3">Amazon S3</option>
<option value="s3">S3 (Amazon or compatible) </option>
<option value="b2">Backblaze B2</option>
</select>
</div>
</div>
@ -72,8 +73,8 @@
<!-- S3 BACKUP -->
<div class="form-group backup-target-s3">
<div class="col-sm-10 col-sm-offset-2">
<p>Backups are stored in an Amazon Web Services S3 bucket. You must have an AWS account already.</p>
<p>You MUST manually copy the encryption password from <tt class="backup-encpassword-file"></tt> to a safe and secure location. You will need this file to decrypt backup files. It is NOT stored in your Amazon S3 bucket.</p>
<p>Backups are stored in an S3-compatible bucket. You must have an AWS or other S3 service account already.</p>
<p>You MUST manually copy the encryption password from <tt class="backup-encpassword-file"></tt> to a safe and secure location. You will need this file to decrypt backup files. It is <b>NOT</b> stored in your S3 bucket.</p>
</div>
</div>
<div class="form-group backup-target-s3">
@ -83,7 +84,7 @@
{% for name, host in backup_s3_hosts %}
<option value="{{host}}">{{name}}</option>
{% endfor %}
<option value="other">Other</option>
<option value="other">Other (non AWS)</option>
</select>
</div>
</div>

View File

@ -1,7 +1,6 @@
<h2>Users</h2>
<style>
#user_table h4 { margin: 1em 0 0 0; }
#user_table tr.account_inactive td.address { color: #888; text-decoration: line-through; }
#user_table .actions { margin-top: .33em; font-size: 95%; }
#user_table .account_inactive .if_active { display: none; }
@ -134,8 +133,8 @@ function show_users() {
function(r) {
$('#user_table tbody').html("");
for (var i = 0; i < r.length; i++) {
var hdr = $("<tr><td colspan='3'><h4/></td></tr>");
hdr.find('h4').text(r[i].domain);
var hdr = $("<tr><th colspan='2' style='background-color: #EEE'></th></tr>");
hdr.find('th').text(r[i].domain);
$('#user_table tbody').append(hdr);
for (var k = 0; k < r[i].users.length; k++) {

View File

@ -160,17 +160,27 @@ def make_domain_config(domain, templates, ssl_certificates, env):
for path, url in yaml.get("proxies", {}).items():
# Parse some flags in the fragment of the URL.
pass_http_host_header = False
proxy_redirect_off = False
frame_options_header_sameorigin = False
m = re.search("#(.*)$", url)
if m:
for flag in m.group(1).split(","):
if flag == "pass-http-host":
pass_http_host_header = True
elif flag == "no-proxy-redirect":
proxy_redirect_off = True
elif flag == "frame-options-sameorigin":
frame_options_header_sameorigin = True
url = re.sub("#(.*)$", "", url)
nginx_conf_extra += "\tlocation %s {" % path
nginx_conf_extra += "\n\t\tproxy_pass %s;" % url
if proxy_redirect_off:
nginx_conf_extra += "\n\t\tproxy_redirect off;"
if pass_http_host_header:
nginx_conf_extra += "\n\t\tproxy_set_header Host $http_host;"
if frame_options_header_sameorigin:
nginx_conf_extra += "\n\t\tproxy_set_header X-Frame-Options SAMEORIGIN;"
nginx_conf_extra += "\n\t\tproxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;"
nginx_conf_extra += "\n\t\tproxy_set_header X-Forwarded-Host $http_host;"
nginx_conf_extra += "\n\t\tproxy_set_header X-Forwarded-Proto $scheme;"
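For illustration, a proxies entry carrying these flags in its URL fragment might look like the hypothetical value below (the path, the backend address, and the settings file it is read from are not shown in this diff); the loop above would then emit proxy_redirect off and the X-Frame-Options SAMEORIGIN header for that location block:
# Hypothetical value of yaml.get("proxies", {}), flags comma-separated in the fragment:
# {"/somepath": "http://127.0.0.1:8000#no-proxy-redirect,frame-options-sameorigin"}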
@ -251,3 +261,4 @@ def get_web_domains_info(env):
}
for domain in get_web_domains(env)
]

View File

@ -18,11 +18,11 @@ if [ -z "$TAG" ]; then
# space, but if we put it in a comment it would confuse the status checks!)
# to get the latest version, so the first such line must be the one that we
# want to display in status checks.
if [ "`lsb_release -d | sed 's/.*:\s*//' | sed 's/18\.04\.[0-9]/18.04/' `" == "Ubuntu 18.04 LTS" ]; then
if [ "$(lsb_release -d | sed 's/.*:\s*//' | sed 's/18\.04\.[0-9]/18.04/' )" == "Ubuntu 18.04 LTS" ]; then
# This machine is running Ubuntu 18.04.
TAG=v0.52
TAG=v0.53a
elif [ "`lsb_release -d | sed 's/.*:\s*//' | sed 's/14\.04\.[0-9]/14.04/' `" == "Ubuntu 14.04 LTS" ]; then
elif [ "$(lsb_release -d | sed 's/.*:\s*//' | sed 's/14\.04\.[0-9]/14.04/' )" == "Ubuntu 14.04 LTS" ]; then
# This machine is running Ubuntu 14.04.
echo "You are installing the last version of Mail-in-a-Box that will"
echo "support Ubuntu 14.04. If this is a new installation of Mail-in-a-Box,"
@ -68,11 +68,11 @@ fi
cd $HOME/mailinabox
# Update it.
if [ "$TAG" != `git describe` ]; then
if [ "$TAG" != $(git describe) ]; then
echo Updating Mail-in-a-Box to $TAG . . .
git fetch --depth 1 --force --prune origin tag $TAG
if ! git checkout -q $TAG; then
echo "Update failed. Did you modify something in `pwd`?"
echo "Update failed. Did you modify something in $(pwd)?"
exit 1
fi
echo

View File

@ -62,7 +62,8 @@ chmod go-rwx $STORAGE_ROOT/mail/dkim
tools/editconf.py /etc/opendmarc.conf -s \
"Syslog=true" \
"Socket=inet:8893@[127.0.0.1]"
"Socket=inet:8893@[127.0.0.1]" \
"FailureReports=true"
# SPFIgnoreResults causes the filter to ignore any SPF results in the header
# of the message. This is useful if you want the filter to perform SPF checks
@ -81,6 +82,12 @@ tools/editconf.py /etc/opendmarc.conf -s \
tools/editconf.py /etc/opendmarc.conf -s \
"SPFSelfValidate=true"
# Enables generation of failure reports for sending domains that publish a
# "none" policy.
tools/editconf.py /etc/opendmarc.conf -s \
"FailureReportsOnNone=true"
# AlwaysAddARHeader Adds an "Authentication-Results:" header field even to
# unsigned messages from domains with no "signs all" policy. The reported DKIM
# result will be "none" in such cases. Normally unsigned mail from non-strict

View File

@ -68,27 +68,15 @@ echo "include: /etc/nsd/zones.conf" >> /etc/nsd/nsd.conf;
mkdir -p "$STORAGE_ROOT/dns/dnssec";
# TLDs don't all support the same algorithms, so we'll generate keys using a few
# different algorithms. RSASHA1-NSEC3-SHA1 was possibly the first widely used
# algorithm that supported NSEC3, which is a security best practice. However TLDs
# will probably be moving away from it to a SHA256-based algorithm.
#
# Supports `RSASHA1-NSEC3-SHA1` (didn't test with `RSASHA256`):
#
# * .info
# * .me
#
# Requires `RSASHA256`
#
# * .email
# * .guide
#
# Supports `RSASHA256` (and defaulting to this)
#
# * .fund
# TLDs, registrars, and validating nameservers don't all support the same algorithms,
# so we'll generate keys using a few different algorithms so that dns_update.py can
# choose which algorithm to use when generating the zonefiles. See #1953 for recent
# discussion. Files for previously used algorithms (i.e. RSASHA1-NSEC3-SHA1) may still
# be in the output directory, and we'll continue to support signing zones with them
# so that trust isn't broken with deployed DS records, but we won't generate those
# keys on new systems.
FIRST=1 #NODOC
for algo in RSASHA1-NSEC3-SHA1 RSASHA256; do
for algo in RSASHA256 ECDSAP256SHA256; do
if [ ! -f "$STORAGE_ROOT/dns/dnssec/$algo.conf" ]; then
if [ $FIRST == 1 ]; then
echo "Generating DNSSEC signing keys..."
@ -97,7 +85,7 @@ if [ ! -f "$STORAGE_ROOT/dns/dnssec/$algo.conf" ]; then
# Create the Key-Signing Key (KSK) (with `-k`) which is the so-called
# Secure Entry Point. The domain name we provide ("_domain_") doesn't
# matter -- we'll use the same keys for all our domains.
# matter -- we'll use the same keys for all our domains.
#
# `ldns-keygen` outputs the new key's filename to stdout, which
# we're capturing into the `KSK` variable.
@ -105,17 +93,22 @@ if [ ! -f "$STORAGE_ROOT/dns/dnssec/$algo.conf" ]; then
# ldns-keygen uses /dev/random for generating random numbers by default.
# This is slow and unnecessary if we ensure /dev/urandom is seeded properly,
# so we use /dev/urandom. See system.sh for an explanation. See #596, #115.
KSK=$(umask 077; cd $STORAGE_ROOT/dns/dnssec; ldns-keygen -r /dev/urandom -a $algo -b 2048 -k _domain_);
# (This previously used -b 2048 but it's unclear if this setting makes sense
# for non-RSA keys, so it's removed. The RSA-based keys are not recommended
# anymore anyway.)
KSK=$(umask 077; cd $STORAGE_ROOT/dns/dnssec; ldns-keygen -r /dev/urandom -a $algo -k _domain_);
# Now create a Zone-Signing Key (ZSK) which is expected to be
# rotated more often than a KSK, although we have no plans to
# rotate it (and doing so would be difficult to do without
# disturbing DNS availability.) Omit `-k` and use a shorter key length.
ZSK=$(umask 077; cd $STORAGE_ROOT/dns/dnssec; ldns-keygen -r /dev/urandom -a $algo -b 1024 _domain_);
# disturbing DNS availability.) Omit `-k`.
# (This previously used -b 1024 but it's unclear if this setting makes sense
# for non-RSA keys, so it's removed.)
ZSK=$(umask 077; cd $STORAGE_ROOT/dns/dnssec; ldns-keygen -r /dev/urandom -a $algo _domain_);
# These generate two sets of files like:
#
# * `K_domain_.+007+08882.ds`: DS record normally provided to domain name registrar (but it's actually invalid with `_domain_`)
# * `K_domain_.+007+08882.ds`: DS record normally provided to domain name registrar (but it's actually invalid with `_domain_` so we don't use this file)
# * `K_domain_.+007+08882.key`: public key
# * `K_domain_.+007+08882.private`: private key (secret!)
@ -139,7 +132,7 @@ cat > /etc/cron.daily/mailinabox-dnssec << EOF;
#!/bin/bash
# Mail-in-a-Box
# Re-sign any DNS zones with DNSSEC because the signatures expire periodically.
`pwd`/tools/dns_update
$(pwd)/tools/dns_update
EOF
chmod +x /etc/cron.daily/mailinabox-dnssec

View File

@ -1,5 +1,5 @@
# If there aren't any mail users yet, create one.
if [ -z "`management/cli.py user`" ]; then
if [ -z "$(management/cli.py user)" ]; then
# The output of "management/cli.py user" is a list of mail users. If there
# aren't any yet, it'll be empty.
@ -10,7 +10,7 @@ if [ -z "`management/cli.py user`" ]; then
input_box "Mail Account" \
"Let's create your first mail account.
\n\nWhat email address do you want?" \
me@`get_default_hostname` \
me@$(get_default_hostname) \
EMAIL_ADDR
if [ -z "$EMAIL_ADDR" ]; then

View File

@ -9,12 +9,12 @@ function hide_output {
# and returns a non-zero exit code.
# Get a temporary file.
OUTPUT=$(tempfile)
OUTPUT=$(mktemp)
# Execute command, redirecting stderr/stdout to the temporary file. Since we
# check the return code ourselves, disable 'set -e' temporarily.
set +e
$@ &> $OUTPUT
"$@" &> $OUTPUT
E=$?
set -e
@ -22,7 +22,7 @@ function hide_output {
if [ $E != 0 ]; then
# Something failed.
echo
echo FAILED: $@
echo FAILED: "$@"
echo -----------------------------------------
cat $OUTPUT
echo -----------------------------------------
@ -53,8 +53,7 @@ function apt_install {
# install' for all of the packages. Calling `dpkg` on each package is slow,
# and doesn't affect what we actually do, except in the messages, so let's
# not do that anymore.
PACKAGES=$@
apt_get_quiet install $PACKAGES
apt_get_quiet install "$@"
}
function get_default_hostname {

View File

@ -45,8 +45,8 @@ apt_install \
# - https://www.dovecot.org/list/dovecot/2012-August/137569.html
# - https://www.dovecot.org/list/dovecot/2011-December/132455.html
tools/editconf.py /etc/dovecot/conf.d/10-master.conf \
default_process_limit=$(echo "`nproc` * 250" | bc) \
default_vsz_limit=$(echo "`free -tm | tail -1 | awk '{print $2}'` / 3" | bc)M \
default_process_limit=$(echo "$(nproc) * 250" | bc) \
default_vsz_limit=$(echo "$(free -tm | tail -1 | awk '{print $2}') / 3" | bc)M \
log_path=/var/log/mail.log
# The inotify `max_user_instances` default is 128, which constrains
@ -183,6 +183,7 @@ plugin {
sieve_after = $STORAGE_ROOT/mail/sieve/global_after
sieve = $STORAGE_ROOT/mail/sieve/%d/%n.sieve
sieve_dir = $STORAGE_ROOT/mail/sieve/%d/%n
sieve_redirect_envelope_from = recipient
}
EOF

View File

@ -191,7 +191,7 @@ tools/editconf.py /etc/postfix/main.cf \
#
# In a basic setup we would pass mail directly to Dovecot by setting
# virtual_transport to `lmtp:unix:private/dovecot-lmtp`.
tools/editconf.py /etc/postfix/main.cf virtual_transport=lmtp:[127.0.0.1]:10025
tools/editconf.py /etc/postfix/main.cf "virtual_transport=lmtp:[127.0.0.1]:10025"
# Because of a spampd bug, limit the number of recipients in each connection.
# See https://github.com/mail-in-a-box/mailinabox/issues/1523.
tools/editconf.py /etc/postfix/main.cf lmtp_destination_recipient_limit=1

View File

@ -27,9 +27,10 @@ done
# provision free TLS certificates.
apt_install duplicity python-pip virtualenv certbot
# b2sdk is used for backblaze backups.
# boto is used for amazon aws backups.
# Both are installed outside the pipenv, so they can be used by duplicity
hide_output pip3 install --upgrade boto
hide_output pip3 install --upgrade b2sdk boto
# Create a virtualenv for the installation of Python 3 packages
# used by the management daemon.
@ -50,7 +51,7 @@ hide_output $venv/bin/pip install --upgrade \
rtyaml "email_validator>=1.0.0" "exclusiveprocess" \
flask dnspython python-dateutil \
qrcode[pil] pyotp \
"idna>=2.0.0" "cryptography==2.2.2" boto psutil postfix-mta-sts-resolver
"idna>=2.0.0" "cryptography==2.2.2" boto psutil postfix-mta-sts-resolver b2sdk
# CONFIGURATION
@ -96,7 +97,7 @@ export LANG=en_US.UTF-8
export LC_TYPE=en_US.UTF-8
source $venv/bin/activate
exec python `pwd`/management/daemon.py
exec python $(pwd)/management/daemon.py
EOF
chmod +x $inst_dir/start
cp --remove-destination conf/mailinabox.service /lib/systemd/system/mailinabox.service # target was previously a symlink so remove it first
@ -111,7 +112,7 @@ minute=$((RANDOM % 60)) # avoid overloading mailinabox.email
cat > /etc/cron.d/mailinabox-nightly << EOF;
# Mail-in-a-Box --- Do not edit / will be overwritten on update.
# Run nightly tasks: backup, status checks.
$minute 3 * * * root (cd `pwd` && management/daily_tasks.sh)
$minute 3 * * * root (cd $(pwd) && management/daily_tasks.sh)
EOF
# Start the management server.

View File

@ -97,12 +97,12 @@ InstallNextcloud() {
}
# Nextcloud Version to install. Checks are done down below to step through intermediate versions.
nextcloud_ver=20.0.1
nextcloud_hash=f2b3faa570c541df73f209e873a1c2852e79eab8
contacts_ver=3.4.1
contacts_hash=aee680a75e95f26d9285efd3c1e25cf7f3bfd27e
calendar_ver=2.1.2
calendar_hash=930c07863bb7a65652dec34793802c8d80502336
nextcloud_ver=20.0.8
nextcloud_hash=372b0b4bb07c7984c04917aff86b280e68fbe761
contacts_ver=3.5.1
contacts_hash=d2ffbccd3ed89fa41da20a1dff149504c3b33b93
calendar_ver=2.2.0
calendar_hash=673ad72ca28adb8d0f209015ff2dca52ffad99af
user_external_ver=1.0.0
user_external_hash=3bf2609061d7214e7f0f69dd8883e55c4ec8f50a
@ -128,7 +128,7 @@ if [ ! -d /usr/local/lib/owncloud/ ] || [[ ! ${CURRENT_NEXTCLOUD_VER} =~ ^$nextc
# Backup the existing ownCloud/Nextcloud.
# Create a backup directory to store the current installation and database to
BACKUP_DIRECTORY=$STORAGE_ROOT/owncloud-backup/`date +"%Y-%m-%d-%T"`
BACKUP_DIRECTORY=$STORAGE_ROOT/owncloud-backup/$(date +"%Y-%m-%d-%T")
mkdir -p "$BACKUP_DIRECTORY"
if [ -d /usr/local/lib/owncloud/ ]; then
echo "Upgrading Nextcloud --- backing up existing installation, configuration, and database to directory to $BACKUP_DIRECTORY..."
@ -312,7 +312,9 @@ sudo -u www-data php /usr/local/lib/owncloud/occ upgrade
if [ \( $? -ne 0 \) -a \( $? -ne 3 \) ]; then exit 1; fi
# Disable default apps that we don't support
sudo -u www-data php /usr/local/lib/owncloud/occ app:disable photos dashboard activity
sudo -u www-data \
php /usr/local/lib/owncloud/occ app:disable photos dashboard activity \
| (grep -v "No such app enabled" || /bin/true)
# Set PHP FPM values to support large file uploads
# (semicolon is the comment character in this file, hashes produce deprecation warnings)

View File

@ -8,7 +8,7 @@ if [[ $EUID -ne 0 ]]; then
fi
# Check that we are running on Ubuntu 18.04 LTS (or 18.04.xx).
if [ "`lsb_release -d | sed 's/.*:\s*//' | sed 's/18\.04\.[0-9]/18.04/' `" != "Ubuntu 18.04 LTS" ]; then
if [ "$(lsb_release -d | sed 's/.*:\s*//' | sed 's/18\.04\.[0-9]/18.04/' )" != "Ubuntu 18.04 LTS" ]; then
echo "Mail-in-a-Box only supports being installed on Ubuntu 18.04, sorry. You are running:"
echo
lsb_release -d | sed 's/.*:\s*//'

View File

@ -46,7 +46,7 @@ fi
# in the first dialog prompt, so we should do this before that starts.
cat > /usr/local/bin/mailinabox << EOF;
#!/bin/bash
cd `pwd`
cd $(pwd)
source setup/start.sh
EOF
chmod +x /usr/local/bin/mailinabox
@ -78,7 +78,7 @@ if [ ! -d $STORAGE_ROOT ]; then
mkdir -p $STORAGE_ROOT
fi
if [ ! -f $STORAGE_ROOT/mailinabox.version ]; then
echo $(setup/migrate.py --current) > $STORAGE_ROOT/mailinabox.version
setup/migrate.py --current > $STORAGE_ROOT/mailinabox.version
chown $STORAGE_USER.$STORAGE_USER $STORAGE_ROOT/mailinabox.version
fi
@ -94,7 +94,7 @@ PUBLIC_IP=$PUBLIC_IP
PUBLIC_IPV6=$PUBLIC_IPV6
PRIVATE_IP=$PRIVATE_IP
PRIVATE_IPV6=$PRIVATE_IPV6
MTA_STS_MODE=${MTA_STS_MODE-}
MTA_STS_MODE=${DEFAULT_MTA_STS_MODE:-enforce}
EOF
# Start service configuration.

View File

@ -131,7 +131,7 @@ apt_get_quiet autoremove
# * openssh-client: provides ssh-keygen
echo Installing system packages...
apt_install python3 python3-dev python3-pip \
apt_install python3 python3-dev python3-pip python3-setuptools \
netcat-openbsd wget curl git sudo coreutils bc \
haveged pollinate openssh-client unzip \
unattended-upgrades cron ntp fail2ban rsyslog

View File

@ -126,13 +126,13 @@ chmod a+r /var/lib/mailinabox/mozilla-autoconfig.xml
# nginx configuration at /.well-known/mta-sts.txt
# more documentation is available on:
# https://www.uriports.com/blog/mta-sts-explained/
# default mode is "enforce". Change to "testing" which means
# "Messages will be delivered as though there was no failure
# but a report will be sent if TLS-RPT is configured" if you
# are not sure you want this yet. Or "none".
# default mode is "enforce". If you are not sure you want this yet, change it in
# /etc/mailinabox.conf to "MTA_STS_MODE=testing", which means "Messages will be
# delivered as though there was no failure but a report will be sent if
# TLS-RPT is configured". Or set it to "none".
PUNY_PRIMARY_HOSTNAME=$(echo "$PRIMARY_HOSTNAME" | idn2)
cat conf/mta-sts.txt \
| sed "s/MODE/${MTA_STS_MODE:-enforce}/" \
| sed "s/MODE/${MTA_STS_MODE}/" \
| sed "s/PRIMARY_HOSTNAME/$PUNY_PRIMARY_HOSTNAME/" \
> /var/lib/mailinabox/mta-sts.txt
chmod a+r /var/lib/mailinabox/mta-sts.txt

View File

@ -28,10 +28,11 @@ apt_install \
# Install Roundcube from source if it is not already present or if it is out of date.
# Combine the Roundcube version number with the commit hash of plugins to track
# whether we have the latest version of everything.
VERSION=1.4.10
HASH=36b2351030e1ebddb8e39190d7b0ba82b1bbec1b
PERSISTENT_LOGIN_VERSION=6b3fc450cae23ccb2f393d0ef67aa319e877e435
HTML5_NOTIFIER_VERSION=4b370e3cd60dabd2f428a26f45b677ad1b7118d5
VERSION=1.4.11
HASH=3877f0e70f29e7d0612155632e48c3db1e626be3
PERSISTENT_LOGIN_VERSION=6b3fc450cae23ccb2f393d0ef67aa319e877e435 # version 5.2.0
HTML5_NOTIFIER_VERSION=68d9ca194212e15b3c7225eb6085dbcf02fd13d7 # version 0.6.4+
CARDDAV_VERSION=3.0.3
CARDDAV_HASH=d1e3b0d851ffa2c6bd42bf0c04f70d0e1d0d78f8
@ -46,7 +47,7 @@ needs_update=0 #NODOC
if [ ! -f /usr/local/lib/roundcubemail/version ]; then
# not installed yet #NODOC
needs_update=1 #NODOC
elif [[ "$UPDATE_KEY" != `cat /usr/local/lib/roundcubemail/version` ]]; then
elif [[ "$UPDATE_KEY" != $(cat /usr/local/lib/roundcubemail/version) ]]; then
# checks if the version is what we want
needs_update=1 #NODOC
fi
@ -90,8 +91,9 @@ fi
# ### Configuring Roundcube
# Generate a safe 24-character secret key of safe characters.
SECRET_KEY=$(dd if=/dev/urandom bs=1 count=18 2>/dev/null | base64 | fold -w 24 | head -n 1)
# Generate a secret key of PHP-string-safe characters appropriate
# for the cipher algorithm selected below.
SECRET_KEY=$(dd if=/dev/urandom bs=1 count=32 2>/dev/null | base64 | sed s/=//g)
# Create a configuration file.
#
@ -125,7 +127,8 @@ cat > $RCM_CONFIG <<EOF;
);
\$config['support_url'] = 'https://mailinabox.email/';
\$config['product_name'] = '$PRIMARY_HOSTNAME Webmail';
\$config['des_key'] = '$SECRET_KEY';
\$config['cipher_method'] = 'AES-256-CBC'; # persistent login cookie and potentially other things
\$config['des_key'] = '$SECRET_KEY'; # 37 characters -> ~256 bits for AES-256, see above
\$config['plugins'] = array('html5_notifier', 'archive', 'zipdownload', 'password', 'managesieve', 'jqueryui', 'persistent_login', 'carddav');
\$config['skin'] = 'elastic';
\$config['login_autocomplete'] = 2;

View File

@ -22,23 +22,23 @@ apt_install \
phpenmod -v php imap
# Copy Z-Push into place.
VERSION=2.6.1
TARGETHASH=a4415f0dc0ed884acc8ad5c506944fc7e6d68eeb
VERSION=2.6.2
TARGETHASH=f0e8091a8030e5b851f5ba1f9f0e1a05b8762d80
needs_update=0 #NODOC
if [ ! -f /usr/local/lib/z-push/version ]; then
needs_update=1 #NODOC
elif [[ $VERSION != `cat /usr/local/lib/z-push/version` ]]; then
elif [[ $VERSION != $(cat /usr/local/lib/z-push/version) ]]; then
# checks if the version
needs_update=1 #NODOC
fi
if [ $needs_update == 1 ]; then
# Download
wget_verify "https://stash.z-hub.io/rest/api/latest/projects/ZP/repos/z-push/archive?at=refs%2Ftags%2F$VERSION&format=zip" $TARGETHASH /tmp/z-push.zip
wget_verify "https://github.com/Z-Hub/Z-Push/archive/refs/tags/$VERSION.zip" $TARGETHASH /tmp/z-push.zip
# Extract into place.
rm -rf /usr/local/lib/z-push /tmp/z-push
unzip -q /tmp/z-push.zip -d /tmp/z-push
mv /tmp/z-push/src /usr/local/lib/z-push
mv /tmp/z-push/*/src /usr/local/lib/z-push
rm -rf /tmp/z-push.zip /tmp/z-push
rm -f /usr/sbin/z-push-{admin,top}