Mirror of https://github.com/mail-in-a-box/mailinabox.git, synced 2025-04-04 00:17:06 +00:00

Merge remote-tracking branch 'upstream/main'

pappapisshu, 2023-01-15 16:05:40 +01:00
commit cb295a4bc2
18 changed files with 104 additions and 67 deletions

View File

@@ -620,7 +620,8 @@ def get_backup_config(env):
     # Merge in anything written to custom.yaml.
     try:
-        custom_config = rtyaml.load(open(get_backup_configuration_file(env)))
+        with open(os.path.join(backup_root, 'custom.yaml'), 'r') as f:
+            custom_config = rtyaml.load(f)
         if not isinstance(custom_config, dict): raise ValueError() # caught below
         # Converting the previous configuration (which was not very clear)
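Nearly every Python hunk in this commit applies the same fix: a bare open() whose handle is only closed when the file object is garbage-collected (triggering a ResourceWarning when the unclosed file is finalized) is replaced by a with-statement that closes the file deterministically, even if the subsequent parse raises. A minimal sketch of the pattern, using an illustrative helper name and the same custom.yaml path as above:

import os
import rtyaml  # YAML loader used throughout Mail-in-a-Box

def load_custom_backup_config(backup_root):  # illustrative helper name
    # Before: rtyaml.load(open(...)) left the handle open until garbage collection.
    # After: the context manager closes it even if rtyaml.load() raises.
    with open(os.path.join(backup_root, 'custom.yaml'), 'r') as f:
        return rtyaml.load(f)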

View File

@@ -47,7 +47,8 @@ def read_password():
     return first

 def setup_key_auth(mgmt_uri):
-    key = open('/var/lib/mailinabox/api.key').read().strip()
+    with open('/var/lib/mailinabox/api.key', 'r') as f:
+        key = f.read().strip()

     auth_handler = urllib.request.HTTPBasicAuthHandler()
     auth_handler.add_password(

View File

@@ -815,7 +815,8 @@ def write_opendkim_tables(domains, env):
 def get_custom_dns_config(env, only_real_records=False):
     try:
-        custom_dns = rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
+        with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), 'r') as f:
+            custom_dns = rtyaml.load(f)
         if not isinstance(custom_dns, dict): raise ValueError() # caught below
     except:
         return [ ]
@@ -992,6 +993,7 @@ def set_custom_dns_record(qname, rtype, value, action, env):
 def get_secondary_dns(custom_dns, mode=None):
     resolver = dns.resolver.get_default_resolver()
     resolver.timeout = 10
+    resolver.lifetime = 10

     values = []
     for qname, rtype, value in custom_dns:
@@ -1009,10 +1011,17 @@ def get_secondary_dns(custom_dns, mode=None):
         # doesn't.
         if not hostname.startswith("xfr:"):
             if mode == "xfr":
-                response = dns.resolver.resolve(hostname+'.', "A", raise_on_no_answer=False)
-                values.extend(map(str, response))
-                response = dns.resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False)
-                values.extend(map(str, response))
+                try:
+                    response = resolver.resolve(hostname+'.', "A", raise_on_no_answer=False)
+                    values.extend(map(str, response))
+                except dns.exception.DNSException:
+                    pass
+                try:
+                    response = resolver.resolve(hostname+'.', "AAAA", raise_on_no_answer=False)
+                    values.extend(map(str, response))
+                except dns.exception.DNSException:
+                    pass
                 continue

             values.append(hostname)
@@ -1030,15 +1039,17 @@ def set_secondary_dns(hostnames, env):
         # Validate that all hostnames are valid and that all zone-xfer IP addresses are valid.
         resolver = dns.resolver.get_default_resolver()
         resolver.timeout = 5
+        resolver.lifetime = 5
         for item in hostnames:
             if not item.startswith("xfr:"):
                 # Resolve hostname.
                 try:
                     response = resolver.resolve(item, "A")
-                except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+                except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):
                     try:
                         response = resolver.resolve(item, "AAAA")
-                    except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
+                    except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):
                         raise ValueError("Could not resolve the IP address of %s." % item)
             else:
                 # Validate IP address.
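The two resolver attributes set in these hunks control different things in dnspython: timeout is how long to wait on each individual nameserver attempt, while lifetime caps the total time spent on the query, after which dns.exception.Timeout (a subclass of dns.exception.DNSException) is raised; without a lifetime, an unresponsive secondary nameserver could stall the whole check. A small sketch of the lookup pattern used above, with a hypothetical hostname:

import dns.resolver
import dns.exception

resolver = dns.resolver.get_default_resolver()
resolver.timeout = 10   # seconds to wait per nameserver attempt
resolver.lifetime = 10  # total budget; dns.exception.Timeout is raised after this

values = []
for rtype in ("A", "AAAA"):
    try:
        # raise_on_no_answer=False yields an empty answer instead of raising NoAnswer.
        response = resolver.resolve("ns1.example.com.", rtype, raise_on_no_answer=False)
        values.extend(map(str, response))
    except dns.exception.DNSException:
        # Covers NXDOMAIN, NoNameservers, Timeout, etc. -- skip this record type.
        pass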
@@ -1071,7 +1082,7 @@ def get_custom_dns_records(custom_dns, qname, rtype):
 def build_recommended_dns(env):
     ret = []
     for (domain, zonefile, records) in build_zones(env):
-        # remove records that we don't dislay
+        # remove records that we don't display
         records = [r for r in records if r[3] is not False]

         # put Required at the top, then Recommended, then everythiing else

View File

@@ -73,7 +73,8 @@ def scan_files(collector):
             continue
         elif fn[-3:] == '.gz':
             tmp_file = tempfile.NamedTemporaryFile()
-            shutil.copyfileobj(gzip.open(fn), tmp_file)
+            with gzip.open(fn, 'rb') as f:
+                shutil.copyfileobj(f, tmp_file)

         if VERBOSE:
             print("Processing file", fn, "...")

View File

@@ -535,7 +535,8 @@ def check_certificate(domain, ssl_certificate, ssl_private_key, warn_if_expiring
     # Second, check that the certificate matches the private key.
     if ssl_private_key is not None:
         try:
-            priv_key = load_pem(open(ssl_private_key, 'rb').read())
+            with open(ssl_private_key, 'rb') as f:
+                priv_key = load_pem(f.read())
         except ValueError as e:
             return ("The private key file %s is not a private key file: %s" % (ssl_private_key, str(e)), None)

View File

@@ -207,7 +207,8 @@ def check_ssh_password(env, output):
     # the configuration file.
     if not os.path.exists("/etc/ssh/sshd_config"):
         return
-    sshd = open("/etc/ssh/sshd_config").read()
+    with open("/etc/ssh/sshd_config", "r") as f:
+        sshd = f.read()
     if re.search("\nPasswordAuthentication\s+yes", sshd) \
         or not re.search("\nPasswordAuthentication\s+no", sshd):
             output.print_error("""The SSH server on this machine permits password-based login. A more secure
@@ -308,6 +309,8 @@ def run_network_checks(env, output):
         output.print_ok("IP address is not blacklisted by zen.spamhaus.org.")
     elif zen == "[timeout]":
         output.print_warning("Connection to zen.spamhaus.org timed out. We could not determine whether your server's IP address is blacklisted. Please try again later.")
+    elif zen == "[Not Set]":
+        output.print_warning("Could not connect to zen.spamhaus.org. We could not determine whether your server's IP address is blacklisted. Please try again later.")
     else:
         output.print_error("""The IP address of this machine %s is listed in the Spamhaus Block List (code %s),
             which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
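The "[timeout]" and "[Not Set]" strings checked here are sentinel values returned by query_dns() (its signature, with nxdomain defaulting to '[Not Set]', appears in a later hunk) rather than raised exceptions, so the status checks can branch on the outcome of each lookup. A simplified stand-in for that convention, not the project's exact implementation:

import dns.resolver
import dns.exception

def query_dns(qname, rtype, nxdomain='[Not Set]'):
    # Simplified stand-in: return sentinel strings instead of raising, so that
    # callers such as run_network_checks() can branch on the result.
    resolver = dns.resolver.get_default_resolver()
    resolver.timeout = 5
    resolver.lifetime = 5
    try:
        response = resolver.resolve(qname, rtype)
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        return nxdomain
    except dns.exception.Timeout:
        return "[timeout]"
    return "; ".join(sorted(str(r) for r in response))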
@@ -541,7 +544,7 @@ def check_dns_zone(domain, env, output, dns_zonefiles):
     for ns in custom_secondary_ns:
         # We must first resolve the nameserver to an IP address so we can query it.
         ns_ips = query_dns(ns, "A")
-        if not ns_ips:
+        if not ns_ips or ns_ips in {'[Not Set]', '[timeout]'}:
             output.print_error("Secondary nameserver %s is not valid (it doesn't resolve to an IP address)." % ns)
             continue
         # Choose the first IP if nameserver returns multiple
@@ -592,7 +595,8 @@ def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
     # record that we suggest using is for the KSK (and that's how the DS records were generated).
     # We'll also give the nice name for the key algorithm.
     dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))
-    dnsssec_pubkey = open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key')).read().split("\t")[3].split(" ")[3]
+    with open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key'), 'r') as f:
+        dnsssec_pubkey = f.read().split("\t")[3].split(" ")[3]

     expected_ds_records[ (ds_keytag, ds_alg, ds_digalg, ds_digest) ] = {
         "record": rr_ds,
@@ -744,6 +748,8 @@ def check_mail_domain(domain, env, output):
         output.print_ok("Domain is not blacklisted by dbl.spamhaus.org.")
     elif dbl == "[timeout]":
         output.print_warning("Connection to dbl.spamhaus.org timed out. We could not determine whether the domain {} is blacklisted. Please try again later.".format(domain))
+    elif dbl == "[Not Set]":
+        output.print_warning("Could not connect to dbl.spamhaus.org. We could not determine whether the domain {} is blacklisted. Please try again later.".format(domain))
     else:
         output.print_error("""This domain is listed in the Spamhaus Domain Block List (code %s),
             which may prevent recipients from receiving your mail.
@@ -788,12 +794,17 @@ def query_dns(qname, rtype, nxdomain='[Not Set]', at=None, as_list=False):
     # running bind server), or if the 'at' argument is specified, use that host
     # as the nameserver.
     resolver = dns.resolver.get_default_resolver()
-    if at:
+
+    # Make sure at is not a string that cannot be used as a nameserver
+    if at and at not in {'[Not set]', '[timeout]'}:
         resolver = dns.resolver.Resolver()
         resolver.nameservers = [at]

     # Set a timeout so that a non-responsive server doesn't hold us back.
     resolver.timeout = 5
+    # The number of seconds to spend trying to get an answer to the question. If the
+    # lifetime expires a dns.exception.Timeout exception will be raised.
+    resolver.lifetime = 5

     # Do the query.
     try:
@@ -947,7 +958,8 @@ def run_and_output_changes(env, pool):
     # Load previously saved status checks.
     cache_fn = "/var/cache/mailinabox/status_checks.json"
     if os.path.exists(cache_fn):
-        prev = json.load(open(cache_fn))
+        with open(cache_fn, 'r') as f:
+            prev = json.load(f)

         # Group the serial output into categories by the headings.
         def group_by_heading(lines):

View File

@@ -14,7 +14,9 @@ def load_env_vars_from_file(fn):
     # Load settings from a KEY=VALUE file.
     import collections
     env = collections.OrderedDict()
-    for line in open(fn): env.setdefault(*line.strip().split("=", 1))
+    with open(fn, 'r') as f:
+        for line in f:
+            env.setdefault(*line.strip().split("=", 1))
     return env

 def save_environment(env):
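The one-liner wrapped here leans on argument unpacking: line.strip().split("=", 1) produces a [key, value] pair, and the * splat passes that pair as the two arguments of OrderedDict.setdefault, so the first occurrence of each key wins and later duplicates are ignored. The same idiom in isolation, with illustrative file contents:

import collections

lines = ["STORAGE_ROOT=/home/user-data", "PRIMARY_HOSTNAME=box.example.com"]

env = collections.OrderedDict()
for line in lines:
    # split("=", 1) keeps any further '=' characters inside the value;
    # setdefault(key, value) only stores the first value seen for each key.
    env.setdefault(*line.strip().split("=", 1))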
@@ -34,7 +36,8 @@ def load_settings(env):
     import rtyaml
     fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
     try:
-        config = rtyaml.load(open(fn, "r"))
+        with open(fn, "r") as f:
+            config = rtyaml.load(f)
         if not isinstance(config, dict): raise ValueError() # caught below
         return config
     except:
@@ -43,14 +46,16 @@ def load_settings(env):
 # THE SSH KEYS AT /root/.ssh

 def load_ssh_public_key():
     ssh_public_key_file = os.path.join('/root', '.ssh', 'id_rsa_miab.pub')
     if os.path.exists(ssh_public_key_file):
-        return open(ssh_public_key_file, 'r').read()
+        with open(ssh_public_key_file, 'r') as f:
+            return f.read()

 def load_ssh_private_key():
     ssh_private_key_file = os.path.join('/root', '.ssh', 'id_rsa_miab')
     if os.path.exists(ssh_private_key_file):
-        return open(ssh_private_key_file, 'r').read()
+        with open(ssh_private_key_file, 'r') as f:
+            return f.read()

 # UTILITIES

View File

@@ -63,7 +63,8 @@ def get_web_domains_with_root_overrides(env):
     root_overrides = { }
     nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
     if os.path.exists(nginx_conf_custom_fn):
-        custom_settings = rtyaml.load(open(nginx_conf_custom_fn))
+        with open(nginx_conf_custom_fn, 'r') as f:
+            custom_settings = rtyaml.load(f)
         for domain, settings in custom_settings.items():
             for type, value in [('redirect', settings.get('redirects', {}).get('/')),
                 ('proxy', settings.get('proxies', {}).get('/'))]:
@@ -75,13 +76,18 @@ def do_web_update(env):
     # Pre-load what SSL certificates we will use for each domain.
     ssl_certificates = get_ssl_certificates(env)

+    # Helper for reading config files and templates
+    def read_conf(conf_fn):
+        with open(os.path.join(os.path.dirname(__file__), "../conf", conf_fn), "r") as f:
+            return f.read()
+
     # Build an nginx configuration file.
-    nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read()
+    nginx_conf = read_conf("nginx-top.conf")

     # Load the templates.
-    template0 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
-    template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-alldomains.conf")).read()
-    template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read()
+    template0 = read_conf("nginx.conf")
+    template1 = read_conf("nginx-alldomains.conf")
+    template2 = read_conf("nginx-primaryonly.conf")
     template3 = "\trewrite ^(.*) https://$REDIRECT_DOMAIN$1 permanent;\n"

     # Add the PRIMARY_HOST configuration first so it becomes nginx's default server.
@@ -141,11 +147,8 @@ def make_domain_config(domain, templates, ssl_certificates, env):
         def hashfile(filepath):
             import hashlib
             sha1 = hashlib.sha1()
-            f = open(filepath, 'rb')
-            try:
-                sha1.update(f.read())
-            finally:
-                f.close()
+            with open(filepath, 'rb') as f:
+                sha1.update(f.read())
             return sha1.hexdigest()
         nginx_conf_extra += "\t# ssl files sha1: %s / %s\n" % (hashfile(tls_cert["private-key"]), hashfile(tls_cert["certificate"]))
@@ -153,7 +156,8 @@ def make_domain_config(domain, templates, ssl_certificates, env):
     hsts = "yes"
     nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
     if os.path.exists(nginx_conf_custom_fn):
-        yaml = rtyaml.load(open(nginx_conf_custom_fn))
+        with open(nginx_conf_custom_fn, 'r') as f:
+            yaml = rtyaml.load(f)
         if domain in yaml:
             yaml = yaml[domain]

View File

@@ -202,13 +202,13 @@ chmod -R o-rwx /etc/dovecot
 # Ensure mailbox files have a directory that exists and are owned by the mail user.
 mkdir -p $STORAGE_ROOT/mail/mailboxes
-chown -R mail.mail $STORAGE_ROOT/mail/mailboxes
+chown -R mail:mail $STORAGE_ROOT/mail/mailboxes

 # Same for the sieve scripts.
 mkdir -p $STORAGE_ROOT/mail/sieve
 mkdir -p $STORAGE_ROOT/mail/sieve/global_before
 mkdir -p $STORAGE_ROOT/mail/sieve/global_after
-chown -R mail.mail $STORAGE_ROOT/mail/sieve
+chown -R mail:mail $STORAGE_ROOT/mail/sieve

 # Allow the IMAP/POP ports in the firewall.
 ufw_allow imaps

View File

@@ -34,8 +34,8 @@ contact.admin.always_send warning critical
 EOF

 # The Debian installer touches these files and chowns them to www-data:adm for use with spawn-fcgi
-chown munin. /var/log/munin/munin-cgi-html.log
-chown munin. /var/log/munin/munin-cgi-graph.log
+chown munin /var/log/munin/munin-cgi-html.log
+chown munin /var/log/munin/munin-cgi-graph.log

 # ensure munin-node knows the name of this machine
 # and reduce logging level to warning

View File

@@ -110,7 +110,7 @@ InstallNextcloud() {
     # Make sure permissions are correct or the upgrade step won't run.
     # $STORAGE_ROOT/owncloud may not yet exist, so use -f to suppress
     # that error.
-    chown -f -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud || /bin/true
+    chown -f -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud || /bin/true

     # If this isn't a new installation, immediately run the upgrade script.
     # Then check for success (0=ok and 3=no upgrade needed, both are success).
@@ -259,7 +259,7 @@ EOF
 EOF

 # Set permissions
-chown -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
+chown -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud

 # Execute Nextcloud's setup step, which creates the Nextcloud sqlite database.
 # It also wipes it if it exists. And it updates config.php with database
@@ -311,7 +311,7 @@ var_export(\$CONFIG);
 echo ";";
 ?>
 EOF
-chown www-data.www-data $STORAGE_ROOT/owncloud/config.php
+chown www-data:www-data $STORAGE_ROOT/owncloud/config.php

 # Enable/disable apps. Note that this must be done after the Nextcloud setup.
 # The firstrunwizard gave Josh all sorts of problems, so disabling that.

View File

@@ -85,7 +85,7 @@ f=$STORAGE_ROOT
 while [[ $f != / ]]; do chmod a+rx "$f"; f=$(dirname "$f"); done;
 if [ ! -f $STORAGE_ROOT/mailinabox.version ]; then
     setup/migrate.py --current > $STORAGE_ROOT/mailinabox.version
-    chown $STORAGE_USER.$STORAGE_USER $STORAGE_ROOT/mailinabox.version
+    chown $STORAGE_USER:$STORAGE_USER $STORAGE_ROOT/mailinabox.version
 fi

 # Save the global options in /etc/mailinabox.conf so that standalone

View File

@@ -373,3 +373,5 @@ cp -f conf/fail2ban/filter.d/* /etc/fail2ban/filter.d/
 # scripts will ensure the files exist and then fail2ban is given another
 # restart at the very end of setup.
 restart_service fail2ban
+
+systemctl enable fail2ban

View File

@@ -134,7 +134,7 @@ cat > $RCM_CONFIG <<EOF;
 \$config['product_name'] = '$PRIMARY_HOSTNAME Webmail';
 \$config['cipher_method'] = 'AES-256-CBC'; # persistent login cookie and potentially other things
 \$config['des_key'] = '$SECRET_KEY'; # 37 characters -> ~256 bits for AES-256, see above
-\$config['plugins'] = array('html5_notifier', 'archive', 'zipdownload', 'password', 'managesieve', 'jqueryui', 'persistent_login', 'carddav');
+\$config['plugins'] = array('html5_notifier', 'archive', 'zipdownload', 'managesieve', 'jqueryui', 'persistent_login', 'carddav');
 \$config['skin'] = 'elastic';
 \$config['login_autocomplete'] = 2;
 \$config['login_username_filter'] = 'email';
@@ -170,7 +170,7 @@ EOF
 # Create writable directories.
 mkdir -p /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
-chown -R www-data.www-data /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
+chown -R www-data:www-data /var/log/roundcubemail /var/tmp/roundcubemail $STORAGE_ROOT/mail/roundcube

 # Ensure the log file monitored by fail2ban exists, or else fail2ban can't start.
 sudo -u www-data touch /var/log/roundcubemail/errors.log
@@ -194,14 +194,14 @@ usermod -a -G dovecot www-data
 # set permissions so that PHP can use users.sqlite
 # could use dovecot instead of www-data, but not sure it matters
-chown root.www-data $STORAGE_ROOT/mail
+chown root:www-data $STORAGE_ROOT/mail
 chmod 775 $STORAGE_ROOT/mail
-chown root.www-data $STORAGE_ROOT/mail/users.sqlite
+chown root:www-data $STORAGE_ROOT/mail/users.sqlite
 chmod 664 $STORAGE_ROOT/mail/users.sqlite

 # Fix Carddav permissions:
-chown -f -R root.www-data ${RCM_PLUGIN_DIR}/carddav
-# root.www-data need all permissions, others only read
+chown -f -R root:www-data ${RCM_PLUGIN_DIR}/carddav
+# root:www-data need all permissions, others only read
 chmod -R 774 ${RCM_PLUGIN_DIR}/carddav

 # Run Roundcube database migration script (database is created if it does not exist)

View File

@@ -76,7 +76,8 @@ for setting in settings:
 found = set()
 buf = ""
-input_lines = list(open(filename))
+with open(filename, "r") as f:
+    input_lines = list(f)

 while len(input_lines) > 0:
     line = input_lines.pop(0)

View File

@@ -40,8 +40,8 @@ cp "$1/owncloud.db" $STORAGE_ROOT/owncloud/
 cp "$1/config.php" $STORAGE_ROOT/owncloud/

 ln -sf $STORAGE_ROOT/owncloud/config.php /usr/local/lib/owncloud/config/config.php
-chown -f -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
-chown www-data.www-data $STORAGE_ROOT/owncloud/config.php
+chown -f -R www-data:www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
+chown www-data:www-data $STORAGE_ROOT/owncloud/config.php

 sudo -u www-data php$PHP_VER /usr/local/lib/owncloud/occ maintenance:mode --off

View File

@@ -17,13 +17,8 @@ accesses = set()
 # Scan the current and rotated access logs.
 for fn in glob.glob("/var/log/nginx/access.log*"):
     # Gunzip if necessary.
-    if fn.endswith(".gz"):
-        f = gzip.open(fn)
-    else:
-        f = open(fn, "rb")
-
     # Loop through the lines in the access log.
-    with f:
+    with (gzip.open if fn.endswith(".gz") else open)(fn, "rb") as f:
         for line in f:
             # Find lines that are GETs on the bootstrap script by either curl or wget.
             # (Note that we purposely skip ...?ping=1 requests which is the admin panel querying us for updates.)
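The rewrite above works because gzip.open and the builtin open can both be called with a filename and mode "rb" and both return a context manager, so choosing the opener first collapses the two branches into a single with-block. The same idea in isolation, with an illustrative log path:

import gzip

fn = "/var/log/nginx/access.log.2.gz"  # illustrative; may or may not be gzipped

# Pick the opener by extension, then read raw bytes either way.
opener = gzip.open if fn.endswith(".gz") else open
with opener(fn, "rb") as f:
    for line in f:
        pass  # each line is a bytes object from the access log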
@@ -43,7 +38,8 @@ for date, ip in accesses:
 # Since logs are rotated, store the statistics permanently in a JSON file.
 # Load in the stats from an existing file.
 if os.path.exists(outfn):
-    existing_data = json.load(open(outfn))
+    with open(outfn, "r") as f:
+        existing_data = json.load(f)
     for date, count in existing_data:
         if date not in by_date:
             by_date[date] = count

View File

@@ -124,13 +124,14 @@ def generate_documentation():
     """)

     parser = Source.parser()
-    for line in open("setup/start.sh"):
-        try:
-            fn = parser.parse_string(line).filename()
-        except:
-            continue
-        if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"):
-            continue
+    with open("setup/start.sh", "r") as start_file:
+        for line in start_file:
+            try:
+                fn = parser.parse_string(line).filename()
+            except:
+                continue
+            if fn in ("setup/start.sh", "setup/preflight.sh", "setup/questions.sh", "setup/firstuser.sh", "setup/management.sh"):
+                continue

             import sys
             print(fn, file=sys.stderr)
@@ -401,7 +402,8 @@ class BashScript(Grammar):
     @staticmethod
     def parse(fn):
         if fn in ("setup/functions.sh", "/etc/mailinabox.conf"): return ""
-        string = open(fn).read()
+        with open(fn, "r") as f:
+            string = f.read()

         # tokenize
         string = re.sub(".* #NODOC\n", "", string)