diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d0e6e0a..cf6e527e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,14 +6,44 @@ In Development ownCloud: +* Update ownCloud to version 9.1.1 + +Control panel: + +* Backups can now be made using rsync-over-ssh! +* Remove recommendations for Certificate Providers +* Status checks failed if the system doesn't support iptables or doesn't have ufw installed. +* Add support for SSHFP records when sshd listens on non-standard ports + +System: + +* Ubuntu's "Upgrade to 16.04" notice is suppressed since you should not do that. +* Lower memory requirements to 512MB, display a warning if system memory is below 768MB. + +Mail: + +* Turn off header filters for forwarded email. +* Another go at fixing a long-standing issue with training the spam filter not working (because of a file permissions issue). +* Exchange/ActiveSync will now use your display name set in Roundcube in the From: line of outgoing email. + +v0.20 (September 23, 2016) +-------------------------- + +ownCloud: + * Updated to ownCloud to 8.2.7. +Control Panel: + +* Fixed a crash that occurs when there are IPv6 DNS records due to a bug in dnspython 1.14.0. +* Improved the wonky low disk space check. + v0.19b (August 20, 2016) ------------------------ This update corrects a security issue introduced in v0.18. -A remote code execution vulnerability is corrected in how the munin system monitoring graphs are generated for the control panel. The vulnerability involves an administrative user visiting a carefully crafted URL. +* A remote code execution vulnerability is corrected in how the munin system monitoring graphs are generated for the control panel. The vulnerability involves an administrative user visiting a carefully crafted URL. v0.19a (August 18, 2016) ------------------------ @@ -148,7 +178,6 @@ v0.16 (January 30, 2016) ------------------------ This update primarily adds automatic SSL (now "TLS") certificate provisioning from Let's Encrypt (https://letsencrypt.org/). -* The Sieve port is now open so tools like the Thunderbird Sieve program can be used to edit mail filters. Control Panel: @@ -587,4 +616,4 @@ v0.02 (September 21, 2014) v0.01 (August 19, 2014) ----------------------- -First release. +First versioned release after a year of unversioned development. diff --git a/README.md b/README.md index a2d5d31e..0883e52e 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ by me: $ curl -s https://keybase.io/joshdata/key.asc | gpg --import gpg: key C10BDD81: public key "Joshua Tauberer " imported - $ git verify-tag v0.19b + $ git verify-tag v0.20 gpg: Signature made ..... using RSA key ID C10BDD81 gpg: Good signature from "Joshua Tauberer " gpg: WARNING: This key is not certified with a trusted signature! @@ -72,7 +72,7 @@ and on my [personal homepage](https://razor.occams.info/). (Of course, if this r Checkout the tag corresponding to the most recent release: - $ git checkout v0.19b + $ git checkout v0.20 Begin the installation. 
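The Exchange/ActiveSync change noted above pulls the sender's display name from Roundcube using the SQL query configured in the z-push backend below. A minimal sketch of that lookup, assuming the default STORAGE_ROOT of /home/user-data and a placeholder login:

    # Roughly the query z-push runs (see IMAP_FROM_SQL_* below); the login is a placeholder.
    import sqlite3

    db = sqlite3.connect("/home/user-data/mail/roundcube/roundcube.sqlite")
    username = "me@example.com"
    row = db.execute(
        "SELECT name, email FROM identities i "
        "INNER JOIN users u ON i.user_id = u.user_id "
        "WHERE u.username = ? AND i.standard = 1 AND i.del = 0 AND i.name <> ''",
        (username,)).fetchone()
    if row:
        print("%s <%s>" % row)  # what ends up on the From: line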
diff --git a/conf/zpush/backend_imap.php b/conf/zpush/backend_imap.php index 84dc7358..b1867625 100644 --- a/conf/zpush/backend_imap.php +++ b/conf/zpush/backend_imap.php @@ -8,7 +8,7 @@ define('IMAP_SERVER', '127.0.0.1'); define('IMAP_PORT', 993); define('IMAP_OPTIONS', '/ssl/norsh/novalidate-cert'); -define('IMAP_DEFAULTFROM', ''); +define('IMAP_DEFAULTFROM', 'sql'); define('SYSTEM_MIME_TYPES_MAPPING', '/etc/mime.types'); define('IMAP_AUTOSEEN_ON_DELETE', false); @@ -23,15 +23,16 @@ define('IMAP_FOLDER_TRASH', 'TRASH'); define('IMAP_FOLDER_SPAM', 'SPAM'); define('IMAP_FOLDER_ARCHIVE', 'ARCHIVE'); - -// not used -define('IMAP_FROM_SQL_DSN', ''); +define('IMAP_FROM_SQL_DSN', 'sqlite:STORAGE_ROOT/mail/roundcube/roundcube.sqlite'); define('IMAP_FROM_SQL_USER', ''); define('IMAP_FROM_SQL_PASSWORD', ''); define('IMAP_FROM_SQL_OPTIONS', serialize(array(PDO::ATTR_PERSISTENT => true))); -define('IMAP_FROM_SQL_QUERY', "select first_name, last_name, mail_address from users where mail_address = '#username@#domain'"); -define('IMAP_FROM_SQL_FIELDS', serialize(array('first_name', 'last_name', 'mail_address'))); -define('IMAP_FROM_SQL_FROM', '#first_name #last_name <#mail_address>'); +define('IMAP_FROM_SQL_QUERY', "SELECT name, email FROM identities i INNER JOIN users u ON i.user_id = u.user_id WHERE u.username = '#username' AND i.standard = 1 AND i.del = 0 AND i.name <> ''"); +define('IMAP_FROM_SQL_FIELDS', serialize(array('name', 'email'))); +define('IMAP_FROM_SQL_FROM', '#name <#email>'); +define('IMAP_FROM_SQL_FULLNAME', '#name'); + +// not used define('IMAP_FROM_LDAP_SERVER', ''); define('IMAP_FROM_LDAP_SERVER_PORT', '389'); define('IMAP_FROM_LDAP_USER', 'cn=zpush,ou=servers,dc=zpush,dc=org'); @@ -40,6 +41,7 @@ define('IMAP_FROM_LDAP_BASE', 'dc=zpush,dc=org'); define('IMAP_FROM_LDAP_QUERY', '(mail=#username@#domain)'); define('IMAP_FROM_LDAP_FIELDS', serialize(array('givenname', 'sn', 'mail'))); define('IMAP_FROM_LDAP_FROM', '#givenname #sn <#mail>'); +define('IMAP_FROM_LDAP_FULLNAME', '#givenname #sn'); define('IMAP_SMTP_METHOD', 'sendmail'); diff --git a/management/backup.py b/management/backup.py index be87a429..e93b6fcb 100755 --- a/management/backup.py +++ b/management/backup.py @@ -13,6 +13,11 @@ import rtyaml from utils import exclusive_process, load_environment, shell, wait_for_service, fix_boto +rsync_ssh_options = [ + "--ssh-options='-i /root/.ssh/id_rsa_miab'", + "--rsync-options=-e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p 22 -i /root/.ssh/id_rsa_miab\"", +] + def backup_status(env): # Root folder backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') @@ -52,6 +57,7 @@ def backup_status(env): "size": 0, # collection-status doesn't give us the size "volumes": keys[2], # number of archive volumes for this backup (not really helpful) } + code, collection_status = shell('check_output', [ "/usr/bin/duplicity", "collection-status", @@ -59,7 +65,7 @@ def backup_status(env): "--gpg-options", "--cipher-algo=AES256", "--log-fd", "1", config["target"], - ], + ] + rsync_ssh_options, get_env(env), trap=True) if code != 0: @@ -177,24 +183,24 @@ def get_passphrase(env): with open(os.path.join(backup_root, 'secret_key.txt')) as f: passphrase = f.readline().strip() if len(passphrase) < 43: raise Exception("secret_key.txt's first line is too short!") - + return passphrase def get_env(env): config = get_backup_config(env) - + env = { "PASSPHRASE" : get_passphrase(env) } - + if get_target_type(config) == 's3': env["AWS_ACCESS_KEY_ID"] = config["target_user"] 
env["AWS_SECRET_ACCESS_KEY"] = config["target_pass"] - + return env - + def get_target_type(config): protocol = config["target"].split(":")[0] return protocol - + def perform_backup(full_backup): env = load_environment() @@ -204,7 +210,7 @@ def perform_backup(full_backup): backup_cache_dir = os.path.join(backup_root, 'cache') backup_dir = os.path.join(backup_root, 'encrypted') - # Are backups dissbled? + # Are backups disabled? if config["target"] == "off": return @@ -283,7 +289,7 @@ def perform_backup(full_backup): env["STORAGE_ROOT"], config["target"], "--allow-source-mismatch" - ], + ] + rsync_ssh_options, get_env(env)) finally: # Start services again. @@ -305,7 +311,7 @@ def perform_backup(full_backup): "--archive-dir", backup_cache_dir, "--force", config["target"] - ], + ] + rsync_ssh_options, get_env(env)) # From duplicity's manual: @@ -320,7 +326,7 @@ def perform_backup(full_backup): "--archive-dir", backup_cache_dir, "--force", config["target"] - ], + ] + rsync_ssh_options, get_env(env)) # Change ownership of backups to the user-data user, so that the after-bcakup @@ -359,7 +365,7 @@ def run_duplicity_verification(): "--exclude", backup_root, config["target"], env["STORAGE_ROOT"], - ], get_env(env)) + ] + rsync_ssh_options, get_env(env)) def run_duplicity_restore(args): env = load_environment() @@ -370,7 +376,7 @@ def run_duplicity_restore(args): "restore", "--archive-dir", backup_cache_dir, config["target"], - ] + args, + ] + rsync_ssh_options + args, get_env(env)) def list_target_files(config): @@ -383,6 +389,36 @@ def list_target_files(config): if p.scheme == "file": return [(fn, os.path.getsize(os.path.join(p.path, fn))) for fn in os.listdir(p.path)] + elif p.scheme == "rsync": + rsync_fn_size_re = re.compile(r'.* ([^ ]*) [^ ]* [^ ]* (.*)') + rsync_target = '{host}:{path}' + + _, target_host, target_path = config['target'].split('//') + target_path = '/' + target_path + if not target_path.endswith('/'): + target_path += '/' + + rsync_command = [ 'rsync', + '-e', + '/usr/bin/ssh -i /root/.ssh/id_rsa_miab -oStrictHostKeyChecking=no -oBatchMode=yes', + '--list-only', + '-r', + rsync_target.format( + host=target_host, + path=target_path) + ] + + code, listing = shell('check_output', rsync_command, trap=True) + if code == 0: + ret = [] + for l in listing.split('\n'): + match = rsync_fn_size_re.match(l) + if match: + ret.append( (match.groups()[1], int(match.groups()[0].replace(',',''))) ) + return ret + else: + raise ValueError("Connection to rsync host failed") + elif p.scheme == "s3": # match to a Region fix_boto() # must call prior to importing boto @@ -425,7 +461,7 @@ def list_target_files(config): def backup_set_custom(env, target, target_user, target_pass, min_age): config = get_backup_config(env, for_save=True) - + # min_age must be an int if isinstance(min_age, str): min_age = int(min_age) @@ -443,11 +479,11 @@ def backup_set_custom(env, target, target_user, target_pass, min_age): list_target_files(config) except ValueError as e: return str(e) - + write_backup_config(env, config) return "OK" - + def get_backup_config(env, for_save=False, for_ui=False): backup_root = os.path.join(env["STORAGE_ROOT"], 'backup') @@ -482,6 +518,9 @@ def get_backup_config(env, for_save=False, for_ui=False): if config["target"] == "local": # Expand to the full URL. 
config["target"] = "file://" + config["file_target_directory"] + ssh_pub_key = os.path.join('/root', '.ssh', 'id_rsa_miab.pub') + if os.path.exists(ssh_pub_key): + config["ssh_pub_key"] = open(ssh_pub_key, 'r').read() return config diff --git a/management/dns_update.py b/management/dns_update.py index d7bbdfd0..784a5b08 100755 --- a/management/dns_update.py +++ b/management/dns_update.py @@ -348,7 +348,18 @@ def build_sshfp_records(): # like the known_hosts file: hostname, keytype, fingerprint. The order # of the output is arbitrary, so sort it to prevent spurrious updates # to the zone file (that trigger bumping the serial number). - keys = shell("check_output", ["ssh-keyscan", "localhost"]) + + # scan the sshd_config and find the ssh ports (port 22 may be closed) + with open('/etc/ssh/sshd_config', 'r') as f: + ports = [] + t = f.readlines() + for line in t: + s = line.split() + if len(s) == 2 and s[0] == 'Port': + ports = ports + [s[1]] + # the keys are the same at each port, so we only need to get + # them at the first port found (may not be port 22) + keys = shell("check_output", ["ssh-keyscan", "-p", ports[0], "localhost"]) for key in sorted(keys.split("\n")): if key.strip() == "" or key[0] == "#": continue try: diff --git a/management/ssl_certificates.py b/management/ssl_certificates.py index c49443b9..fd1f88c5 100755 --- a/management/ssl_certificates.py +++ b/management/ssl_certificates.py @@ -238,8 +238,22 @@ def get_certificates_to_provision(env, show_extended_problems=True, force_domain except Exception as e: problems[domain] = "DNS isn't configured properly for this domain: DNS lookup had an error: %s." % str(e) return False - if len(response) != 1 or str(response[0]) != value: - problems[domain] = "Domain control validation cannot be performed for this domain because DNS points the domain to another machine (%s %s)." % (rtype, ", ".join(str(r) for r in response)) + + # Unfortunately, the response.__str__ returns bytes + # instead of string, if it resulted from an AAAA-query. + # We need to convert manually, until this is fixed: + # https://github.com/rthalley/dnspython/issues/204 + # + # BEGIN HOTFIX + def rdata__str__(r): + s = r.to_text() + if isinstance(s, bytes): + s = s.decode('utf-8') + return s + # END HOTFIX + + if len(response) != 1 or rdata__str__(response[0]) != value: + problems[domain] = "Domain control validation cannot be performed for this domain because DNS points the domain to another machine (%s %s)." % (rtype, ", ".join(rdata__str__(r) for r in response)) return False return True diff --git a/management/status_checks.py b/management/status_checks.py index 13cbab12..ed1a403e 100755 --- a/management/status_checks.py +++ b/management/status_checks.py @@ -169,8 +169,19 @@ def run_system_checks(rounded_values, env, output): check_free_memory(rounded_values, env, output) def check_ufw(env, output): - ufw = shell('check_output', ['ufw', 'status']).splitlines() + if not os.path.isfile('/usr/sbin/ufw'): + output.print_warning("""The ufw program was not installed. If your system is able to run iptables, rerun the setup.""") + return + code, ufw = shell('check_output', ['ufw', 'status'], trap=True) + + if code != 0: + # The command failed, it's safe to say the firewall is disabled + output.print_warning("""The firewall is not working on this machine. An error was received + while trying to check the firewall. 
To investigate run 'sudo ufw status'.""") + return + + ufw = ufw.splitlines() if ufw[0] == "Status: active": not_allowed_ports = 0 for service in get_services(): @@ -229,15 +240,15 @@ def check_free_disk_space(rounded_values, env, output): st = os.statvfs(env['STORAGE_ROOT']) bytes_total = st.f_blocks * st.f_frsize bytes_free = st.f_bavail * st.f_frsize - if not rounded_values: - disk_msg = "The disk has %s GB space remaining." % str(round(bytes_free/1024.0/1024.0/1024.0*10.0)/10) - else: - disk_msg = "The disk has less than %s%% space left." % str(round(bytes_free/bytes_total/10 + .5)*10) + disk_msg = "The disk has %.2f GB space remaining." % (bytes_free/1024.0/1024.0/1024.0) if bytes_free > .3 * bytes_total: + if rounded_values: disk_msg = "The disk has more than 30% free space." output.print_ok(disk_msg) elif bytes_free > .15 * bytes_total: + if rounded_values: disk_msg = "The disk has less than 30% free space." output.print_warning(disk_msg) else: + if rounded_values: disk_msg = "The disk has less than 15% free space." output.print_error(disk_msg) def check_free_memory(rounded_values, env, output): @@ -472,7 +483,7 @@ def check_dns_zone(domain, env, output, dns_zonefiles): % (existing_ns, correct_ns) ) # Check that each custom secondary nameserver resolves the IP address. - + if custom_secondary_ns and not probably_external_dns: for ns in custom_secondary_ns: # We must first resolve the nameserver to an IP address so we can query it. @@ -680,6 +691,22 @@ def query_dns(qname, rtype, nxdomain='[Not Set]', at=None): # periods from responses since that's how qnames are encoded in DNS but is # confusing for us. The order of the answers doesn't matter, so sort so we # can compare to a well known order. + + # Unfortunately, the response.__str__ returns bytes + # instead of string, if it resulted from an AAAA-query. + # We need to convert manually, until this is fixed: + # https://github.com/rthalley/dnspython/issues/204 + # + # BEGIN HOTFIX + response_new = [] + for r in response: + if isinstance(r.to_text(), bytes): + response_new.append(r.to_text().decode('utf-8')) + else: + response_new.append(r) + response = response_new + # END HOTFIX + return "; ".join(sorted(str(r).rstrip('.') for r in response)) def check_ssl_cert(domain, rounded_time, ssl_certificates, env, output): @@ -897,7 +924,7 @@ class FileOutput: class ConsoleOutput(FileOutput): def __init__(self): self.buf = sys.stdout - + # Do nice line-wrapping according to the size of the terminal. # The 'stty' program queries standard input for terminal information. if sys.stdin.isatty(): diff --git a/management/templates/aliases.html b/management/templates/aliases.html index bf8e63cd..78556df8 100644 --- a/management/templates/aliases.html +++ b/management/templates/aliases.html @@ -123,7 +123,7 @@ - +
Verb Action
GET(none) Returns a list of existing mail aliases. Adding ?format=json to the URL will give JSON-encoded results.
POST/add Adds a new mail alias. Required POST-body parameters are address and forward_to.
POST/add Adds a new mail alias. Required POST-body parameters are address and forwards_to.
POST/remove Removes a mail alias. Required POST-body parameter is address.
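The same calls can be made from Python; a sketch assuming HTTP basic authentication with an admin account (hostname and addresses are placeholders). Note the parameter is now forwards_to, not forward_to:

    import requests

    base = "https://box.example.com/admin/mail/aliases"
    auth = ("admin@example.com", "password")

    # List existing aliases as JSON.
    print(requests.get(base, params={"format": "json"}, auth=auth).json())

    # Add an alias using the renamed forwards_to parameter.
    requests.post(base + "/add",
                  data={"address": "new_alias@example.com",
                        "forwards_to": "me@example.com"},
                  auth=auth)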
@@ -135,7 +135,7 @@ curl -X GET https://{{hostname}}/admin/mail/aliases?format=json # Adds a new alias -curl -X POST -d "address=new_alias@mydomail.com" -d "forward_to=my_email@mydomain.com" https://{{hostname}}/admin/mail/aliases/add +curl -X POST -d "address=new_alias@mydomail.com" -d "forwards_to=my_email@mydomain.com" https://{{hostname}}/admin/mail/aliases/add # Removes an alias curl -X POST -d "address=new_alias@mydomail.com" https://{{hostname}}/admin/mail/aliases/remove diff --git a/management/templates/custom-dns.html b/management/templates/custom-dns.html index df3d82e5..30b29890 100644 --- a/management/templates/custom-dns.html +++ b/management/templates/custom-dns.html @@ -35,8 +35,8 @@ - - + + @@ -69,7 +69,7 @@

Using a secondary nameserver

If your TLD requires you to have two separate nameservers, you can either set up external DNS and ignore the DNS server on this box entirely, or use the DNS server on this box but add a secondary (aka “slave”) nameserver.

-

If you choose to use a seconday nameserver, you must find a seconday nameserver service provider. Your domain name registrar or virtual cloud provider may provide this service for you. Once you set up the seconday nameserver service, enter the hostname (not the IP address) of their secondary nameserver in the box below.

+

If you choose to use a secondary nameserver, you must find a secondary nameserver service provider. Your domain name registrar or virtual cloud provider may provide this service for you. Once you set up the secondary nameserver service, enter the hostname (not the IP address) of their secondary nameserver in the box below.
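Once the secondary nameserver is in place, you can confirm it is actually answering for your zone by querying it directly. A rough sketch with dnspython, which the management tools already use; both hostnames are placeholders:

    import dns.resolver

    secondary = "ns2.example.com"
    domain = "yourdomain.com"

    # Resolve the secondary's address, then ask it (and only it) for the zone's NS records.
    ns_ip = dns.resolver.query(secondary, "A")[0].to_text()
    resolver = dns.resolver.Resolver()
    resolver.nameservers = [ns_ip]
    for rr in resolver.query(domain, "NS"):
        print(rr.to_text())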

diff --git a/management/templates/ssl.html b/management/templates/ssl.html index cefc82fd..0cc4d59a 100644 --- a/management/templates/ssl.html +++ b/management/templates/ssl.html @@ -55,7 +55,7 @@

Install certificate

-

There are many other places where you can get a free or cheap certificate. If you don't want to use our automatic Let's Encrypt integration, you can give Namecheap’s $9 certificate, StartSSL’s free express lane, WoSign’s free TLS or any other certificate provider a try.

+

If you don't want to use our automatic Let's Encrypt integration, you can give any other certificate provider a try. You can generate the needed CSR below.
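Generating the CSR yourself amounts to one openssl invocation against the box's private key. A sketch only; the domain is a placeholder and the key path may differ from what the panel uses:

    import subprocess

    domain = "box.example.com"
    key_file = "/home/user-data/ssl/ssl_private_key.pem"  # the box's existing private key

    csr = subprocess.check_output([
        "openssl", "req", "-new",
        "-key", key_file,
        "-sha256",
        "-subj", "/CN=%s" % domain,
    ])
    print(csr.decode())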

Which domain are you getting a certificate for?

@@ -108,7 +108,7 @@ function show_tls(keep_provisioning_shown) { $('#ssl_provision_p').toggle(res.can_provision.length > 0); if (res.can_provision.length > 0) $('#ssl_provision_p span').text(res.can_provision.join(", ")); - + $('#ssl_provision_problems_div').toggle(res.cant_provision.length > 0); $('#ssl_provision_problems tbody').text(""); for (var i = 0; i < res.cant_provision.length; i++) { @@ -260,7 +260,7 @@ function provision_tls_cert() { } } ready_to_finish(); - + // don't re-enable the Provision button -- user must use the Retry button when it becomes enabled may_reenable_provision_button = false; @@ -268,7 +268,7 @@ function provision_tls_cert() { n.find("p").addClass("text-success").text("The TLS certificate was provisioned and installed."); setTimeout("show_tls(true)", 1); // update main table of certificate statuses, call with arg keep_provisioning_shown true so that we don't clear what we just outputted } - + // display the detailed log info in case of problems var trace = $("
Log:
"); n.append(trace); diff --git a/management/templates/system-backup.html b/management/templates/system-backup.html index 8fceafe6..0ccb4bd6 100644 --- a/management/templates/system-backup.html +++ b/management/templates/system-backup.html @@ -16,16 +16,60 @@
+
-

Backups are stored on this machine’s own hard disk. You are responsible for periodically using SFTP (FTP over SSH) to copy the backup files from to a safe location. These files are encrypted, so they are safe to store anywhere.

+

Backups are stored on this machine’s own hard disk. You are responsible for periodically using SFTP (FTP over SSH) to copy the backup files from to a safe location. These files are encrypted, so they are safe to store anywhere.

Separately copy the encryption password from to a safe and secure location. You will need this file to decrypt backup files.
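As an illustration of that periodic copy, here is a small sketch that pulls the encrypted backup volumes and the encryption key from the box to another machine; the remote account is a placeholder and must be able to read the backup folder, and rsync/scp stand in for SFTP:

    import os, subprocess

    remote = "root@box.example.com"            # placeholder
    backup_root = "/home/user-data/backup"     # STORAGE_ROOT/backup on the box

    os.makedirs("box-backup", exist_ok=True)

    # The encrypted duplicity volumes...
    subprocess.check_call(["rsync", "-avz", "-e", "ssh",
                           "%s:%s/encrypted/" % (remote, backup_root),
                           "box-backup/encrypted/"])

    # ...and, separately, the passphrase needed to decrypt them.
    subprocess.check_call(["scp", "%s:%s/secret_key.txt" % (remote, backup_root),
                           "box-backup/"])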

+ +
+
+ +

Backups are synced to a remote machine using rsync over SSH, with local + copies in . These files are encrypted, so + they are safe to store anywhere.

Separately copy the encryption + password from to a safe and + secure location. You will need this file to decrypt backup files.
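The rsync target is saved as a single URL, and the double slash before the path comes from the path being absolute. A small sketch of how it is assembled and taken apart, mirroring the split('//') parsing in management/backup.py; user, host and path are placeholders:

    target = "rsync://backupuser@backup.example.com//var/backups/box"

    scheme, user_host, path = target.split("//")  # "rsync:", "backupuser@backup.example.com", "var/backups/box"
    path = "/" + path
    user, host = user_host.split("@")
    print(user, host, path)                       # backupuser backup.example.com /var/backups/box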

+ +
+
+
+ +
+ +
+
+
+ +
+ +
+
+
+ +
+ +
+
+
+ +
+ +
Copy the Public SSH Key above, and paste it into the ~/.ssh/authorized_keys file + of the target user on the backup server specified above. This enables secure, + passwordless authentication from your Mail-in-a-Box server to your backup server.
+
+
+
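A sketch of installing that key and checking the result, run on the box as root; the target account and host are placeholders, and the final command uses the same SSH options the backup job itself passes to duplicity:

    import subprocess

    remote = "backupuser@backup.example.com"   # placeholder
    pubkey = open("/root/.ssh/id_rsa_miab.pub").read().strip()

    # Append the box's backup key to the target account's authorized_keys
    # (you will be asked for that account's password this one time).
    subprocess.check_call(["ssh", remote,
        "mkdir -p ~/.ssh && chmod 700 ~/.ssh && "
        "echo '%s' >> ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys" % pubkey])

    # This should now succeed without any prompt.
    subprocess.check_call(["ssh", "-i", "/root/.ssh/id_rsa_miab",
        "-oStrictHostKeyChecking=no", "-oBatchMode=yes", remote, "true"])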

Backups are stored in an Amazon Web Services S3 bucket. You must have an AWS account already.
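Before saving S3 settings here, it can help to confirm the credentials and bucket from a Python prompt. A sketch using boto, the library the backup code relies on; the region, bucket and keys are placeholders:

    import boto.s3

    conn = boto.s3.connect_to_region("us-west-2",
                                     aws_access_key_id="AKIA...",
                                     aws_secret_access_key="...")
    bucket = conn.get_bucket("my-miab-backups")
    for key in bucket.list():
        print(key.name, key.size)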

@@ -60,7 +104,8 @@
-
+ +
@@ -92,7 +137,7 @@ function toggle_form() { var target_type = $("#backup-target-type").val(); - $(".backup-target-local, .backup-target-s3").hide(); + $(".backup-target-local, .backup-target-rsync, .backup-target-s3").hide(); $(".backup-target-" + target_type).show(); } @@ -114,7 +159,7 @@ function nice_size(bytes) { function show_system_backup() { show_custom_backup() - + $('#backup-status tbody').html("Loading...") api( "/system/backup/status", @@ -160,28 +205,37 @@ function show_system_backup() { } function show_custom_backup() { - $(".backup-target-local, .backup-target-s3").hide(); + $(".backup-target-local, .backup-target-rsync, .backup-target-s3").hide(); api( "/system/backup/config", "GET", { }, function(r) { + $("#backup-target-user").val(r.target_user); + $("#backup-target-pass").val(r.target_pass); + $("#min-age").val(r.min_age_in_days); + $(".backup-location").text(r.file_target_directory); + $(".backup-encpassword-file").text(r.enc_pw_file); + $("#ssh-pub-key").val(r.ssh_pub_key); + if (r.target == "file://" + r.file_target_directory) { $("#backup-target-type").val("local"); } else if (r.target == "off") { $("#backup-target-type").val("off"); + } else if (r.target.substring(0, 8) == "rsync://") { + $("#backup-target-type").val("rsync"); + var path = r.target.substring(8).split('//'); + var host_parts = path.shift().split('@'); + $("#backup-target-rsync-user").val(host_parts[0]); + $("#backup-target-rsync-host").val(host_parts[1]); + $("#backup-target-rsync-path").val('/'+path[0]); } else if (r.target.substring(0, 5) == "s3://") { $("#backup-target-type").val("s3"); - var hostpath = r.target.substring(5).split('/'); + var hostpath = r.target.substring(5).split('/'); var host = hostpath.shift(); $("#backup-target-s3-host").val(host); $("#backup-target-s3-path").val(hostpath.join('/')); } - $("#backup-target-user").val(r.target_user); - $("#backup-target-pass").val(r.target_pass); - $("#min-age").val(r.min_age_in_days); - $('#backup-location').text(r.file_target_directory); - $('.backup-encpassword-file').text(r.enc_pw_file); toggle_form() }) } @@ -190,12 +244,18 @@ function set_custom_backup() { var target_type = $("#backup-target-type").val(); var target_user = $("#backup-target-user").val(); var target_pass = $("#backup-target-pass").val(); - + var target; if (target_type == "local" || target_type == "off") target = target_type; else if (target_type == "s3") target = "s3://" + $("#backup-target-s3-host").val() + "/" + $("#backup-target-s3-path").val(); + else if (target_type == "rsync") { + target = "rsync://" + $("#backup-target-rsync-user").val() + "@" + $("#backup-target-rsync-host").val() + + "/" + $("#backup-target-rsync-path").val(); + target_user = ''; + } + var min_age = $("#min-age").val(); api( diff --git a/setup/bootstrap.sh b/setup/bootstrap.sh index 7d180bfe..d3e7712a 100644 --- a/setup/bootstrap.sh +++ b/setup/bootstrap.sh @@ -7,7 +7,7 @@ ######################################################### if [ -z "$TAG" ]; then - TAG=v0.19b + TAG=v0.20 fi # Are we running as root? diff --git a/setup/mail-postfix.sh b/setup/mail-postfix.sh index d62478e1..ca52edbd 100755 --- a/setup/mail-postfix.sh +++ b/setup/mail-postfix.sh @@ -91,7 +91,8 @@ tools/editconf.py /etc/postfix/main.cf \ # * Give it a different name in syslog to distinguish it from the port 25 smtpd server. # * Add a new cleanup service specific to the submission service ('authclean') # that filters out privacy-sensitive headers on mail being sent out by -# authenticated users. +# authenticated users. 
By default Postfix also applies this to attached +# emails but we turn this off by setting nested_header_checks empty. tools/editconf.py /etc/postfix/master.cf -s -w \ "submission=inet n - - - - smtpd -o syslog_name=postfix/submission @@ -100,7 +101,8 @@ tools/editconf.py /etc/postfix/master.cf -s -w \ -o smtpd_tls_ciphers=high -o smtpd_tls_exclude_ciphers=aNULL,DES,3DES,MD5,DES+MD5,RC4 -o smtpd_tls_mandatory_protocols=!SSLv2,!SSLv3 -o cleanup_service_name=authclean" \ "authclean=unix n - - - 0 cleanup - -o header_checks=pcre:/etc/postfix/outgoing_mail_header_filters" + -o header_checks=pcre:/etc/postfix/outgoing_mail_header_filters + -o nested_header_checks=" # Install the `outgoing_mail_header_filters` file required by the new 'authclean' service. cp conf/postfix_outgoing_mail_header_filters /etc/postfix/outgoing_mail_header_filters diff --git a/setup/owncloud.sh b/setup/owncloud.sh index 2e590b00..b0a59ade 100755 --- a/setup/owncloud.sh +++ b/setup/owncloud.sh @@ -16,10 +16,6 @@ apt_install \ apt-get purge -qq -y owncloud* -# Install ownCloud from source of this version: -owncloud_ver=8.2.7 -owncloud_hash=723ba3f46dad219109cdf28dcc016fcd8a6bc434 - # Migrate <= v0.10 setups that stored the ownCloud config.php in /usr/local rather than # in STORAGE_ROOT. Move the file to STORAGE_ROOT. if [ ! -f $STORAGE_ROOT/owncloud/config.php ] \ @@ -32,28 +28,34 @@ if [ ! -f $STORAGE_ROOT/owncloud/config.php ] \ ln -sf $STORAGE_ROOT/owncloud/config.php /usr/local/lib/owncloud/config/config.php fi -# Check if ownCloud dir exist, and check if version matches owncloud_ver (if either doesn't - install/upgrade) -if [ ! -d /usr/local/lib/owncloud/ ] \ - || ! grep -q $owncloud_ver /usr/local/lib/owncloud/version.php; then +InstallOwncloud() { + echo + echo "Upgrading to ownCloud version $1" + echo + + version=$1 + hash=$2 + + # Remove the current owncloud + rm -rf /usr/local/lib/owncloud # Download and verify - wget_verify https://download.owncloud.org/community/owncloud-$owncloud_ver.zip $owncloud_hash /tmp/owncloud.zip - - # Clear out the existing ownCloud. - if [ -d /usr/local/lib/owncloud/ ]; then - echo "upgrading ownCloud to $owncloud_ver (backing up existing ownCloud directory to /tmp/owncloud-backup-$$)..." - mv /usr/local/lib/owncloud /tmp/owncloud-backup-$$ - fi + wget_verify https://download.owncloud.org/community/owncloud-$version.zip $hash /tmp/owncloud.zip # Extract ownCloud - unzip -u -o -q /tmp/owncloud.zip -d /usr/local/lib #either extracts new or replaces current files + unzip -q /tmp/owncloud.zip -d /usr/local/lib rm -f /tmp/owncloud.zip - # The two apps we actually want are not in ownCloud core. Clone them from + # The two apps we actually want are not in ownCloud core. Download the releases from # their github repositories. 
mkdir -p /usr/local/lib/owncloud/apps - git_clone https://github.com/owncloudarchive/contacts 9ba2e667ae8c7ea36d8c4a4c3413c374beb24b1b '' /usr/local/lib/owncloud/apps/contacts - git_clone https://github.com/owncloudarchive/calendar 2086e738a3b7b868ec59cd61f0f88b49c3f21dd1 '' /usr/local/lib/owncloud/apps/calendar + wget_verify https://github.com/owncloud/contacts/releases/download/v1.4.0.0/contacts.tar.gz c1c22d29699456a45db447281682e8bc3f10e3e7 /tmp/contacts.tgz + tar xf /tmp/contacts.tgz -C /usr/local/lib/owncloud/apps/ + rm /tmp/contacts.tgz + + wget_verify https://github.com/nextcloud/calendar/releases/download/v1.4.0/calendar.tar.gz c84f3170efca2a99ea6254de34b0af3cb0b3a821 /tmp/calendar.tgz + tar xf /tmp/calendar.tgz -C /usr/local/lib/owncloud/apps/ + rm /tmp/calendar.tgz # Fix weird permissions. chmod 750 /usr/local/lib/owncloud/{apps,config} @@ -69,7 +71,7 @@ if [ ! -d /usr/local/lib/owncloud/ ] \ # If this isn't a new installation, immediately run the upgrade script. # Then check for success (0=ok and 3=no upgrade needed, both are success). - if [ -f $STORAGE_ROOT/owncloud/owncloud.db ]; then + if [ -e $STORAGE_ROOT/owncloud/owncloud.db ]; then # ownCloud 8.1.1 broke upgrades. It may fail on the first attempt, but # that can be OK. sudo -u www-data php /usr/local/lib/owncloud/occ upgrade @@ -81,6 +83,76 @@ if [ ! -d /usr/local/lib/owncloud/ ] \ echo "...which seemed to work." fi fi +} + +owncloud_ver=9.1.1 + +# Check if ownCloud dir exist, and check if version matches owncloud_ver (if either doesn't - install/upgrade) +if [ ! -d /usr/local/lib/owncloud/ ] \ + || ! grep -q $owncloud_ver /usr/local/lib/owncloud/version.php; then + + # Stop php-fpm + hide_output service php5-fpm stop + + # Backup the existing ownCloud. + # Create a backup directory to store the current installation and database to + BACKUP_DIRECTORY=$STORAGE_ROOT/owncloud-backup/`date +"%Y-%m-%d-%T"` + mkdir -p "$BACKUP_DIRECTORY" + if [ -d /usr/local/lib/owncloud/ ]; then + echo "upgrading ownCloud to $owncloud_ver (backing up existing ownCloud installation, configuration and database to directory to $BACKUP_DIRECTORY..." + cp -r /usr/local/lib/owncloud "$BACKUP_DIRECTORY/owncloud-install" + fi + if [ -e /home/user-data/owncloud/owncloud.db ]; then + cp /home/user-data/owncloud/owncloud.db $BACKUP_DIRECTORY + fi + if [ -e /home/user-data/owncloud/config.php ]; then + cp /home/user-data/owncloud/config.php $BACKUP_DIRECTORY + fi + + # We only need to check if we do upgrades when owncloud was previously installed + if [ -e /usr/local/lib/owncloud/version.php ]; then + if grep -q "8.1.[0-9]" /usr/local/lib/owncloud/version.php; then + echo "We are running 8.1.x, upgrading to 8.2.3 first" + InstallOwncloud 8.2.3 bfdf6166fbf6fc5438dc358600e7239d1c970613 + fi + + # If we are upgrading from 8.2.x we should go to 9.0 first. Owncloud doesn't support skipping minor versions + if grep -q "8.2.[0-9]" /usr/local/lib/owncloud/version.php; then + echo "We are running version 8.2.x, upgrading to 9.0.2 first" + + # We need to disable memcached. 
The upgrade and install fails + # with memcached + CONFIG_TEMP=$(/bin/mktemp) + php < $CONFIG_TEMP && mv $CONFIG_TEMP $STORAGE_ROOT/owncloud/config.php; + +EOF + chown www-data.www-data $STORAGE_ROOT/owncloud/config.php + + # We can now install owncloud 9.0.2 + InstallOwncloud 9.0.2 72a3d15d09f58c06fa8bee48b9e60c9cd356f9c5 + + # The owncloud 9 migration doesn't migrate calendars and contacts + # The option to migrate these are removed in 9.1 + # So the migrations should be done when we have 9.0 installed + sudo -u www-data php /usr/local/lib/owncloud/occ dav:migrate-addressbooks + # The following migration has to be done for each owncloud user + for directory in $STORAGE_ROOT/owncloud/*@*/ ; do + username=$(basename "${directory}") + sudo -u www-data php /usr/local/lib/owncloud/occ dav:migrate-calendar $username + done + sudo -u www-data php /usr/local/lib/owncloud/occ dav:sync-birthday-calendar + fi + fi + + InstallOwncloud $owncloud_ver 72ed9812432f01b3a459c4afc33f5c76b71eec09 fi # ### Configuring ownCloud @@ -110,10 +182,7 @@ if [ ! -f $STORAGE_ROOT/owncloud/owncloud.db ]; then 'arguments'=>array('{127.0.0.1:993/imap/ssl/novalidate-cert}') ) ), - 'memcache.local' => '\\OC\\Memcache\\Memcached', - "memcached_servers" => array ( - array('127.0.0.1', 11211), - ), + 'memcache.local' => '\OC\Memcache\APC', 'mail_smtpmode' => 'sendmail', 'mail_smtpsecure' => '', 'mail_smtpauthtype' => 'LOGIN', @@ -173,7 +242,7 @@ include("$STORAGE_ROOT/owncloud/config.php"); \$CONFIG['trusted_domains'] = array('$PRIMARY_HOSTNAME'); -\$CONFIG['memcache.local'] = '\\OC\\Memcache\\Memcached'; +\$CONFIG['memcache.local'] = '\OC\Memcache\APC'; \$CONFIG['overwrite.cli.url'] = '/cloud'; \$CONFIG['mail_from_address'] = 'administrator'; # just the local part, matches our master administrator address @@ -212,6 +281,12 @@ tools/editconf.py /etc/php5/fpm/php.ini -c ';' \ max_execution_time=600 \ short_open_tag=On +# If apc is explicitly disabled we need to enable it +if grep -q apc.enabled=0 /etc/php5/mods-available/apcu.ini; then + tools/editconf.py /etc/php5/mods-available/apcu.ini -c ';' \ + apc.enabled=1 +fi + # Set up a cron job for owncloud. cat > /etc/cron.hourly/mailinabox-owncloud << EOF; #!/bin/bash diff --git a/setup/preflight.sh b/setup/preflight.sh index 4f73fffc..4be2ec41 100644 --- a/setup/preflight.sh +++ b/setup/preflight.sh @@ -19,20 +19,26 @@ fi # Check that we have enough memory. # -# /proc/meminfo reports free memory in kibibytes. Our baseline will be 768 MB, -# which is 750000 kibibytes. +# /proc/meminfo reports free memory in kibibytes. Our baseline will be 512 MB, +# which is 500000 kibibytes. +# +# We will display a warning if the memory is below 768 MB which is 750000 kibibytes # # Skip the check if we appear to be running inside of Vagrant, because that's really just for testing. TOTAL_PHYSICAL_MEM=$(head -n 1 /proc/meminfo | awk '{print $2}') -if [ $TOTAL_PHYSICAL_MEM -lt 750000 ]; then +if [ $TOTAL_PHYSICAL_MEM -lt 500000 ]; then if [ ! -d /vagrant ]; then TOTAL_PHYSICAL_MEM=$(expr \( \( $TOTAL_PHYSICAL_MEM \* 1024 \) / 1000 \) / 1000) echo "Your Mail-in-a-Box needs more memory (RAM) to function properly." - echo "Please provision a machine with at least 768 MB, 1 GB recommended." + echo "Please provision a machine with at least 512 MB, 1 GB recommended." echo "This machine has $TOTAL_PHYSICAL_MEM MB memory." exit fi fi +if [ $TOTAL_PHYSICAL_MEM -lt 750000 ]; then + echo "WARNING: Your Mail-in-a-Box has less than 768 MB of memory." + echo " It might run unreliably when under heavy load." 
+fi # Check that tempfs is mounted with exec MOUNTED_TMP_AS_NO_EXEC=$(grep "/tmp.*noexec" /proc/mounts) diff --git a/setup/spamassassin.sh b/setup/spamassassin.sh index ca264c0d..6c29fe8e 100755 --- a/setup/spamassassin.sh +++ b/setup/spamassassin.sh @@ -84,7 +84,7 @@ tools/editconf.py /etc/spamassassin/local.cf -s \ tools/editconf.py /etc/spamassassin/local.cf -s \ bayes_path=$STORAGE_ROOT/mail/spamassassin/bayes \ - bayes_file_mode=0660 + bayes_file_mode=0666 mkdir -p $STORAGE_ROOT/mail/spamassassin chown -R spampd:spampd $STORAGE_ROOT/mail/spamassassin diff --git a/setup/system.sh b/setup/system.sh index 293ac68d..53fae9d6 100755 --- a/setup/system.sh +++ b/setup/system.sh @@ -119,6 +119,12 @@ apt_install python3 python3-dev python3-pip \ haveged pollinate \ unattended-upgrades cron ntp fail2ban +# ### Suppress Upgrade Prompts +# Since Mail-in-a-Box might jump straight to 18.04 LTS, there's no need +# to be reminded about 16.04 on every login. +tools/editconf.py /etc/update-manager/release-upgrades Prompt=never +rm -f /var/lib/ubuntu-release-upgrader/release-upgrade-available + # ### Set the system timezone # # Some systems are missing /etc/timezone, which we cat into the configs for @@ -208,6 +214,12 @@ pollinate -q -r # Between these two, we really ought to be all set. +# We need an ssh key to store backups via rsync, if it doesn't exist create one +if [ ! -f /root/.ssh/id_rsa_miab ]; then + echo 'Creating SSH key for backup…' + ssh-keygen -t rsa -b 2048 -a 100 -f /root/.ssh/id_rsa_miab -N '' -q +fi + # ### Package maintenance # # Allow apt to install system updates automatically every day. diff --git a/setup/zpush.sh b/setup/zpush.sh index 33aadc0c..5cc12c51 100755 --- a/setup/zpush.sh +++ b/setup/zpush.sh @@ -53,6 +53,7 @@ cp conf/zpush/backend_combined.php /usr/local/lib/z-push/backend/combined/config # Configure IMAP rm -f /usr/local/lib/z-push/backend/imap/config.php cp conf/zpush/backend_imap.php /usr/local/lib/z-push/backend/imap/config.php +sed -i "s%STORAGE_ROOT%$STORAGE_ROOT%" /usr/local/lib/z-push/backend/imap/config.php # Configure CardDav rm -f /usr/local/lib/z-push/backend/carddav/config.php diff --git a/tests/fail2ban.py b/tests/fail2ban.py index fb74e706..0a3c1da4 100644 --- a/tests/fail2ban.py +++ b/tests/fail2ban.py @@ -10,11 +10,11 @@ import sys, os, time, functools # parse command line -if len(sys.argv) != 3: - print("Usage: tests/fail2ban.py \"ssh user@hostname\" hostname") +if len(sys.argv) != 4: + print("Usage: tests/fail2ban.py \"ssh user@hostname\" hostname owncloud_user") sys.exit(1) -ssh_command, hostname = sys.argv[1:3] +ssh_command, hostname, owncloud_user = sys.argv[1:4] # define some test types @@ -68,6 +68,28 @@ def imap_test(): finally: M.logout() # shuts down connection, has nothing to do with login() + +def pop_test(): + import poplib + try: + M = poplib.POP3_SSL(hostname) + except ConnectionRefusedError: + # looks like fail2ban worked + raise IsBlocked() + try: + M.user('fakeuser') + try: + M.pass_('fakepassword') + except poplib.error_proto as e: + # Authentication should fail. 
+ M = None # don't .quit() + return + M.list() + raise Exception("authentication didn't fail") + finally: + if M: + M.quit() + def http_test(url, expected_status, postdata=None, qsargs=None, auth=None): import urllib.parse import requests @@ -183,6 +205,9 @@ if __name__ == "__main__": # IMAP run_test(imap_test, [], 20, 30, 4) + # POP + run_test(pop_test, [], 20, 30, 4) + # Mail-in-a-Box control panel run_test(http_test, ["/admin/me", 200], 20, 30, 1) @@ -190,7 +215,7 @@ if __name__ == "__main__": run_test(http_test, ["/admin/munin/", 401], 20, 30, 1) # ownCloud - run_test(http_test, ["/cloud/remote.php/webdav", 401, None, None, ["aa", "aa"]], 20, 120, 1) + run_test(http_test, ["/cloud/remote.php/webdav", 401, None, None, [owncloud_user, "aa"]], 20, 120, 1) # restart fail2ban so that this client machine is no longer blocked restart_fail2ban_service(final=True) diff --git a/tools/owncloud-restore.sh b/tools/owncloud-restore.sh new file mode 100755 index 00000000..0e6a0756 --- /dev/null +++ b/tools/owncloud-restore.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# +# This script will restore the backup made during an installation +source /etc/mailinabox.conf # load global vars + +if [ -z "$1" ]; then + echo "Usage: owncloud-restore.sh " + echo + echo "WARNING: This will restore the database to the point of the installation!" + echo " This means that you will lose all changes made by users after that point" + echo + echo + echo "Backups are stored here: $STORAGE_ROOT/owncloud-backup/" + echo + echo "Available backups:" + echo + find $STORAGE_ROOT/owncloud-backup/* -maxdepth 0 -type d + echo + echo "Supply the directory that was created during the last installation as the only commandline argument" + exit +fi + +if [ ! -f $1/config.php ]; then + echo "This isn't a valid backup location" + exit +fi + +echo "Restoring backup from $1" +service php5-fpm stop + +# remove the current owncloud installation +rm -rf /usr/local/lib/owncloud/ +# restore the current owncloud application +cp -r "$1/owncloud-install" /usr/local/lib/owncloud + +# restore access rights +chmod 750 /usr/local/lib/owncloud/{apps,config} + +cp "$1/owncloud.db" $STORAGE_ROOT/owncloud/ +cp "$1/config.php" $STORAGE_ROOT/owncloud/ + +ln -sf $STORAGE_ROOT/owncloud/config.php /usr/local/lib/owncloud/config/config.php +chown -f -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud +chown www-data.www-data $STORAGE_ROOT/owncloud/config.php + +sudo -u www-data php /usr/local/lib/owncloud/occ maintenance:mode --off + +service php5-fpm start +echo "Done"