mirror of
https://github.com/mail-in-a-box/mailinabox.git
synced 2026-03-13 17:17:23 +01:00
Compare commits
228 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
23d2df7a93 | ||
|
|
1cd97d46a2 | ||
|
|
53f84a8092 | ||
|
|
6441de63ba | ||
|
|
b2553aea33 | ||
|
|
5ef1cfbdc7 | ||
|
|
7527b4dc27 | ||
|
|
1367816b04 | ||
|
|
299a2315c1 | ||
|
|
9a6aea6940 | ||
|
|
98cd04cccf | ||
|
|
0cc20cbb97 | ||
|
|
ef6a17d4a6 | ||
|
|
17a149947a | ||
|
|
a2c50ae967 | ||
|
|
13958ba4df | ||
|
|
8eb71483f3 | ||
|
|
d8e30883fa | ||
|
|
47acbbf332 | ||
|
|
dece359c90 | ||
|
|
6a9eb4e367 | ||
|
|
fc03ce9b2f | ||
|
|
ce17c12ca2 | ||
|
|
5edaeb8c7b | ||
|
|
3a28d1b073 | ||
|
|
6f2226bfcd | ||
|
|
97cd4c64ad | ||
|
|
43d50d0667 | ||
|
|
6258a7f311 | ||
|
|
ab36cc8968 | ||
|
|
33b71c6b3c | ||
|
|
34e821c102 | ||
|
|
2af557139d | ||
|
|
9e0dcd8718 | ||
|
|
be2b5a62de | ||
|
|
0cbba71c72 | ||
|
|
d28563fb45 | ||
|
|
38632f0f90 | ||
|
|
0754ce01b1 | ||
|
|
1ef455d37d | ||
|
|
d152603abd | ||
|
|
9e125aec00 | ||
|
|
2c90c267bd | ||
|
|
47de93961e | ||
|
|
1990f32ca4 | ||
|
|
807939c0e4 | ||
|
|
a1c7bf0883 | ||
|
|
5008cc603e | ||
|
|
9857db96cd | ||
|
|
e9e6d94e3b | ||
|
|
462a79cf47 | ||
|
|
f792deeebd | ||
|
|
95173bb327 | ||
|
|
1d09e2406b | ||
|
|
c9add7a8bf | ||
|
|
e4caed9277 | ||
|
|
1760eaa601 | ||
|
|
b25ce67fe1 | ||
|
|
b23ba6f75e | ||
|
|
cf904a05cc | ||
|
|
47a5a44b9e | ||
|
|
a0e6c7ceb6 | ||
|
|
49aa367ffa | ||
|
|
83b36f2c3a | ||
|
|
2b341d884f | ||
|
|
141a09b31e | ||
|
|
6378ec4bbd | ||
|
|
603fb1c698 | ||
|
|
67b4ea947b | ||
|
|
4075b7c78a | ||
|
|
6499eba0cb | ||
|
|
980626aa40 | ||
|
|
3f329bc1a8 | ||
|
|
69de67b1c2 | ||
|
|
7158f9a8d9 | ||
|
|
bb75bd7167 | ||
|
|
4fa58169f1 | ||
|
|
564040897f | ||
|
|
f78bbab289 | ||
|
|
d3c82d7363 | ||
|
|
7b9b978a6d | ||
|
|
45d47818ca | ||
|
|
202c4a948b | ||
|
|
b5269bb28e | ||
|
|
d6c5f09a1a | ||
|
|
11546b97bb | ||
|
|
cac6a251cc | ||
|
|
31d26a7bad | ||
|
|
a9ed9ae936 | ||
|
|
a9892efe38 | ||
|
|
f02e0a3ccb | ||
|
|
d6f26609fc | ||
|
|
05438d047d | ||
|
|
32f5632620 | ||
|
|
005cc08b40 | ||
|
|
6a659fe10d | ||
|
|
6941ca2f63 | ||
|
|
e4eba49c1b | ||
|
|
f289439d1d | ||
|
|
a5ef64919a | ||
|
|
e132125cf3 | ||
|
|
01b5512ac7 | ||
|
|
a0c7e63d78 | ||
|
|
8ba5f2ffa7 | ||
|
|
2c44333679 | ||
|
|
610be9cf17 | ||
|
|
eb5e8fe388 | ||
|
|
c999c6082f | ||
|
|
3b86b3fe66 | ||
|
|
0a71dca825 | ||
|
|
4f98d470a0 | ||
|
|
57abae3999 | ||
|
|
202e49a897 | ||
|
|
13093f1732 | ||
|
|
837d327c1e | ||
|
|
e39b777abc | ||
|
|
7ca42489ae | ||
|
|
8c6363f792 | ||
|
|
cbb7f29f96 | ||
|
|
8886c9b6bc | ||
|
|
a07de38e80 | ||
|
|
1f08997a9e | ||
|
|
1b2d07d81d | ||
|
|
fc32cf5bcc | ||
|
|
ce94ef38b2 | ||
|
|
1e9c587b92 | ||
|
|
9f1d633ae4 | ||
|
|
f01189631a | ||
|
|
542877ee46 | ||
|
|
f1760b516d | ||
|
|
f0143fd6c9 | ||
|
|
febfa72d60 | ||
|
|
c03e00035f | ||
|
|
2f8866ef32 | ||
|
|
f98afac6df | ||
|
|
5efd5abbe4 | ||
|
|
6bb8f5d889 | ||
|
|
35f4a49d10 | ||
|
|
a31d713fcc | ||
|
|
6f38f7afc3 | ||
|
|
e514ca0009 | ||
|
|
8c3aed2846 | ||
|
|
2cab9d5514 | ||
|
|
c38bdbb0c5 | ||
|
|
2a1704a0dc | ||
|
|
36168b4609 | ||
|
|
bd498def76 | ||
|
|
d8279c48ac | ||
|
|
4232245546 | ||
|
|
2d1186e55d | ||
|
|
072aeca1be | ||
|
|
cb656f9ef4 | ||
|
|
322a5779f1 | ||
|
|
e41df28bf2 | ||
|
|
d3239b49ce | ||
|
|
d11be61d94 | ||
|
|
916063a79b | ||
|
|
5aa0bf2d14 | ||
|
|
f3ad6b4acc | ||
|
|
ec039719de | ||
|
|
6989df0af3 | ||
|
|
14b16b2f36 | ||
|
|
cbc7e280d6 | ||
|
|
f4fa9c93a0 | ||
|
|
6c64723d7c | ||
|
|
3d21f2223e | ||
|
|
710a69b812 | ||
|
|
dd6a8d9998 | ||
|
|
9f32e5af0a | ||
|
|
298e19598b | ||
|
|
680191d7cb | ||
|
|
81d6d69b85 | ||
|
|
6df72bf4ac | ||
|
|
01f2451349 | ||
|
|
dcd971d079 | ||
|
|
4d22fb9b2a | ||
|
|
c18d58b13f | ||
|
|
b539c2df70 | ||
|
|
64fdb4ddc1 | ||
|
|
a8669197dd | ||
|
|
2412c92772 | ||
|
|
7c0ca42145 | ||
|
|
c443524ee2 | ||
|
|
e2fa01e0cf | ||
|
|
6558f05d1d | ||
|
|
1be0f39be0 | ||
|
|
d01001f2a5 | ||
|
|
7c85694d60 | ||
|
|
b2fcd4c9e5 | ||
|
|
ba8123f08a | ||
|
|
e2879a8eb1 | ||
|
|
eab8652225 | ||
|
|
ead6f96513 | ||
|
|
7ec662c83f | ||
|
|
348d2b8701 | ||
|
|
12f0dcb23b | ||
|
|
449a538e6b | ||
|
|
3c50c9a18b | ||
|
|
3c10ec70a5 | ||
|
|
1a59f343c0 | ||
|
|
fba4d4702e | ||
|
|
143bbf37f4 | ||
|
|
fd3ad267ba | ||
|
|
330583f71d | ||
|
|
d775f90f0c | ||
|
|
e096144713 | ||
|
|
7ce30ba888 | ||
|
|
6a3ec1d874 | ||
|
|
575d3a66c6 | ||
|
|
cc333b3965 | ||
|
|
351758b3bd | ||
|
|
94053d8432 | ||
|
|
e14b2826e0 | ||
|
|
150611123a | ||
|
|
abfc17ee62 | ||
|
|
97be9c94b9 | ||
|
|
21b00e8fbb | ||
|
|
01636c2e4b | ||
|
|
005315cd29 | ||
|
|
20d20df829 | ||
|
|
f945a1bc6b | ||
|
|
3a09b04786 | ||
|
|
82e752395b | ||
|
|
e330abd587 | ||
|
|
16422b4055 | ||
|
|
b9ca74c915 | ||
|
|
f78cff225b | ||
|
|
31eec9fa1c |
152
CHANGELOG.md
152
CHANGELOG.md
@@ -1,24 +1,158 @@
|
|||||||
CHANGELOG
|
CHANGELOG
|
||||||
=========
|
=========
|
||||||
|
|
||||||
Development
|
v0.11 (June 29, 2015)
|
||||||
-----------
|
---------------------
|
||||||
|
|
||||||
|
Advisories:
|
||||||
|
* Users can no longer spoof arbitrary email addresses in outbound mail. When sending mail, the email address configured in your mail client must match the SMTP login username being used, or the email address must be an alias with the SMTP login username listed as one of the alias's targets.
|
||||||
|
* This update replaces your DKIM signing key with a stronger key. Because of DNS caching/propagation, mail sent within a few hours after this update could be marked as spam by recipients. If you use External DNS, you will need to update your DNS records.
|
||||||
|
* The box will now install software from a new Mail-in-a-Box PPA on Launchpad.net, where we are distributing two of our own packages: a patched postgrey and dovecot-lucene.
|
||||||
|
|
||||||
|
Mail:
|
||||||
|
* Greylisting will now let some reputable senders pass through immediately.
|
||||||
|
* Searching mail (via IMAP) will now be much faster using the dovecot lucene full text search plugin.
|
||||||
|
* Users can no longer spoof arbitrary email addresses in outbound mail (see above).
|
||||||
|
* Fix for deleting admin@ and postmaster@ addresses.
|
||||||
|
* Roundcube is updated to version 1.1.2, plugins updated.
|
||||||
|
* Exchange/ActiveSync autoconfiguration was not working on all devices (e.g. iPhone) because of a case-sensitive URL.
|
||||||
|
* The DKIM signing key has been increased to 2048 bits, from 1024, replacing the existing key.
|
||||||
|
|
||||||
|
Web:
|
||||||
|
* 'www' subdomains now automatically redirect to their parent domain (but you'll need to install an SSL certificate).
|
||||||
|
* OCSP no longer uses Google Public DNS.
|
||||||
|
* The installed PHP version is no longer exposed through HTTP response headers, for better security.
|
||||||
|
|
||||||
|
DNS:
|
||||||
|
* Default IPv6 AAAA records were missing since version 0.09.
|
||||||
|
|
||||||
|
Control panel:
|
||||||
|
* Resetting a user's password now forces them to log in again everywhere.
|
||||||
|
* Status checks were not working if an ssh server was not installed.
|
||||||
|
* SSL certificate validation now uses the Python cryptography module in some places where openssl was used.
|
||||||
|
* There is a new tab to show the installed version of Mail-in-a-Box and to fetch the latest released version.
|
||||||
|
|
||||||
|
System:
|
||||||
|
* The munin system monitoring tool is now installed and accessible at /admin/munin.
|
||||||
|
* ownCloud updated to version 8.0.4. The ownCloud installation step now is resilient to download problems. The ownCloud configuration file is now stored in STORAGE_ROOT to fix loss of data when moving STORAGE_ROOT to a new machine.
|
||||||
|
* The setup scripts now run `apt-get update` prior to installing anything to ensure the apt database is in sync with the packages actually available.
|
||||||
|
|
||||||
|
|
||||||
|
v0.10 (June 1, 2015)
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
* SMTP Submission (port 587) began offering the insecure SSLv3 protocol due to a misconfiguration in the previous version.
|
||||||
|
* Roundcube now allows persistent logins using Roundcube-Persistent-Login-Plugin.
|
||||||
|
* ownCloud is updated to version 8.0.3.
|
||||||
|
* SPF records for non-mail domains were tightened.
|
||||||
|
* The minimum greylisting delay has been reduced from 5 minutes to 3 minutes.
|
||||||
|
* Users and aliases weren't working if they were entered with any uppercase letters. Now only lowercase is allowed.
|
||||||
|
* After installing an SSL certificate from the control panel, the page wasn't being refreshed.
|
||||||
|
* Backups broke if the box's hostname was changed after installation.
|
||||||
|
* Dotfiles (i.e. .svn) stored in ownCloud Files were not accessible from ownCloud's mobile/desktop clients.
|
||||||
|
* Fix broken install on OVH VPS's.
|
||||||
|
|
||||||
|
|
||||||
|
v0.09 (May 8, 2015)
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
Mail:
|
||||||
|
|
||||||
|
* Spam checking is now performed on messages larger than the previous limit of 64KB.
|
||||||
|
* POP3S is now enabled (port 995).
|
||||||
|
* Roundcube is updated to version 1.1.1.
|
||||||
|
* Minor security improvements (more mail headers with user agent info are anonymized; crypto settings were tightened).
|
||||||
|
|
||||||
|
ownCloud:
|
||||||
|
|
||||||
|
* Downloading files you uploaded to ownCloud broke because of a change in ownCloud 8.
|
||||||
|
|
||||||
|
DNS:
|
||||||
|
|
||||||
|
* Internationalized Domain Names (IDNs) should now work in email. If you had custom DNS or custom web settings for internationalized domains, check that they are still working.
|
||||||
|
* It is now possible to set multiple TXT and other types of records on the same domain in the control panel.
|
||||||
|
* The custom DNS API was completely rewritten to support setting multiple records of the same type on a domain. Any existing client code using the DNS API will have to be rewritten. (Existing code will just get 404s back.)
|
||||||
|
* On some systems the `nsd` service failed to start if network interfaces were not ready.
|
||||||
|
|
||||||
|
System / Control Panel:
|
||||||
|
|
||||||
|
* In order to guard against misconfiguration that can lead to domain control validation hijacking, email addresses that begin with admin, administrator, postmaster, hostmaster, and webmaster can no longer be used for (new) mail user accounts, and aliases for these addresses may direct mail only to the box's administrator(s).
|
||||||
|
* Backups now use duplicity's built-in gpg symmetric AES256 encryption rather than my home-brewed encryption. Old backups will be incorporated inside the first backup after this update but then deleted from disk (i.e. your backups from the previous few days will be backed up).
|
||||||
|
* There was a race condition between backups and the new nightly status checks.
|
||||||
|
* The control panel would sometimes lock up with an unnecessary loading indicator.
|
||||||
|
* You can no longer delete your own account from the control panel.
|
||||||
|
|
||||||
|
Setup:
|
||||||
|
|
||||||
|
* All Mail-in-a-Box release tags are now signed on github, instructions for verifying the signature are added to the README, and the integrity of some packages downloaded during setup is now verified against a SHA1 hash stored in the tag itself.
|
||||||
|
* Bugs in first user account creation were fixed.
|
||||||
|
|
||||||
|
v0.08 (April 1, 2015)
|
||||||
|
---------------------
|
||||||
|
|
||||||
|
Mail:
|
||||||
|
|
||||||
|
* The Roundcube vacation_sieve plugin by @arodier is now installed to make it easier to set vacation auto-reply messages from within Roundcube.
|
||||||
|
* Authentication-Results headers for DMARC, added in v0.07, were mistakenly added for outbound mail --- that's now removed.
|
||||||
|
* The Trash folder is now created automatically for new mail accounts, addressing a Roundcube error.
|
||||||
|
|
||||||
|
DNS:
|
||||||
|
|
||||||
|
* Custom DNS TXT records were not always working and they can now override the default SPF, DKIM, and DMARC records.
|
||||||
|
|
||||||
|
System:
|
||||||
|
|
||||||
|
* ownCloud updated to version 8.0.2.
|
||||||
|
* Brute-force SSH and IMAP login attempts are now prevented by properly configuring fail2ban.
|
||||||
|
* Status checks are run each night and any changes from night to night are emailed to the box administrator (the first user account).
|
||||||
|
|
||||||
|
Control panel:
|
||||||
|
|
||||||
|
* The new check that system services are running mistakenly checked that the Dovecot Managesieve service is publicly accessible. Although the service binds to the public network interface we don't open the port in ufw. On some machines it seems that ufw blocks the connection from the status checks (which seems correct) and on some machines (mine) it doesn't, which is why I didn't notice the problem.
|
||||||
|
* The current backup chain will now try to predict how many days until it is deleted (always at least 3 days after the next full backup).
|
||||||
|
* The list of aliases that forward to a user are removed from the Mail Users page because when there are many aliases it is slow and times out.
|
||||||
|
* Some status check errors are turned into warnings, especially those that might not apply if External DNS is used.
|
||||||
|
|
||||||
|
v0.07 (February 28, 2015)
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
Mail:
|
||||||
|
|
||||||
|
* If the box manages mail for a domain and a subdomain of that domain, outbound mail from the subdomain was not DKIM-signed and would therefore fail DMARC tests on the receiving end, possibly resulting in the mail heading into spam folders.
|
||||||
|
* Auto-configuration for Mozilla Thunderbird, Evolution, KMail, and Kontact is now available.
|
||||||
|
* Domains that only have a catch-all alias or domain alias no longer automatically create/require admin@ and postmaster@ addresses since they'll forward anyway.
|
||||||
|
* Roundcube is updated to version 1.1.0.
|
||||||
|
* Authentication-Results headers for DMARC are now added to incoming mail.
|
||||||
|
|
||||||
|
DNS:
|
||||||
|
|
||||||
|
* If a custom CNAME record is set on a 'www' subdomain, the default A/AAAA records were preventing the CNAME from working.
|
||||||
|
* If a custom DNS A record overrides one provided by the box, the corresponding default IPv6 record provided by the box is removed since it will probably be incorrect.
|
||||||
|
* Internationalized domain names (IDNs) are now supported for DNS and web, but email is not yet tested.
|
||||||
|
|
||||||
|
Web:
|
||||||
|
|
||||||
|
* Static websites now deny access to certain dot (.) files and directories which typically have sensitive info: .ht*, .svn*, .git*, .hg*, .bzr*.
|
||||||
|
* The nginx server no longer reports its version and OS for better privacy.
|
||||||
|
* The HTTP->HTTPS redirect is now more efficient.
|
||||||
|
* When serving a 'www.' domain, reuse the SSL certificate for the parent domain if it covers the 'www' subdomain too.
|
||||||
|
* If a custom DNS CNAME record is set on a domain, don't offer to put a website on that domain. (Same logic already applies to custom A/AAAA records.)
|
||||||
|
|
||||||
Control panel:
|
Control panel:
|
||||||
|
|
||||||
* Status checks now check that system services are actually running by pinging each port that should have something running on it.
|
* Status checks now check that system services are actually running by pinging each port that should have something running on it.
|
||||||
* If a custom CNAME record is set on a 'www' subdomain, the default A/AAAA records were preventing the CNAME from working.
|
* The status checks are now parallelized so they may be a little faster.
|
||||||
|
* The status check for MX records now allow any priority, in case an unusual setup is required.
|
||||||
|
* The interface for setting website domain-specific directories is simplified.
|
||||||
|
* The mail guide now says that to use Outlook, Outlook 2007 or later on Windows 7 and later is required.
|
||||||
|
* External DNS settings now skip the special "_secondary_nameserver" key which is used for storing secondary NS information.
|
||||||
|
|
||||||
Setup:
|
Setup:
|
||||||
|
|
||||||
* Install cron if it isn't already installed.
|
* Install cron if it isn't already installed.
|
||||||
* Fix a units problem in the minimum memory check.
|
* Fix a units problem in the minimum memory check.
|
||||||
|
* If you override the STORAGE_ROOT, your setting will now persist if you re-run setup.
|
||||||
Miscellaneous:
|
* Hangs due to apt wanting the user to resolve a conflict should now be fixed (apt will just clobber the problematic file now).
|
||||||
|
|
||||||
* Internationalized domain names (IDNs) are now supported for DNS and web, but email is not yet tested.
|
|
||||||
* Domains that only have a catch-all alias or domain alias no longer automatically create/require admin@ and postmaster@ addresses since they'll forward anyway.
|
|
||||||
|
|
||||||
|
|
||||||
v0.06 (January 4, 2015)
|
v0.06 (January 4, 2015)
|
||||||
-----------------------
|
-----------------------
|
||||||
|
|||||||
56
README.md
56
README.md
@@ -14,19 +14,57 @@ I am trying to:
|
|||||||
* Make deploying a good mail server easy.
|
* Make deploying a good mail server easy.
|
||||||
* Promote [decentralization](http://redecentralize.org/), innovation, and privacy on the web.
|
* Promote [decentralization](http://redecentralize.org/), innovation, and privacy on the web.
|
||||||
* Have automated, auditable, and [idempotent](http://sharknet.us/2014/02/01/automated-configuration-management-challenges-with-idempotency/) configuration.
|
* Have automated, auditable, and [idempotent](http://sharknet.us/2014/02/01/automated-configuration-management-challenges-with-idempotency/) configuration.
|
||||||
* **Not** be a mail server that the NSA cannot hack.
|
* **Not** make a totally unhackable, NSA-proof server.
|
||||||
* **Not** be customizable by power users.
|
* **Not** make something customizable by power users.
|
||||||
|
|
||||||
The long-term goal is to have this be a one-click email appliance with *no* user-configurable setup options.
|
|
||||||
|
|
||||||
For more background, see [The Rationale](https://github.com/mail-in-a-box/mailinabox/wiki).
|
|
||||||
|
|
||||||
This setup is what has been powering my own personal email since September 2013.
|
This setup is what has been powering my own personal email since September 2013.
|
||||||
|
|
||||||
The Box
|
The Box
|
||||||
-------
|
-------
|
||||||
|
|
||||||
Mail-in-a-Box turns a fresh Ubuntu 14.04 LTS 64-bit machine into a working mail server, including SMTP ([postfix](http://www.postfix.org/)), IMAP ([dovecot](http://dovecot.org/)), Exchange ActiveSync ([z-push](https://github.com/fmbiete/Z-Push-contrib)), webmail ([Roundcube](http://roundcube.net/)), spam filtering ([spamassassin](https://spamassassin.apache.org/)), greylisting ([postgrey](http://postgrey.schweikert.ch/)), CardDAV/CalDAV ([ownCloud](http://owncloud.org/)), DNS, [SPF](https://en.wikipedia.org/wiki/Sender_Policy_Framework), DKIM ([OpenDKIM](http://www.opendkim.org/)), [DMARC](https://en.wikipedia.org/wiki/DMARC), [DNSSEC](https://en.wikipedia.org/wiki/DNSSEC), [DANE TLSA](https://en.wikipedia.org/wiki/DNS-based_Authentication_of_Named_Entities), [SSHFP](https://tools.ietf.org/html/rfc4255), and basic system services like a firewall, intrusion protection, and setting the system clock.
|
Mail-in-a-Box turns a fresh Ubuntu 14.04 LTS 64-bit machine into a working mail server by installing and configuring various components.
|
||||||
|
|
||||||
|
It is a one-click email appliance (see the [setup guide](https://mailinabox.email/guide.html)). There are no user-configurable setup options. It "just works".
|
||||||
|
|
||||||
|
The components installed are:
|
||||||
|
|
||||||
|
* SMTP ([postfix](http://www.postfix.org/)), IMAP ([dovecot](http://dovecot.org/)), CardDAV/CalDAV ([ownCloud](http://owncloud.org/)), Exchange ActiveSync ([z-push](https://github.com/fmbiete/Z-Push-contrib))
|
||||||
|
* Webmail ([Roundcube](http://roundcube.net/)), static website hosting ([nginx](http://nginx.org/))
|
||||||
|
* Spam filtering ([spamassassin](https://spamassassin.apache.org/)), greylisting ([postgrey](http://postgrey.schweikert.ch/))
|
||||||
|
* DNS ([nsd4](http://www.nlnetlabs.nl/projects/nsd/)) with [SPF](https://en.wikipedia.org/wiki/Sender_Policy_Framework), DKIM ([OpenDKIM](http://www.opendkim.org/)), [DMARC](https://en.wikipedia.org/wiki/DMARC), [DNSSEC](https://en.wikipedia.org/wiki/DNSSEC), [DANE TLSA](https://en.wikipedia.org/wiki/DNS-based_Authentication_of_Named_Entities), and [SSHFP](https://tools.ietf.org/html/rfc4255) records automatically set
|
||||||
|
* Firewall ([ufw](https://launchpad.net/ufw)), intrusion protection ([fail2ban](http://www.fail2ban.org/wiki/index.php/Main_Page)), system monitoring ([munin](http://munin-monitoring.org/))
|
||||||
|
|
||||||
|
It also includes:
|
||||||
|
|
||||||
|
* A control panel and API for adding/removing mail users, aliases, custom DNS records, etc. and detailed system monitoring.
|
||||||
|
* Our own builds of postgrey and dovecot-lucene distributed via the [Mail-in-a-Box PPA](https://launchpad.net/~mail-in-a-box/+archive/ubuntu/ppa) on Launchpad.
|
||||||
|
|
||||||
|
For more information on how Mail-in-a-Box handles your privacy, see the [security details page](security.md).
|
||||||
|
|
||||||
|
The Security
|
||||||
|
------------
|
||||||
|
|
||||||
|
See the [security guide](security.md) for more information about the box's security configuration (TLS, password storage, etc).
|
||||||
|
|
||||||
|
I sign the release tags on git. To verify that a tag is signed by me, you can perform the following steps:
|
||||||
|
|
||||||
|
# Download my PGP key.
|
||||||
|
$ curl -s https://keybase.io/joshdata/key.asc | gpg --import
|
||||||
|
gpg: key C10BDD81: public key "Joshua Tauberer <jt@occams.info>" imported
|
||||||
|
|
||||||
|
# Clone this repository.
|
||||||
|
$ git clone https://github.com/mail-in-a-box/mailinabox
|
||||||
|
$ cd mailinabox
|
||||||
|
|
||||||
|
# Verify the tag.
|
||||||
|
$ git verify-tag v0.11
|
||||||
|
gpg: Signature made ..... using RSA key ID C10BDD81
|
||||||
|
gpg: Good signature from "Joshua Tauberer <jt@occams.info>"
|
||||||
|
gpg: WARNING: This key is not certified with a trusted signature!
|
||||||
|
gpg: There is no indication that the signature belongs to the owner.
|
||||||
|
Primary key fingerprint: 5F4C 0E73 13CC D744 693B 2AEA B920 41F4 C10B DD81
|
||||||
|
|
||||||
|
The key ID and fingerprint above should match my [Keybase.io key](https://keybase.io/joshdata) and the fingerprint I publish on [my homepage](https://razor.occams.info/).
|
||||||
|
|
||||||
The Acknowledgements
|
The Acknowledgements
|
||||||
--------------------
|
--------------------
|
||||||
@@ -39,5 +77,7 @@ The History
|
|||||||
-----------
|
-----------
|
||||||
|
|
||||||
* In 2007 I wrote a relatively popular Mozilla Thunderbird extension that added client-side SPF and DKIM checks to mail to warn users about possible phishing: [add-on page](https://addons.mozilla.org/en-us/thunderbird/addon/sender-verification-anti-phish/), [source](https://github.com/JoshData/thunderbird-spf).
|
* In 2007 I wrote a relatively popular Mozilla Thunderbird extension that added client-side SPF and DKIM checks to mail to warn users about possible phishing: [add-on page](https://addons.mozilla.org/en-us/thunderbird/addon/sender-verification-anti-phish/), [source](https://github.com/JoshData/thunderbird-spf).
|
||||||
|
* In August 2013 I began Mail-in-a-Box by combining my own mail server configuration with the setup in ["NSA-proof your email in 2 hours"](http://sealedabstract.com/code/nsa-proof-your-e-mail-in-2-hours/) and making the setup steps reproducible with bash scripts.
|
||||||
* Mail-in-a-Box was a semifinalist in the 2014 [Knight News Challenge](https://www.newschallenge.org/challenge/2014/submissions/mail-in-a-box), but it was not selected as a winner.
|
* Mail-in-a-Box was a semifinalist in the 2014 [Knight News Challenge](https://www.newschallenge.org/challenge/2014/submissions/mail-in-a-box), but it was not selected as a winner.
|
||||||
* Mail-in-a-Box hit the front page of Hacker News in [April](https://news.ycombinator.com/item?id=7634514) and [September](https://news.ycombinator.com/item?id=8276171) 2014.
|
* Mail-in-a-Box hit the front page of Hacker News in [April](https://news.ycombinator.com/item?id=7634514) 2014, [September](https://news.ycombinator.com/item?id=8276171) 2014, and [May](https://news.ycombinator.com/item?id=9624267) 2015.
|
||||||
|
* FastCompany mentioned Mail-in-a-Box in a [roundup of privacy projects](http://www.fastcompany.com/3047645/your-own-private-cloud) on June 26, 2015.
|
||||||
|
|||||||
22
conf/fail2ban/dovecotimap.conf
Normal file
22
conf/fail2ban/dovecotimap.conf
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Fail2Ban filter Dovecot authentication and pop3/imap server
|
||||||
|
# For Mail-in-a-Box
|
||||||
|
|
||||||
|
[INCLUDES]
|
||||||
|
|
||||||
|
before = common.conf
|
||||||
|
|
||||||
|
[Definition]
|
||||||
|
|
||||||
|
_daemon = (auth|dovecot(-auth)?|auth-worker)
|
||||||
|
|
||||||
|
failregex = ^%(__prefix_line)s(pop3|imap)-login: (Info: )?(Aborted login|Disconnected)(: Inactivity)? \(((no auth attempts|auth failed, \d+ attempts)( in \d+ secs)?|tried to use (disabled|disallowed) \S+ auth)\):( user=<\S*>,)?( method=\S+,)? rip=<HOST>, lip=(\d{1,3}\.){3}\d{1,3}(, TLS( handshaking)?(: Disconnected)?)?(, session=<\S+>)?\s*$
|
||||||
|
|
||||||
|
ignoreregex =
|
||||||
|
|
||||||
|
# DEV Notes:
|
||||||
|
# * the first regex is essentially a copy of pam-generic.conf
|
||||||
|
# * Probably doesn't do dovecot sql/ldap backends properly
|
||||||
|
#
|
||||||
|
# Author: Martin Waschbuesch
|
||||||
|
# Daniel Black (rewrote with begin and end anchors)
|
||||||
|
# Mail-in-a-Box (swapped session=...)
|
||||||
34
conf/fail2ban/jail.local
Normal file
34
conf/fail2ban/jail.local
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
# Fail2Ban configuration file.
|
||||||
|
# For Mail-in-a-Box
|
||||||
|
[DEFAULT]
|
||||||
|
|
||||||
|
# bantime in seconds
|
||||||
|
bantime = 60
|
||||||
|
|
||||||
|
# This should ban dumb brute-force attacks, not oblivious users.
|
||||||
|
findtime = 30
|
||||||
|
maxretry = 20
|
||||||
|
|
||||||
|
#
|
||||||
|
# JAILS
|
||||||
|
#
|
||||||
|
|
||||||
|
[ssh]
|
||||||
|
|
||||||
|
enabled = true
|
||||||
|
logpath = /var/log/auth.log
|
||||||
|
maxretry = 20
|
||||||
|
|
||||||
|
[ssh-ddos]
|
||||||
|
|
||||||
|
enabled = true
|
||||||
|
maxretry = 20
|
||||||
|
|
||||||
|
[sasl]
|
||||||
|
|
||||||
|
enabled = true
|
||||||
|
|
||||||
|
[dovecot]
|
||||||
|
|
||||||
|
enabled = true
|
||||||
|
filter = dovecotimap
|
||||||
44
conf/mozilla-autoconfig.xml
Normal file
44
conf/mozilla-autoconfig.xml
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
<?xml version="1.0"?>
|
||||||
|
<clientConfig version="1.1">
|
||||||
|
<emailProvider id="PRIMARY_HOSTNAME">
|
||||||
|
<domain>PRIMARY_HOSTNAME</domain>
|
||||||
|
|
||||||
|
<displayName>PRIMARY_HOSTNAME (Mail-in-a-Box)</displayName>
|
||||||
|
<displayShortName>PRIMARY_HOSTNAME</displayShortName>
|
||||||
|
|
||||||
|
<incomingServer type="imap">
|
||||||
|
<hostname>PRIMARY_HOSTNAME</hostname>
|
||||||
|
<port>993</port>
|
||||||
|
<socketType>SSL</socketType>
|
||||||
|
<username>%EMAILADDRESS%</username>
|
||||||
|
<authentication>password-cleartext</authentication>
|
||||||
|
</incomingServer>
|
||||||
|
|
||||||
|
<outgoingServer type="smtp">
|
||||||
|
<hostname>PRIMARY_HOSTNAME</hostname>
|
||||||
|
<port>587</port>
|
||||||
|
<socketType>STARTTLS</socketType>
|
||||||
|
<username>%EMAILADDRESS%</username>
|
||||||
|
<authentication>password-cleartext</authentication>
|
||||||
|
<addThisServer>true</addThisServer>
|
||||||
|
<useGlobalPreferredServer>true</useGlobalPreferredServer>
|
||||||
|
</outgoingServer>
|
||||||
|
|
||||||
|
<documentation url="https://PRIMARY_HOSTNAME/">
|
||||||
|
<descr lang="en">PRIMARY_HOSTNAME website.</descr>
|
||||||
|
</documentation>
|
||||||
|
</emailProvider>
|
||||||
|
|
||||||
|
<webMail>
|
||||||
|
<loginPage url="https://PRIMARY_HOSTNAME/mail/" />
|
||||||
|
<loginPageInfo url="https://PRIMARY_HOSTNAME/mail/" >
|
||||||
|
<username>%EMAILADDRESS%</username>
|
||||||
|
<usernameField id="rcmloginuser" name="_user" />
|
||||||
|
<passwordField id="rcmloginpwd" name="_pass" />
|
||||||
|
<loginButton id="rcmloginsubmit" />
|
||||||
|
</loginPageInfo>
|
||||||
|
</webMail>
|
||||||
|
|
||||||
|
<clientConfigUpdate url="https://PRIMARY_HOSTNAME/.well-known/autoconfig/mail/config-v1.1.xml" />
|
||||||
|
|
||||||
|
</clientConfig>
|
||||||
79
conf/nginx-alldomains.conf
Normal file
79
conf/nginx-alldomains.conf
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
# Expose this directory as static files.
|
||||||
|
root $ROOT;
|
||||||
|
index index.html index.htm;
|
||||||
|
|
||||||
|
location = /robots.txt {
|
||||||
|
log_not_found off;
|
||||||
|
access_log off;
|
||||||
|
}
|
||||||
|
|
||||||
|
location = /favicon.ico {
|
||||||
|
log_not_found off;
|
||||||
|
access_log off;
|
||||||
|
}
|
||||||
|
|
||||||
|
location = /mailinabox.mobileconfig {
|
||||||
|
alias /var/lib/mailinabox/mobileconfig.xml;
|
||||||
|
}
|
||||||
|
location = /.well-known/autoconfig/mail/config-v1.1.xml {
|
||||||
|
alias /var/lib/mailinabox/mozilla-autoconfig.xml;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Roundcube Webmail configuration.
|
||||||
|
rewrite ^/mail$ /mail/ redirect;
|
||||||
|
rewrite ^/mail/$ /mail/index.php;
|
||||||
|
location /mail/ {
|
||||||
|
index index.php;
|
||||||
|
alias /usr/local/lib/roundcubemail/;
|
||||||
|
}
|
||||||
|
location ~ /mail/config/.* {
|
||||||
|
# A ~-style location is needed to give this precedence over the next block.
|
||||||
|
return 403;
|
||||||
|
}
|
||||||
|
location ~ /mail/.*\.php {
|
||||||
|
# note: ~ has precedence over a regular location block
|
||||||
|
include fastcgi_params;
|
||||||
|
fastcgi_split_path_info ^/mail(/.*)()$;
|
||||||
|
fastcgi_index index.php;
|
||||||
|
fastcgi_param SCRIPT_FILENAME /usr/local/lib/roundcubemail/$fastcgi_script_name;
|
||||||
|
fastcgi_pass php-fpm;
|
||||||
|
|
||||||
|
# Outgoing mail also goes through this endpoint, so increase the maximum
|
||||||
|
# file upload limit to match the corresponding Postfix limit.
|
||||||
|
client_max_body_size 128M;
|
||||||
|
}
|
||||||
|
|
||||||
|
# Z-Push (Microsoft Exchange ActiveSync)
|
||||||
|
location /Microsoft-Server-ActiveSync {
|
||||||
|
include /etc/nginx/fastcgi_params;
|
||||||
|
fastcgi_param SCRIPT_FILENAME /usr/local/lib/z-push/index.php;
|
||||||
|
fastcgi_param PHP_VALUE "include_path=.:/usr/share/php:/usr/share/pear:/usr/share/awl/inc";
|
||||||
|
fastcgi_read_timeout 630;
|
||||||
|
fastcgi_pass php-fpm;
|
||||||
|
|
||||||
|
# Outgoing mail also goes through this endpoint, so increase the maximum
|
||||||
|
# file upload limit to match the corresponding Postfix limit.
|
||||||
|
client_max_body_size 128M;
|
||||||
|
}
|
||||||
|
location ~* ^/autodiscover/autodiscover.xml$ {
|
||||||
|
include fastcgi_params;
|
||||||
|
fastcgi_param SCRIPT_FILENAME /usr/local/lib/z-push/autodiscover/autodiscover.php;
|
||||||
|
fastcgi_param PHP_VALUE "include_path=.:/usr/share/php:/usr/share/pear:/usr/share/awl/inc";
|
||||||
|
fastcgi_pass php-fpm;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ADDITIONAL DIRECTIVES HERE
|
||||||
|
|
||||||
|
# Disable viewing dotfiles (.htaccess, .svn, .git, etc.)
|
||||||
|
# This block is placed at the end. Nginx's precedence rules means this block
|
||||||
|
# takes precedence over all non-regex matches and only regex matches that
|
||||||
|
# come after it (i.e. none of those, since this is the last one.) That means
|
||||||
|
# we're blocking dotfiles in the static hosted sites but not the FastCGI-
|
||||||
|
# handled locations for ownCloud (which serves user-uploaded files that might
|
||||||
|
# have this pattern, see #414) or some of the other services.
|
||||||
|
location ~ /\.(ht|svn|git|hg|bzr) {
|
||||||
|
log_not_found off;
|
||||||
|
access_log off;
|
||||||
|
deny all;
|
||||||
|
}
|
||||||
@@ -2,6 +2,7 @@
|
|||||||
# Proxy /admin to our Python based control panel daemon. It is
|
# Proxy /admin to our Python based control panel daemon. It is
|
||||||
# listening on IPv4 only so use an IP address and not 'localhost'.
|
# listening on IPv4 only so use an IP address and not 'localhost'.
|
||||||
rewrite ^/admin$ /admin/;
|
rewrite ^/admin$ /admin/;
|
||||||
|
rewrite ^/admin/munin$ /admin/munin/ redirect;
|
||||||
location /admin/ {
|
location /admin/ {
|
||||||
proxy_pass http://127.0.0.1:10222/;
|
proxy_pass http://127.0.0.1:10222/;
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
@@ -24,11 +25,13 @@
|
|||||||
# /cloud/index.php/apps/files/
|
# /cloud/index.php/apps/files/
|
||||||
# /cloud/index.php/apps/files/ajax/scan.php (it's really index.php; see 6fdef379adfdeac86cc2220209bdf4eb9562268d)
|
# /cloud/index.php/apps/files/ajax/scan.php (it's really index.php; see 6fdef379adfdeac86cc2220209bdf4eb9562268d)
|
||||||
# /cloud/ocs/v1.php/apps/files_sharing/api/v1 (see #240)
|
# /cloud/ocs/v1.php/apps/files_sharing/api/v1 (see #240)
|
||||||
|
# /cloud/remote.php/webdav/yourfilehere...
|
||||||
include fastcgi_params;
|
include fastcgi_params;
|
||||||
fastcgi_param SCRIPT_FILENAME /usr/local/lib/owncloud/$2;
|
fastcgi_param SCRIPT_FILENAME /usr/local/lib/owncloud/$2;
|
||||||
fastcgi_param SCRIPT_NAME $1$2;
|
fastcgi_param SCRIPT_NAME $1$2;
|
||||||
fastcgi_param PATH_INFO $3;
|
fastcgi_param PATH_INFO $3;
|
||||||
fastcgi_param MOD_X_ACCEL_REDIRECT_ENABLED on;
|
fastcgi_param MOD_X_ACCEL_REDIRECT_ENABLED on;
|
||||||
|
fastcgi_param MOD_X_ACCEL_REDIRECT_PREFIX /owncloud-xaccel;
|
||||||
fastcgi_read_timeout 630;
|
fastcgi_read_timeout 630;
|
||||||
fastcgi_pass php-fpm;
|
fastcgi_pass php-fpm;
|
||||||
error_page 403 /cloud/core/templates/403.php;
|
error_page 403 /cloud/core/templates/403.php;
|
||||||
@@ -36,12 +39,13 @@
|
|||||||
client_max_body_size 1G;
|
client_max_body_size 1G;
|
||||||
fastcgi_buffers 64 4K;
|
fastcgi_buffers 64 4K;
|
||||||
}
|
}
|
||||||
location ^~ /cloud/data {
|
location ^~ /owncloud-xaccel/ {
|
||||||
# In order to support MOD_X_ACCEL_REDIRECT_ENABLED, we need to expose
|
# This directory is for MOD_X_ACCEL_REDIRECT_ENABLED. ownCloud sends the full file
|
||||||
# the data directory but only allow 'internal' redirects within nginx
|
# path on disk as a subdirectory under this virtual path.
|
||||||
# so that this is not exposed to the world.
|
# We must only allow 'internal' redirects within nginx so that the filesystem
|
||||||
|
# is not exposed to the world.
|
||||||
internal;
|
internal;
|
||||||
alias $STORAGE_ROOT/owncloud;
|
alias /;
|
||||||
}
|
}
|
||||||
location ~ ^/((caldav|carddav|webdav).*)$ {
|
location ~ ^/((caldav|carddav|webdav).*)$ {
|
||||||
# Z-Push doesn't like getting a redirect, and a plain rewrite didn't work either.
|
# Z-Push doesn't like getting a redirect, and a plain rewrite didn't work either.
|
||||||
@@ -53,3 +57,4 @@
|
|||||||
rewrite ^/.well-known/carddav /cloud/remote.php/carddav/ redirect;
|
rewrite ^/.well-known/carddav /cloud/remote.php/carddav/ redirect;
|
||||||
rewrite ^/.well-known/caldav /cloud/remote.php/caldav/ redirect;
|
rewrite ^/.well-known/caldav /cloud/remote.php/caldav/ redirect;
|
||||||
|
|
||||||
|
# ADDITIONAL DIRECTIVES HERE
|
||||||
|
|||||||
@@ -70,6 +70,5 @@ ssl_dhparam STORAGE_ROOT/ssl/dh2048.pem;
|
|||||||
# nginx will use them to talk to the CA.
|
# nginx will use them to talk to the CA.
|
||||||
ssl_stapling on;
|
ssl_stapling on;
|
||||||
ssl_stapling_verify on;
|
ssl_stapling_verify on;
|
||||||
resolver 8.8.8.8 8.8.4.4 valid=86400;
|
resolver 127.0.0.1 valid=86400;
|
||||||
resolver_timeout 10;
|
resolver_timeout 10;
|
||||||
#ssl_trusted_certificate /path/to/all-certs-in-chain.crt;
|
|
||||||
|
|||||||
@@ -7,7 +7,15 @@ server {
|
|||||||
|
|
||||||
server_name $HOSTNAME;
|
server_name $HOSTNAME;
|
||||||
root /tmp/invalid-path-nothing-here;
|
root /tmp/invalid-path-nothing-here;
|
||||||
rewrite ^/(.*)$ https://$HOSTNAME/$1 permanent;
|
|
||||||
|
# Improve privacy: Hide version an OS information on
|
||||||
|
# error pages and in the "Server" HTTP-Header.
|
||||||
|
server_tokens off;
|
||||||
|
|
||||||
|
# Redirect using the 'return' directive and the built-in
|
||||||
|
# variable '$request_uri' to avoid any capturing, matching
|
||||||
|
# or evaluation of regular expressions.
|
||||||
|
return 301 https://$HOSTNAME$request_uri;
|
||||||
}
|
}
|
||||||
|
|
||||||
# The secure HTTPS server.
|
# The secure HTTPS server.
|
||||||
@@ -17,71 +25,13 @@ server {
|
|||||||
|
|
||||||
server_name $HOSTNAME;
|
server_name $HOSTNAME;
|
||||||
|
|
||||||
|
# Improve privacy: Hide version an OS information on
|
||||||
|
# error pages and in the "Server" HTTP-Header.
|
||||||
|
server_tokens off;
|
||||||
|
|
||||||
ssl_certificate $SSL_CERTIFICATE;
|
ssl_certificate $SSL_CERTIFICATE;
|
||||||
ssl_certificate_key $SSL_KEY;
|
ssl_certificate_key $SSL_KEY;
|
||||||
include /etc/nginx/nginx-ssl.conf;
|
include /etc/nginx/nginx-ssl.conf;
|
||||||
|
|
||||||
# Expose this directory as static files.
|
|
||||||
root $ROOT;
|
|
||||||
index index.html index.htm;
|
|
||||||
|
|
||||||
location = /robots.txt {
|
|
||||||
log_not_found off;
|
|
||||||
access_log off;
|
|
||||||
}
|
|
||||||
|
|
||||||
location = /favicon.ico {
|
|
||||||
log_not_found off;
|
|
||||||
access_log off;
|
|
||||||
}
|
|
||||||
|
|
||||||
location = /mailinabox.mobileconfig {
|
|
||||||
alias /var/lib/mailinabox/mobileconfig.xml;
|
|
||||||
}
|
|
||||||
|
|
||||||
# Roundcube Webmail configuration.
|
|
||||||
rewrite ^/mail$ /mail/ redirect;
|
|
||||||
rewrite ^/mail/$ /mail/index.php;
|
|
||||||
location /mail/ {
|
|
||||||
index index.php;
|
|
||||||
alias /usr/local/lib/roundcubemail/;
|
|
||||||
}
|
|
||||||
location ~ /mail/config/.* {
|
|
||||||
# A ~-style location is needed to give this precedence over the next block.
|
|
||||||
return 403;
|
|
||||||
}
|
|
||||||
location ~ /mail/.*\.php {
|
|
||||||
# note: ~ has precendence over a regular location block
|
|
||||||
include fastcgi_params;
|
|
||||||
fastcgi_split_path_info ^/mail(/.*)()$;
|
|
||||||
fastcgi_index index.php;
|
|
||||||
fastcgi_param SCRIPT_FILENAME /usr/local/lib/roundcubemail/$fastcgi_script_name;
|
|
||||||
fastcgi_pass php-fpm;
|
|
||||||
|
|
||||||
# Outgoing mail also goes through this endpoint, so increase the maximum
|
|
||||||
# file upload limit to match the corresponding Postfix limit.
|
|
||||||
client_max_body_size 128M;
|
|
||||||
}
|
|
||||||
|
|
||||||
# Z-Push (Microsoft Exchange ActiveSync)
|
|
||||||
location /Microsoft-Server-ActiveSync {
|
|
||||||
include /etc/nginx/fastcgi_params;
|
|
||||||
fastcgi_param SCRIPT_FILENAME /usr/local/lib/z-push/index.php;
|
|
||||||
fastcgi_param PHP_VALUE "include_path=.:/usr/share/php:/usr/share/pear:/usr/share/awl/inc";
|
|
||||||
fastcgi_read_timeout 630;
|
|
||||||
fastcgi_pass php-fpm;
|
|
||||||
|
|
||||||
# Outgoing mail also goes through this endpoint, so increase the maximum
|
|
||||||
# file upload limit to match the corresponding Postfix limit.
|
|
||||||
client_max_body_size 128M;
|
|
||||||
}
|
|
||||||
location /autodiscover/autodiscover.xml {
|
|
||||||
include fastcgi_params;
|
|
||||||
fastcgi_param SCRIPT_FILENAME /usr/local/lib/z-push/autodiscover/autodiscover.php;
|
|
||||||
fastcgi_param PHP_VALUE "include_path=.:/usr/share/php:/usr/share/pear:/usr/share/awl/inc";
|
|
||||||
fastcgi_pass php-fpm;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
# ADDITIONAL DIRECTIVES HERE
|
# ADDITIONAL DIRECTIVES HERE
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,3 +8,7 @@
|
|||||||
/^\s*X-Enigmail:/ IGNORE
|
/^\s*X-Enigmail:/ IGNORE
|
||||||
/^\s*X-Mailer:/ IGNORE
|
/^\s*X-Mailer:/ IGNORE
|
||||||
/^\s*X-Originating-IP:/ IGNORE
|
/^\s*X-Originating-IP:/ IGNORE
|
||||||
|
/^\s*X-Pgp-Agent:/ IGNORE
|
||||||
|
|
||||||
|
# The Mime-Version header can leak the user agent too, e.g. in Mime-Version: 1.0 (Mac OS X Mail 8.1 \(2010.6\)).
|
||||||
|
/^\s*(Mime-Version:\s*[0-9\.]+)\s.+/ REPLACE $1
|
||||||
|
|||||||
@@ -88,8 +88,9 @@ class KeyAuthService:
|
|||||||
if email == "" or pw == "":
|
if email == "" or pw == "":
|
||||||
raise ValueError("Enter an email address and password.")
|
raise ValueError("Enter an email address and password.")
|
||||||
|
|
||||||
# The password might be a user-specific API key.
|
# The password might be a user-specific API key. create_user_key raises
|
||||||
if hmac.compare_digest(self.create_user_key(email), pw):
|
# a ValueError if the user does not exist.
|
||||||
|
if hmac.compare_digest(self.create_user_key(email, env), pw):
|
||||||
# OK.
|
# OK.
|
||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
@@ -111,18 +112,26 @@ class KeyAuthService:
|
|||||||
# Login failed.
|
# Login failed.
|
||||||
raise ValueError("Invalid password.")
|
raise ValueError("Invalid password.")
|
||||||
|
|
||||||
# Get privileges for authorization.
|
# Get privileges for authorization. This call should never fail because by this
|
||||||
|
# point we know the email address is a valid user. But on error the call will
|
||||||
# (This call should never fail on a valid user. But if it did fail, it would
|
# return a tuple of an error message and an HTTP status code.
|
||||||
# return a tuple of an error message and an HTTP status code.)
|
|
||||||
privs = get_mail_user_privileges(email, env)
|
privs = get_mail_user_privileges(email, env)
|
||||||
if isinstance(privs, tuple): raise Exception("Error getting privileges.")
|
if isinstance(privs, tuple): raise ValueError(privs[0])
|
||||||
|
|
||||||
# Return a list of privileges.
|
# Return a list of privileges.
|
||||||
return privs
|
return privs
|
||||||
|
|
||||||
def create_user_key(self, email):
|
def create_user_key(self, email, env):
|
||||||
return hmac.new(self.key.encode('ascii'), b"AUTH:" + email.encode("utf8"), digestmod="sha1").hexdigest()
|
# Store an HMAC with the client. The hashed message of the HMAC will be the user's
|
||||||
|
# email address & hashed password and the key will be the master API key. The user of
|
||||||
|
# course has their own email address and password. We assume they do not have the master
|
||||||
|
# API key (unless they are trusted anyway). The HMAC proves that they authenticated
|
||||||
|
# with us in some other way to get the HMAC. Including the password means that when
|
||||||
|
# a user's password is reset, the HMAC changes and they will correctly need to log
|
||||||
|
# in to the control panel again. This method raises a ValueError if the user does
|
||||||
|
# not exist, due to get_mail_password.
|
||||||
|
msg = b"AUTH:" + email.encode("utf8") + b" " + get_mail_password(email, env).encode("utf8")
|
||||||
|
return hmac.new(self.key.encode('ascii'), msg, digestmod="sha256").hexdigest()
|
||||||
|
|
||||||
def _generate_key(self):
|
def _generate_key(self):
|
||||||
raw_key = os.urandom(32)
|
raw_key = os.urandom(32)
|
||||||
|
|||||||
@@ -2,25 +2,24 @@
|
|||||||
|
|
||||||
# This script performs a backup of all user data:
|
# This script performs a backup of all user data:
|
||||||
# 1) System services are stopped while a copy of user data is made.
|
# 1) System services are stopped while a copy of user data is made.
|
||||||
# 2) An incremental backup is made using duplicity into the
|
# 2) An incremental encrypted backup is made using duplicity into the
|
||||||
# directory STORAGE_ROOT/backup/duplicity.
|
# directory STORAGE_ROOT/backup/encrypted. The password used for
|
||||||
|
# encryption is stored in backup/secret_key.txt.
|
||||||
# 3) The stopped services are restarted.
|
# 3) The stopped services are restarted.
|
||||||
# 4) The backup files are encrypted with a long password (stored in
|
|
||||||
# backup/secret_key.txt) to STORAGE_ROOT/backup/encrypted.
|
|
||||||
# 5) STORAGE_ROOT/backup/after-backup is executd if it exists.
|
# 5) STORAGE_ROOT/backup/after-backup is executd if it exists.
|
||||||
|
|
||||||
import os, os.path, shutil, glob, re, datetime
|
import os, os.path, shutil, glob, re, datetime
|
||||||
import dateutil.parser, dateutil.relativedelta, dateutil.tz
|
import dateutil.parser, dateutil.relativedelta, dateutil.tz
|
||||||
|
|
||||||
from utils import exclusive_process, load_environment, shell
|
from utils import exclusive_process, load_environment, shell, wait_for_service
|
||||||
|
|
||||||
# destroy backups when the most recent increment in the chain
|
# Destroy backups when the most recent increment in the chain
|
||||||
# that depends on it is this many days old.
|
# that depends on it is this many days old.
|
||||||
keep_backups_for_days = 3
|
keep_backups_for_days = 3
|
||||||
|
|
||||||
def backup_status(env):
|
def backup_status(env):
|
||||||
# What is the current status of backups?
|
# What is the current status of backups?
|
||||||
# Loop through all of the files in STORAGE_ROOT/backup/duplicity to
|
# Loop through all of the files in STORAGE_ROOT/backup/encrypted to
|
||||||
# get a list of all of the backups taken and sum up file sizes to
|
# get a list of all of the backups taken and sum up file sizes to
|
||||||
# see how large the storage is.
|
# see how large the storage is.
|
||||||
|
|
||||||
@@ -36,10 +35,10 @@ def backup_status(env):
|
|||||||
return "%d hours, %d minutes" % (rd.hours, rd.minutes)
|
return "%d hours, %d minutes" % (rd.hours, rd.minutes)
|
||||||
|
|
||||||
backups = { }
|
backups = { }
|
||||||
basedir = os.path.join(env['STORAGE_ROOT'], 'backup/duplicity/')
|
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
|
||||||
encdir = os.path.join(env['STORAGE_ROOT'], 'backup/encrypted/')
|
backup_dir = os.path.join(backup_root, 'encrypted')
|
||||||
os.makedirs(basedir, exist_ok=True) # os.listdir fails if directory does not exist
|
os.makedirs(backup_dir, exist_ok=True) # os.listdir fails if directory does not exist
|
||||||
for fn in os.listdir(basedir):
|
for fn in os.listdir(backup_dir):
|
||||||
m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn)
|
m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn)
|
||||||
if not m: raise ValueError(fn)
|
if not m: raise ValueError(fn)
|
||||||
|
|
||||||
@@ -53,23 +52,37 @@ def backup_status(env):
|
|||||||
"full": m.group("incbase") is None,
|
"full": m.group("incbase") is None,
|
||||||
"previous": m.group("incbase"),
|
"previous": m.group("incbase"),
|
||||||
"size": 0,
|
"size": 0,
|
||||||
"encsize": 0,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
backups[key]["size"] += os.path.getsize(os.path.join(basedir, fn))
|
backups[key]["size"] += os.path.getsize(os.path.join(backup_dir, fn))
|
||||||
|
|
||||||
# Also check encrypted size.
|
|
||||||
encfn = os.path.join(encdir, fn + ".enc")
|
|
||||||
if os.path.exists(encfn):
|
|
||||||
backups[key]["encsize"] += os.path.getsize(encfn)
|
|
||||||
|
|
||||||
# Ensure the rows are sorted reverse chronologically.
|
# Ensure the rows are sorted reverse chronologically.
|
||||||
# This is relied on by should_force_full() and the next step.
|
# This is relied on by should_force_full() and the next step.
|
||||||
backups = sorted(backups.values(), key = lambda b : b["date"], reverse=True)
|
backups = sorted(backups.values(), key = lambda b : b["date"], reverse=True)
|
||||||
|
|
||||||
|
# Get the average size of incremental backups and the size of the
|
||||||
|
# most recent full backup.
|
||||||
|
incremental_count = 0
|
||||||
|
incremental_size = 0
|
||||||
|
first_full_size = None
|
||||||
|
for bak in backups:
|
||||||
|
if bak["full"]:
|
||||||
|
first_full_size = bak["size"]
|
||||||
|
break
|
||||||
|
incremental_count += 1
|
||||||
|
incremental_size += bak["size"]
|
||||||
|
|
||||||
|
# Predict how many more increments until the next full backup,
|
||||||
|
# and add to that the time we hold onto backups, to predict
|
||||||
|
# how long the most recent full backup+increments will be held
|
||||||
|
# onto. Round up since the backup occurs on the night following
|
||||||
|
# when the threshold is met.
|
||||||
|
deleted_in = None
|
||||||
|
if incremental_count > 0 and first_full_size is not None:
|
||||||
|
deleted_in = "approx. %d days" % round(keep_backups_for_days + (.5 * first_full_size - incremental_size) / (incremental_size/incremental_count) + .5)
|
||||||
|
|
||||||
# When will a backup be deleted?
|
# When will a backup be deleted?
|
||||||
saw_full = False
|
saw_full = False
|
||||||
deleted_in = None
|
|
||||||
days_ago = now - datetime.timedelta(days=keep_backups_for_days)
|
days_ago = now - datetime.timedelta(days=keep_backups_for_days)
|
||||||
for bak in backups:
|
for bak in backups:
|
||||||
if deleted_in:
|
if deleted_in:
|
||||||
@@ -86,9 +99,8 @@ def backup_status(env):
|
|||||||
bak["deleted_in"] = deleted_in
|
bak["deleted_in"] = deleted_in
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"directory": basedir,
|
"directory": backup_dir,
|
||||||
"encpwfile": os.path.join(env['STORAGE_ROOT'], 'backup/secret_key.txt'),
|
"encpwfile": os.path.join(backup_root, 'secret_key.txt'),
|
||||||
"encdirectory": encdir,
|
|
||||||
"tz": now.tzname(),
|
"tz": now.tzname(),
|
||||||
"backups": backups,
|
"backups": backups,
|
||||||
}
|
}
|
||||||
@@ -117,10 +129,35 @@ def perform_backup(full_backup):
|
|||||||
|
|
||||||
exclusive_process("backup")
|
exclusive_process("backup")
|
||||||
|
|
||||||
# Ensure the backup directory exists.
|
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
|
||||||
backup_dir = os.path.join(env["STORAGE_ROOT"], 'backup')
|
backup_cache_dir = os.path.join(backup_root, 'cache')
|
||||||
backup_duplicity_dir = os.path.join(backup_dir, 'duplicity')
|
backup_dir = os.path.join(backup_root, 'encrypted')
|
||||||
os.makedirs(backup_duplicity_dir, exist_ok=True)
|
|
||||||
|
# In an older version of this script, duplicity was called
|
||||||
|
# such that it did not encrypt the backups it created (in
|
||||||
|
# backup/duplicity), and instead openssl was called separately
|
||||||
|
# after each backup run, creating AES256 encrypted copies of
|
||||||
|
# each file created by duplicity in backup/encrypted.
|
||||||
|
#
|
||||||
|
# We detect the transition by the presence of backup/duplicity
|
||||||
|
# and handle it by 'dupliception': we move all the old *un*encrypted
|
||||||
|
# duplicity files up out of the backup/duplicity directory (as
|
||||||
|
# backup/ is excluded from duplicity runs) in order that it is
|
||||||
|
# included in the next run, and we delete backup/encrypted (which
|
||||||
|
# duplicity will output files directly to, post-transition).
|
||||||
|
old_backup_dir = os.path.join(backup_root, 'duplicity')
|
||||||
|
migrated_unencrypted_backup_dir = os.path.join(env["STORAGE_ROOT"], "migrated_unencrypted_backup")
|
||||||
|
if os.path.isdir(old_backup_dir):
|
||||||
|
# Move the old unencrypted files to a new location outside of
|
||||||
|
# the backup root so they get included in the next (new) backup.
|
||||||
|
# Then we'll delete them. Also so that they do not get in the
|
||||||
|
# way of duplicity doing a full backup on the first run after
|
||||||
|
# we take care of this.
|
||||||
|
shutil.move(old_backup_dir, migrated_unencrypted_backup_dir)
|
||||||
|
|
||||||
|
# The backup_dir (backup/encrypted) now has a new purpose.
|
||||||
|
# Clear it out.
|
||||||
|
shutil.rmtree(backup_dir)
|
||||||
|
|
||||||
# On the first run, always do a full backup. Incremental
|
# On the first run, always do a full backup. Incremental
|
||||||
# will fail. Otherwise do a full backup when the size of
|
# will fail. Otherwise do a full backup when the size of
|
||||||
@@ -132,81 +169,113 @@ def perform_backup(full_backup):
|
|||||||
shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
|
shell('check_call', ["/usr/sbin/service", "dovecot", "stop"])
|
||||||
shell('check_call', ["/usr/sbin/service", "postfix", "stop"])
|
shell('check_call', ["/usr/sbin/service", "postfix", "stop"])
|
||||||
|
|
||||||
# Update the backup mirror directory which mirrors the current
|
# Get the encryption passphrase. secret_key.txt is 2048 random
|
||||||
# STORAGE_ROOT (but excluding the backups themselves!).
|
# bits base64-encoded and with line breaks every 65 characters.
|
||||||
|
# gpg will only take the first line of text, so sanity check that
|
||||||
|
# that line is long enough to be a reasonable passphrase. It
|
||||||
|
# only needs to be 43 base64-characters to match AES256's key
|
||||||
|
# length of 32 bytes.
|
||||||
|
with open(os.path.join(backup_root, 'secret_key.txt')) as f:
|
||||||
|
passphrase = f.readline().strip()
|
||||||
|
if len(passphrase) < 43: raise Exception("secret_key.txt's first line is too short!")
|
||||||
|
env_with_passphrase = { "PASSPHRASE" : passphrase }
|
||||||
|
|
||||||
|
# Run a backup of STORAGE_ROOT (but excluding the backups themselves!).
|
||||||
|
# --allow-source-mismatch is needed in case the box's hostname is changed
|
||||||
|
# after the first backup. See #396.
|
||||||
try:
|
try:
|
||||||
shell('check_call', [
|
shell('check_call', [
|
||||||
"/usr/bin/duplicity",
|
"/usr/bin/duplicity",
|
||||||
"full" if full_backup else "incr",
|
"full" if full_backup else "incr",
|
||||||
"--no-encryption",
|
"--archive-dir", backup_cache_dir,
|
||||||
"--archive-dir", "/tmp/duplicity-archive-dir",
|
"--exclude", backup_root,
|
||||||
"--name", "mailinabox",
|
"--volsize", "250",
|
||||||
"--exclude", backup_dir,
|
"--gpg-options", "--cipher-algo=AES256",
|
||||||
"--volsize", "100",
|
|
||||||
"--verbosity", "warning",
|
|
||||||
env["STORAGE_ROOT"],
|
env["STORAGE_ROOT"],
|
||||||
"file://" + backup_duplicity_dir
|
"file://" + backup_dir,
|
||||||
])
|
"--allow-source-mismatch"
|
||||||
|
],
|
||||||
|
env_with_passphrase)
|
||||||
finally:
|
finally:
|
||||||
# Start services again.
|
# Start services again.
|
||||||
shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
|
shell('check_call', ["/usr/sbin/service", "dovecot", "start"])
|
||||||
shell('check_call', ["/usr/sbin/service", "postfix", "start"])
|
shell('check_call', ["/usr/sbin/service", "postfix", "start"])
|
||||||
|
|
||||||
|
# Once the migrated backup is included in a new backup, it can be deleted.
|
||||||
|
if os.path.isdir(migrated_unencrypted_backup_dir):
|
||||||
|
shutil.rmtree(migrated_unencrypted_backup_dir)
|
||||||
|
|
||||||
# Remove old backups. This deletes all backup data no longer needed
|
# Remove old backups. This deletes all backup data no longer needed
|
||||||
# from more than 31 days ago. Must do this before destroying the
|
# from more than 3 days ago.
|
||||||
# cache directory or else this command will re-create it.
|
|
||||||
shell('check_call', [
|
shell('check_call', [
|
||||||
"/usr/bin/duplicity",
|
"/usr/bin/duplicity",
|
||||||
"remove-older-than",
|
"remove-older-than",
|
||||||
"%dD" % keep_backups_for_days,
|
"%dD" % keep_backups_for_days,
|
||||||
"--archive-dir", "/tmp/duplicity-archive-dir",
|
"--archive-dir", backup_cache_dir,
|
||||||
"--name", "mailinabox",
|
|
||||||
"--force",
|
"--force",
|
||||||
"--verbosity", "warning",
|
"file://" + backup_dir
|
||||||
"file://" + backup_duplicity_dir
|
],
|
||||||
])
|
env_with_passphrase)
|
||||||
|
|
||||||
# Remove duplicity's cache directory because it's redundant with our backup directory.
|
# From duplicity's manual:
|
||||||
shutil.rmtree("/tmp/duplicity-archive-dir")
|
# "This should only be necessary after a duplicity session fails or is
|
||||||
|
# aborted prematurely."
|
||||||
|
# That may be unlikely here but we may as well ensure we tidy up if
|
||||||
|
# that does happen - it might just have been a poorly timed reboot.
|
||||||
|
shell('check_call', [
|
||||||
|
"/usr/bin/duplicity",
|
||||||
|
"cleanup",
|
||||||
|
"--archive-dir", backup_cache_dir,
|
||||||
|
"--force",
|
||||||
|
"file://" + backup_dir
|
||||||
|
],
|
||||||
|
env_with_passphrase)
|
||||||
|
|
||||||
# Encrypt all of the new files.
|
# Change ownership of backups to the user-data user, so that the after-bcakup
|
||||||
backup_encrypted_dir = os.path.join(backup_dir, 'encrypted')
|
# script can access them.
|
||||||
os.makedirs(backup_encrypted_dir, exist_ok=True)
|
shell('check_call', ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])
|
||||||
for fn in os.listdir(backup_duplicity_dir):
|
|
||||||
fn2 = os.path.join(backup_encrypted_dir, fn) + ".enc"
|
|
||||||
if os.path.exists(fn2): continue
|
|
||||||
|
|
||||||
# Encrypt the backup using the backup private key.
|
|
||||||
shell('check_call', [
|
|
||||||
"/usr/bin/openssl",
|
|
||||||
"enc",
|
|
||||||
"-aes-256-cbc",
|
|
||||||
"-a",
|
|
||||||
"-salt",
|
|
||||||
"-in", os.path.join(backup_duplicity_dir, fn),
|
|
||||||
"-out", fn2,
|
|
||||||
"-pass", "file:%s" % os.path.join(backup_dir, "secret_key.txt"),
|
|
||||||
])
|
|
||||||
|
|
||||||
# The backup can be decrypted with:
|
|
||||||
# openssl enc -d -aes-256-cbc -a -in latest.tgz.enc -out /dev/stdout -pass file:secret_key.txt | tar -z
|
|
||||||
|
|
||||||
# Remove encrypted backups that are no longer needed.
|
|
||||||
for fn in os.listdir(backup_encrypted_dir):
|
|
||||||
fn2 = os.path.join(backup_duplicity_dir, fn.replace(".enc", ""))
|
|
||||||
if os.path.exists(fn2): continue
|
|
||||||
os.unlink(os.path.join(backup_encrypted_dir, fn))
|
|
||||||
|
|
||||||
# Execute a post-backup script that does the copying to a remote server.
|
# Execute a post-backup script that does the copying to a remote server.
|
||||||
# Run as the STORAGE_USER user, not as root. Pass our settings in
|
# Run as the STORAGE_USER user, not as root. Pass our settings in
|
||||||
# environment variables so the script has access to STORAGE_ROOT.
|
# environment variables so the script has access to STORAGE_ROOT.
|
||||||
post_script = os.path.join(backup_dir, 'after-backup')
|
post_script = os.path.join(backup_root, 'after-backup')
|
||||||
if os.path.exists(post_script):
|
if os.path.exists(post_script):
|
||||||
shell('check_call',
|
shell('check_call',
|
||||||
['su', env['STORAGE_USER'], '-c', post_script],
|
['su', env['STORAGE_USER'], '-c', post_script],
|
||||||
env=env)
|
env=env)
|
||||||
|
|
||||||
|
# Our nightly cron job executes system status checks immediately after this
|
||||||
|
# backup. Since it checks that dovecot and postfix are running, block for a
|
||||||
|
# bit (maximum of 10 seconds each) to give each a chance to finish restarting
|
||||||
|
# before the status checks might catch them down. See #381.
|
||||||
|
wait_for_service(25, True, env, 10)
|
||||||
|
wait_for_service(993, True, env, 10)
|
||||||
|
|
||||||
|
def run_duplicity_verification():
|
||||||
|
env = load_environment()
|
||||||
|
backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
|
||||||
|
backup_cache_dir = os.path.join(backup_root, 'cache')
|
||||||
|
backup_dir = os.path.join(backup_root, 'encrypted')
|
||||||
|
env_with_passphrase = { "PASSPHRASE" : open(os.path.join(backup_root, 'secret_key.txt')).read() }
|
||||||
|
shell('check_call', [
|
||||||
|
"/usr/bin/duplicity",
|
||||||
|
"--verbosity", "info",
|
||||||
|
"verify",
|
||||||
|
"--compare-data",
|
||||||
|
"--archive-dir", backup_cache_dir,
|
||||||
|
"--exclude", backup_root,
|
||||||
|
"file://" + backup_dir,
|
||||||
|
env["STORAGE_ROOT"],
|
||||||
|
], env_with_passphrase)
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import sys
|
import sys
|
||||||
full_backup = "--full" in sys.argv
|
if sys.argv[-1] == "--verify":
|
||||||
perform_backup(full_backup)
|
# Run duplicity's verification command to check a) the backup files
|
||||||
|
# are readable, and b) report if they are up to date.
|
||||||
|
run_duplicity_verification()
|
||||||
|
else:
|
||||||
|
# Perform a backup. Add --full to force a full backup rather than
|
||||||
|
# possibly performing an incremental backup.
|
||||||
|
full_backup = "--full" in sys.argv
|
||||||
|
perform_backup(full_backup)
|
||||||
|
|||||||
@@ -1,155 +0,0 @@
|
|||||||
#!/usr/bin/python3
|
|
||||||
|
|
||||||
# Helps you purchase a SSL certificate from Gandi.net using
|
|
||||||
# their API.
|
|
||||||
#
|
|
||||||
# Before you begin:
|
|
||||||
# 1) Create an account on Gandi.net.
|
|
||||||
# 2) Pre-pay $16 into your account at https://www.gandi.net/prepaid/operations. Wait until the payment goes through.
|
|
||||||
# 3) Activate your API key first on the test platform (wait a while, refresh the page) and then activate the production API at https://www.gandi.net/admin/api_key.
|
|
||||||
|
|
||||||
import sys, re, os.path, urllib.request
|
|
||||||
import xmlrpc.client
|
|
||||||
import rtyaml
|
|
||||||
|
|
||||||
from utils import load_environment, shell
|
|
||||||
from web_update import get_web_domains, get_domain_ssl_files, get_web_root
|
|
||||||
from status_checks import check_certificate
|
|
||||||
|
|
||||||
def buy_ssl_certificate(api_key, domain, command, env):
|
|
||||||
if domain != env['PRIMARY_HOSTNAME'] \
|
|
||||||
and domain not in get_web_domains(env):
|
|
||||||
raise ValueError("Domain is not %s or a domain we're serving a website for." % env['PRIMARY_HOSTNAME'])
|
|
||||||
|
|
||||||
# Initialize.
|
|
||||||
|
|
||||||
gandi = xmlrpc.client.ServerProxy('https://rpc.gandi.net/xmlrpc/')
|
|
||||||
|
|
||||||
try:
|
|
||||||
existing_certs = gandi.cert.list(api_key)
|
|
||||||
except Exception as e:
|
|
||||||
if "Invalid API key" in str(e):
|
|
||||||
print("Invalid API key. Check that you copied the API Key correctly from https://www.gandi.net/admin/api_key.")
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
# Where is the SSL cert stored?
|
|
||||||
|
|
||||||
ssl_key, ssl_certificate = get_domain_ssl_files(domain, env)
|
|
||||||
|
|
||||||
# Have we already created a cert for this domain?
|
|
||||||
|
|
||||||
for cert in existing_certs:
|
|
||||||
if cert['cn'] == domain:
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
# No existing cert found. Purchase one.
|
|
||||||
if command != 'purchase':
|
|
||||||
print("No certificate or order found yet. If you haven't yet purchased a certificate, run ths script again with the 'purchase' command. Otherwise wait a moment and try again.")
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
# Start an order for a single standard SSL certificate.
|
|
||||||
# Use DNS validation. Web-based validation won't work because they
|
|
||||||
# require a file on HTTP but not HTTPS w/o redirects and we don't
|
|
||||||
# serve anything plainly over HTTP. Email might be another way but
|
|
||||||
# DNS is easier to automate.
|
|
||||||
op = gandi.cert.create(api_key, {
|
|
||||||
"csr": open(ssl_csr_path).read(),
|
|
||||||
"dcv_method": "dns",
|
|
||||||
"duration": 1, # year?
|
|
||||||
"package": "cert_std_1_0_0",
|
|
||||||
})
|
|
||||||
print("An SSL certificate has been ordered.")
|
|
||||||
print()
|
|
||||||
print(op)
|
|
||||||
print()
|
|
||||||
print("In a moment please run this script again with the 'setup' command.")
|
|
||||||
|
|
||||||
if cert['status'] == 'pending':
|
|
||||||
# Get the information we need to update our DNS with a code so that
|
|
||||||
# Gandi can verify that we own the domain.
|
|
||||||
|
|
||||||
dcv = gandi.cert.get_dcv_params(api_key, {
|
|
||||||
"csr": open(ssl_csr_path).read(),
|
|
||||||
"cert_id": cert['id'],
|
|
||||||
"dcv_method": "dns",
|
|
||||||
"duration": 1, # year?
|
|
||||||
"package": "cert_std_1_0_0",
|
|
||||||
})
|
|
||||||
if dcv["dcv_method"] != "dns":
|
|
||||||
raise Exception("Certificate ordered with an unknown validation method.")
|
|
||||||
|
|
||||||
# Update our DNS data.
|
|
||||||
|
|
||||||
dns_config = env['STORAGE_ROOT'] + '/dns/custom.yaml'
|
|
||||||
if os.path.exists(dns_config):
|
|
||||||
dns_records = rtyaml.load(open(dns_config))
|
|
||||||
else:
|
|
||||||
dns_records = { }
|
|
||||||
|
|
||||||
qname = dcv['md5'] + '.' + domain
|
|
||||||
value = dcv['sha1'] + '.comodoca.com.'
|
|
||||||
dns_records[qname] = { "CNAME": value }
|
|
||||||
|
|
||||||
with open(dns_config, 'w') as f:
|
|
||||||
f.write(rtyaml.dump(dns_records))
|
|
||||||
|
|
||||||
shell('check_call', ['tools/dns_update'])
|
|
||||||
|
|
||||||
# Okay, done with this step.
|
|
||||||
|
|
||||||
print("DNS has been updated. Gandi will check within 60 minutes.")
|
|
||||||
print()
|
|
||||||
print("See https://www.gandi.net/admin/ssl/%d/details for the status of this order." % cert['id'])
|
|
||||||
|
|
||||||
elif cert['status'] == 'valid':
|
|
||||||
# The certificate is ready.
|
|
||||||
|
|
||||||
# Check before we overwrite something we shouldn't.
|
|
||||||
if os.path.exists(ssl_certificate):
|
|
||||||
cert_status, cert_status_details = check_certificate(None, ssl_certificate, None)
|
|
||||||
if cert_status != "SELF-SIGNED":
|
|
||||||
print("Please back up and delete the file %s so I can save your new certificate." % ssl_certificate)
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
# Form the certificate.
|
|
||||||
|
|
||||||
# The certificate comes as a long base64-encoded string. Break in
|
|
||||||
# into lines in the usual way.
|
|
||||||
pem = "-----BEGIN CERTIFICATE-----\n"
|
|
||||||
pem += "\n".join(chunk for chunk in re.split(r"(.{64})", cert['cert']) if chunk != "")
|
|
||||||
pem += "\n-----END CERTIFICATE-----\n\n"
|
|
||||||
|
|
||||||
# Append intermediary certificates.
|
|
||||||
pem += urllib.request.urlopen("https://www.gandi.net/static/CAs/GandiStandardSSLCA.pem").read().decode("ascii")
|
|
||||||
|
|
||||||
# Write out.
|
|
||||||
|
|
||||||
with open(ssl_certificate, "w") as f:
|
|
||||||
f.write(pem)
|
|
||||||
|
|
||||||
print("The certificate has been installed in %s. Restarting services..." % ssl_certificate)
|
|
||||||
|
|
||||||
# Restart dovecot and if this is for PRIMARY_HOSTNAME.
|
|
||||||
|
|
||||||
if domain == env['PRIMARY_HOSTNAME']:
|
|
||||||
shell('check_call', ["/usr/sbin/service", "dovecot", "restart"])
|
|
||||||
shell('check_call', ["/usr/sbin/service", "postfix", "restart"])
|
|
||||||
|
|
||||||
# Restart nginx in all cases.
|
|
||||||
|
|
||||||
shell('check_call', ["/usr/sbin/service", "nginx", "restart"])
|
|
||||||
|
|
||||||
else:
|
|
||||||
print("The certificate has an unknown status. Please check https://www.gandi.net/admin/ssl/%d/details for the status of this order." % cert['id'])
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
if len(sys.argv) < 4:
|
|
||||||
print("Usage: python management/buy_certificate.py gandi_api_key domain_name {purchase, setup}")
|
|
||||||
sys.exit(1)
|
|
||||||
api_key = sys.argv[1]
|
|
||||||
domain_name = sys.argv[2]
|
|
||||||
cmd = sys.argv[3]
|
|
||||||
buy_ssl_certificate(api_key, domain_name, cmd, load_environment())
|
|
||||||
|
|
||||||
@@ -4,13 +4,19 @@ import os, os.path, re, json
|
|||||||
|
|
||||||
from functools import wraps
|
from functools import wraps
|
||||||
|
|
||||||
from flask import Flask, request, render_template, abort, Response
|
from flask import Flask, request, render_template, abort, Response, send_from_directory
|
||||||
|
|
||||||
import auth, utils
|
import auth, utils
|
||||||
from mailconfig import get_mail_users, get_mail_users_ex, get_admins, add_mail_user, set_mail_password, remove_mail_user
|
from mailconfig import get_mail_users, get_mail_users_ex, get_admins, add_mail_user, set_mail_password, remove_mail_user
|
||||||
from mailconfig import get_mail_user_privileges, add_remove_mail_user_privilege
|
from mailconfig import get_mail_user_privileges, add_remove_mail_user_privilege
|
||||||
from mailconfig import get_mail_aliases, get_mail_aliases_ex, get_mail_domains, add_mail_alias, remove_mail_alias
|
from mailconfig import get_mail_aliases, get_mail_aliases_ex, get_mail_domains, add_mail_alias, remove_mail_alias
|
||||||
|
|
||||||
|
# Create a worker pool for the status checks. The pool should
|
||||||
|
# live across http requests so we don't baloon the system with
|
||||||
|
# processes.
|
||||||
|
import multiprocessing.pool
|
||||||
|
pool = multiprocessing.pool.Pool(processes=10)
|
||||||
|
|
||||||
env = utils.load_environment()
|
env = utils.load_environment()
|
||||||
|
|
||||||
auth_service = auth.KeyAuthService()
|
auth_service = auth.KeyAuthService()
|
||||||
@@ -74,7 +80,7 @@ def unauthorized(error):
|
|||||||
return auth_service.make_unauthorized_response()
|
return auth_service.make_unauthorized_response()
|
||||||
|
|
||||||
def json_response(data):
|
def json_response(data):
|
||||||
return Response(json.dumps(data), status=200, mimetype='application/json')
|
return Response(json.dumps(data, indent=2, sort_keys=True)+'\n', status=200, mimetype='application/json')
|
||||||
|
|
||||||
###################################
|
###################################
|
||||||
|
|
||||||
@@ -84,10 +90,12 @@ def json_response(data):
|
|||||||
def index():
|
def index():
|
||||||
# Render the control panel. This route does not require user authentication
|
# Render the control panel. This route does not require user authentication
|
||||||
# so it must be safe!
|
# so it must be safe!
|
||||||
|
no_users_exist = (len(get_mail_users(env)) == 0)
|
||||||
no_admins_exist = (len(get_admins(env)) == 0)
|
no_admins_exist = (len(get_admins(env)) == 0)
|
||||||
return render_template('index.html',
|
return render_template('index.html',
|
||||||
hostname=env['PRIMARY_HOSTNAME'],
|
hostname=env['PRIMARY_HOSTNAME'],
|
||||||
storage_root=env['STORAGE_ROOT'],
|
storage_root=env['STORAGE_ROOT'],
|
||||||
|
no_users_exist=no_users_exist,
|
||||||
no_admins_exist=no_admins_exist,
|
no_admins_exist=no_admins_exist,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -110,7 +118,7 @@ def me():
|
|||||||
|
|
||||||
# Is authorized as admin? Return an API key for future use.
|
# Is authorized as admin? Return an API key for future use.
|
||||||
if "admin" in privs:
|
if "admin" in privs:
|
||||||
resp["api_key"] = auth_service.create_user_key(email)
|
resp["api_key"] = auth_service.create_user_key(email, env)
|
||||||
|
|
||||||
# Return.
|
# Return.
|
||||||
return json_response(resp)
|
return json_response(resp)
|
||||||
@@ -213,8 +221,8 @@ def dns_update():
|
|||||||
@app.route('/dns/secondary-nameserver')
|
@app.route('/dns/secondary-nameserver')
|
||||||
@authorized_personnel_only
|
@authorized_personnel_only
|
||||||
def dns_get_secondary_nameserver():
|
def dns_get_secondary_nameserver():
|
||||||
from dns_update import get_custom_dns_config
|
from dns_update import get_custom_dns_config, get_secondary_dns
|
||||||
return json_response({ "hostname": get_custom_dns_config(env).get("_secondary_nameserver") })
|
return json_response({ "hostname": get_secondary_dns(get_custom_dns_config(env)) })
|
||||||
|
|
||||||
@app.route('/dns/secondary-nameserver', methods=['POST'])
|
@app.route('/dns/secondary-nameserver', methods=['POST'])
|
||||||
@authorized_personnel_only
|
@authorized_personnel_only
|
||||||
@@ -225,38 +233,70 @@ def dns_set_secondary_nameserver():
|
|||||||
except ValueError as e:
|
except ValueError as e:
|
||||||
return (str(e), 400)
|
return (str(e), 400)
|
||||||
|
|
||||||
@app.route('/dns/set')
|
@app.route('/dns/custom')
|
||||||
@authorized_personnel_only
|
@authorized_personnel_only
|
||||||
def dns_get_records():
|
def dns_get_records(qname=None, rtype=None):
|
||||||
from dns_update import get_custom_dns_config, get_custom_records
|
from dns_update import get_custom_dns_config
|
||||||
additional_records = get_custom_dns_config(env)
|
return json_response([
|
||||||
records = get_custom_records(None, additional_records, env)
|
{
|
||||||
return json_response([{
|
|
||||||
"qname": r[0],
|
"qname": r[0],
|
||||||
"rtype": r[1],
|
"rtype": r[1],
|
||||||
"value": r[2],
|
"value": r[2],
|
||||||
} for r in records])
|
}
|
||||||
|
for r in get_custom_dns_config(env)
|
||||||
|
if r[0] != "_secondary_nameserver"
|
||||||
|
and (not qname or r[0] == qname)
|
||||||
|
and (not rtype or r[1] == rtype) ])
|
||||||
|
|
||||||
@app.route('/dns/set/<qname>', methods=['POST'])
|
@app.route('/dns/custom/<qname>', methods=['GET', 'POST', 'PUT', 'DELETE'])
|
||||||
@app.route('/dns/set/<qname>/<rtype>', methods=['POST'])
|
@app.route('/dns/custom/<qname>/<rtype>', methods=['GET', 'POST', 'PUT', 'DELETE'])
|
||||||
@app.route('/dns/set/<qname>/<rtype>/<value>', methods=['POST'])
|
|
||||||
@authorized_personnel_only
|
@authorized_personnel_only
|
||||||
def dns_set_record(qname, rtype="A", value=None):
|
def dns_set_record(qname, rtype="A"):
|
||||||
from dns_update import do_dns_update, set_custom_dns_record
|
from dns_update import do_dns_update, set_custom_dns_record
|
||||||
try:
|
try:
|
||||||
# Get the value from the URL, then the POST parameters, or if it is not set then
|
# Normalize.
|
||||||
# use the remote IP address of the request --- makes dynamic DNS easy. To clear a
|
rtype = rtype.upper()
|
||||||
# value, '' must be explicitly passed.
|
|
||||||
if value is None:
|
# Read the record value from the request BODY, which must be
|
||||||
value = request.form.get("value")
|
# ASCII-only. Not used with GET.
|
||||||
if value is None:
|
value = request.stream.read().decode("ascii", "ignore").strip()
|
||||||
value = request.environ.get("HTTP_X_FORWARDED_FOR") # normally REMOTE_ADDR but we're behind nginx as a reverse proxy
|
|
||||||
if value == '' or value == '__delete__':
|
if request.method == "GET":
|
||||||
# request deletion
|
# Get the existing records matching the qname and rtype.
|
||||||
value = None
|
return dns_get_records(qname, rtype)
|
||||||
if set_custom_dns_record(qname, rtype, value, env):
|
|
||||||
return do_dns_update(env)
|
elif request.method in ("POST", "PUT"):
|
||||||
|
# There is a default value for A/AAAA records.
|
||||||
|
if rtype in ("A", "AAAA") and value == "":
|
||||||
|
value = request.environ.get("HTTP_X_FORWARDED_FOR") # normally REMOTE_ADDR but we're behind nginx as a reverse proxy
|
||||||
|
|
||||||
|
# Cannot add empty records.
|
||||||
|
if value == '':
|
||||||
|
return ("No value for the record provided.", 400)
|
||||||
|
|
||||||
|
if request.method == "POST":
|
||||||
|
# Add a new record (in addition to any existing records
|
||||||
|
# for this qname-rtype pair).
|
||||||
|
action = "add"
|
||||||
|
elif request.method == "PUT":
|
||||||
|
# In REST, PUT is supposed to be idempotent, so we'll
|
||||||
|
# make this action set (replace all records for this
|
||||||
|
# qname-rtype pair) rather than add (add a new record).
|
||||||
|
action = "set"
|
||||||
|
|
||||||
|
elif request.method == "DELETE":
|
||||||
|
if value == '':
|
||||||
|
# Delete all records for this qname-type pair.
|
||||||
|
value = None
|
||||||
|
else:
|
||||||
|
# Delete just the qname-rtype-value record exactly.
|
||||||
|
pass
|
||||||
|
action = "remove"
|
||||||
|
|
||||||
|
if set_custom_dns_record(qname, rtype, value, action, env):
|
||||||
|
return do_dns_update(env) or "Something isn't right."
|
||||||
return "OK"
|
return "OK"
|
||||||
|
|
||||||
except ValueError as e:
|
except ValueError as e:
|
||||||
return (str(e), 400)
|
return (str(e), 400)
|
||||||
|
|
||||||
@@ -272,7 +312,7 @@ def dns_get_dump():
|
|||||||
@authorized_personnel_only
|
@authorized_personnel_only
|
||||||
def ssl_get_csr(domain):
|
def ssl_get_csr(domain):
|
||||||
from web_update import get_domain_ssl_files, create_csr
|
from web_update import get_domain_ssl_files, create_csr
|
||||||
ssl_key, ssl_certificate = get_domain_ssl_files(domain, env)
|
ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
|
||||||
return create_csr(domain, ssl_key, env)
|
return create_csr(domain, ssl_key, env)
|
||||||
|
|
||||||
@app.route('/ssl/install', methods=['POST'])
|
@app.route('/ssl/install', methods=['POST'])
|
||||||
@@ -300,6 +340,24 @@ def web_update():
|
|||||||
|
|
||||||
# System
|
# System
|
||||||
|
|
||||||
|
@app.route('/system/version', methods=["GET"])
|
||||||
|
@authorized_personnel_only
|
||||||
|
def system_version():
|
||||||
|
from status_checks import what_version_is_this
|
||||||
|
try:
|
||||||
|
return what_version_is_this(env)
|
||||||
|
except Exception as e:
|
||||||
|
return (str(e), 500)
|
||||||
|
|
||||||
|
@app.route('/system/latest-upstream-version', methods=["POST"])
|
||||||
|
@authorized_personnel_only
|
||||||
|
def system_latest_upstream_version():
|
||||||
|
from status_checks import get_latest_miab_version
|
||||||
|
try:
|
||||||
|
return get_latest_miab_version()
|
||||||
|
except Exception as e:
|
||||||
|
return (str(e), 500)
|
||||||
|
|
||||||
@app.route('/system/status', methods=["POST"])
|
@app.route('/system/status', methods=["POST"])
|
||||||
@authorized_personnel_only
|
@authorized_personnel_only
|
||||||
def system_status():
|
def system_status():
|
||||||
@@ -318,7 +376,7 @@ def system_status():
|
|||||||
def print_line(self, message, monospace=False):
|
def print_line(self, message, monospace=False):
|
||||||
self.items[-1]["extra"].append({ "text": message, "monospace": monospace })
|
self.items[-1]["extra"].append({ "text": message, "monospace": monospace })
|
||||||
output = WebOutput()
|
output = WebOutput()
|
||||||
run_checks(env, output)
|
run_checks(False, env, output, pool)
|
||||||
return json_response(output.items)
|
return json_response(output.items)
|
||||||
|
|
||||||
@app.route('/system/updates')
|
@app.route('/system/updates')
|
||||||
@@ -344,6 +402,17 @@ def backup_status():
|
|||||||
from backup import backup_status
|
from backup import backup_status
|
||||||
return json_response(backup_status(env))
|
return json_response(backup_status(env))
|
||||||
|
|
||||||
|
# MUNIN
|
||||||
|
|
||||||
|
@app.route('/munin/')
|
||||||
|
@app.route('/munin/<path:filename>')
|
||||||
|
@authorized_personnel_only
|
||||||
|
def munin(filename=""):
|
||||||
|
# Checks administrative access (@authorized_personnel_only) and then just proxies
|
||||||
|
# the request to static files.
|
||||||
|
if filename == "": filename = "index.html"
|
||||||
|
return send_from_directory("/var/cache/munin/www", filename)
|
||||||
|
|
||||||
# APP
|
# APP
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
# and mail aliases and restarts nsd.
|
# and mail aliases and restarts nsd.
|
||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
import os, os.path, urllib.parse, datetime, re, hashlib, base64
|
import sys, os, os.path, urllib.parse, datetime, re, hashlib, base64
|
||||||
import ipaddress
|
import ipaddress
|
||||||
import rtyaml
|
import rtyaml
|
||||||
import dns.resolver
|
import dns.resolver
|
||||||
@@ -24,7 +24,7 @@ def get_dns_zones(env):
|
|||||||
# What domains should we create DNS zones for? Never create a zone for
|
# What domains should we create DNS zones for? Never create a zone for
|
||||||
# a domain & a subdomain of that domain.
|
# a domain & a subdomain of that domain.
|
||||||
domains = get_dns_domains(env)
|
domains = get_dns_domains(env)
|
||||||
|
|
||||||
# Exclude domains that are subdomains of other domains we know. Proceed
|
# Exclude domains that are subdomains of other domains we know. Proceed
|
||||||
# by looking at shorter domains first.
|
# by looking at shorter domains first.
|
||||||
zone_domains = set()
|
zone_domains = set()
|
||||||
@@ -49,17 +49,6 @@ def get_dns_zones(env):
|
|||||||
zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
|
zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
|
||||||
|
|
||||||
return zonefiles
|
return zonefiles
|
||||||
|
|
||||||
def get_custom_dns_config(env):
|
|
||||||
try:
|
|
||||||
return rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
|
|
||||||
except:
|
|
||||||
return { }
|
|
||||||
|
|
||||||
def write_custom_dns_config(config, env):
|
|
||||||
config_yaml = rtyaml.dump(config)
|
|
||||||
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
|
|
||||||
f.write(config_yaml)
|
|
||||||
|
|
||||||
def do_dns_update(env, force=False):
|
def do_dns_update(env, force=False):
|
||||||
# What domains (and their zone filenames) should we build?
|
# What domains (and their zone filenames) should we build?
|
||||||
@@ -67,14 +56,16 @@ def do_dns_update(env, force=False):
|
|||||||
zonefiles = get_dns_zones(env)
|
zonefiles = get_dns_zones(env)
|
||||||
|
|
||||||
# Custom records to add to zones.
|
# Custom records to add to zones.
|
||||||
additional_records = get_custom_dns_config(env)
|
additional_records = list(get_custom_dns_config(env))
|
||||||
|
from web_update import get_default_www_redirects
|
||||||
|
www_redirect_domains = get_default_www_redirects(env)
|
||||||
|
|
||||||
# Write zone files.
|
# Write zone files.
|
||||||
os.makedirs('/etc/nsd/zones', exist_ok=True)
|
os.makedirs('/etc/nsd/zones', exist_ok=True)
|
||||||
updated_domains = []
|
updated_domains = []
|
||||||
for i, (domain, zonefile) in enumerate(zonefiles):
|
for i, (domain, zonefile) in enumerate(zonefiles):
|
||||||
# Build the records to put in the zone.
|
# Build the records to put in the zone.
|
||||||
records = build_zone(domain, domains, additional_records, env)
|
records = build_zone(domain, domains, additional_records, www_redirect_domains, env)
|
||||||
|
|
||||||
# See if the zone has changed, and if so update the serial number
|
# See if the zone has changed, and if so update the serial number
|
||||||
# and write the zone file.
|
# and write the zone file.
|
||||||
@@ -122,7 +113,7 @@ def do_dns_update(env, force=False):
|
|||||||
shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
|
shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
|
||||||
|
|
||||||
# Write the OpenDKIM configuration tables.
|
# Write the OpenDKIM configuration tables.
|
||||||
if write_opendkim_tables(zonefiles, env):
|
if write_opendkim_tables(domains, env):
|
||||||
# Settings changed. Kick opendkim.
|
# Settings changed. Kick opendkim.
|
||||||
shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
|
shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
|
||||||
if len(updated_domains) == 0:
|
if len(updated_domains) == 0:
|
||||||
@@ -137,7 +128,7 @@ def do_dns_update(env, force=False):
|
|||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
def build_zone(domain, all_domains, additional_records, env, is_zone=True):
|
def build_zone(domain, all_domains, additional_records, www_redirect_domains, env, is_zone=True):
|
||||||
records = []
|
records = []
|
||||||
|
|
||||||
# For top-level zones, define the authoritative name servers.
|
# For top-level zones, define the authoritative name servers.
|
||||||
@@ -153,7 +144,7 @@ def build_zone(domain, all_domains, additional_records, env, is_zone=True):
|
|||||||
records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
|
records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
|
||||||
|
|
||||||
# Define ns2.PRIMARY_HOSTNAME or whatever the user overrides.
|
# Define ns2.PRIMARY_HOSTNAME or whatever the user overrides.
|
||||||
secondary_ns = additional_records.get("_secondary_nameserver", "ns2." + env["PRIMARY_HOSTNAME"])
|
secondary_ns = get_secondary_dns(additional_records) or ("ns2." + env["PRIMARY_HOSTNAME"])
|
||||||
records.append((None, "NS", secondary_ns+'.', False))
|
records.append((None, "NS", secondary_ns+'.', False))
|
||||||
|
|
||||||
|
|
||||||
@@ -183,16 +174,12 @@ def build_zone(domain, all_domains, additional_records, env, is_zone=True):
|
|||||||
# The MX record says where email for the domain should be delivered: Here!
|
# The MX record says where email for the domain should be delivered: Here!
|
||||||
records.append((None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain))
|
records.append((None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain))
|
||||||
|
|
||||||
# SPF record: Permit the box ('mx', see above) to send mail on behalf of
|
|
||||||
# the domain, and no one else.
|
|
||||||
records.append((None, "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain))
|
|
||||||
|
|
||||||
# Add DNS records for any subdomains of this domain. We should not have a zone for
|
# Add DNS records for any subdomains of this domain. We should not have a zone for
|
||||||
# both a domain and one of its subdomains.
|
# both a domain and one of its subdomains.
|
||||||
subdomains = [d for d in all_domains if d.endswith("." + domain)]
|
subdomains = [d for d in all_domains if d.endswith("." + domain)]
|
||||||
for subdomain in subdomains:
|
for subdomain in subdomains:
|
||||||
subdomain_qname = subdomain[0:-len("." + domain)]
|
subdomain_qname = subdomain[0:-len("." + domain)]
|
||||||
subzone = build_zone(subdomain, [], additional_records, env, is_zone=False)
|
subzone = build_zone(subdomain, [], additional_records, www_redirect_domains, env, is_zone=False)
|
||||||
for child_qname, child_rtype, child_value, child_explanation in subzone:
|
for child_qname, child_rtype, child_value, child_explanation in subzone:
|
||||||
if child_qname == None:
|
if child_qname == None:
|
||||||
child_qname = subdomain_qname
|
child_qname = subdomain_qname
|
||||||
@@ -200,50 +187,88 @@ def build_zone(domain, all_domains, additional_records, env, is_zone=True):
|
|||||||
child_qname += "." + subdomain_qname
|
child_qname += "." + subdomain_qname
|
||||||
records.append((child_qname, child_rtype, child_value, child_explanation))
|
records.append((child_qname, child_rtype, child_value, child_explanation))
|
||||||
|
|
||||||
|
has_rec_base = list(records) # clone current state
|
||||||
def has_rec(qname, rtype, prefix=None):
|
def has_rec(qname, rtype, prefix=None):
|
||||||
for rec in records:
|
for rec in has_rec_base:
|
||||||
if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
|
if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
|
||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# The user may set other records that don't conflict with our settings.
|
# The user may set other records that don't conflict with our settings.
|
||||||
for qname, rtype, value in get_custom_records(domain, additional_records, env):
|
# Don't put any TXT records above this line, or it'll prevent any custom TXT records.
|
||||||
|
for qname, rtype, value in filter_custom_records(domain, additional_records):
|
||||||
|
# Don't allow custom records for record types that override anything above.
|
||||||
|
# But allow multiple custom records for the same rtype --- see how has_rec_base is used.
|
||||||
if has_rec(qname, rtype): continue
|
if has_rec(qname, rtype): continue
|
||||||
|
|
||||||
|
# The "local" keyword on A/AAAA records are short-hand for our own IP.
|
||||||
|
# This also flags for web configuration that the user wants a website here.
|
||||||
|
if rtype == "A" and value == "local":
|
||||||
|
value = env["PUBLIC_IP"]
|
||||||
|
if rtype == "AAAA" and value == "local":
|
||||||
|
if "PUBLIC_IPV6" in env:
|
||||||
|
value = env["PUBLIC_IPV6"]
|
||||||
|
else:
|
||||||
|
continue
|
||||||
records.append((qname, rtype, value, "(Set by user.)"))
|
records.append((qname, rtype, value, "(Set by user.)"))
|
||||||
|
|
||||||
# Add defaults if not overridden by the user's custom settings (and not otherwise configured).
|
# Add defaults if not overridden by the user's custom settings (and not otherwise configured).
|
||||||
# Any "CNAME" record on the qname overrides A and AAAA.
|
# Any CNAME or A record on the qname overrides A and AAAA. But when we set the default A record,
|
||||||
|
# we should not cause the default AAAA record to be skipped because it thinks a custom A record
|
||||||
|
# was set. So set has_rec_base to a clone of the current set of DNS settings, and don't update
|
||||||
|
# during this process.
|
||||||
|
has_rec_base = list(records)
|
||||||
defaults = [
|
defaults = [
|
||||||
(None, "A", env["PUBLIC_IP"], "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain),
|
(None, "A", env["PUBLIC_IP"], "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain),
|
||||||
("www", "A", env["PUBLIC_IP"], "Optional. Sets the IP address that www.%s resolves to, e.g. for web hosting." % domain),
|
|
||||||
(None, "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
|
(None, "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
|
||||||
("www", "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that www.%s resolves to, e.g. for web hosting." % domain),
|
|
||||||
]
|
]
|
||||||
|
if "www." + domain in www_redirect_domains:
|
||||||
|
defaults += [
|
||||||
|
("www", "A", env["PUBLIC_IP"], "Optional. Sets the IP address that www.%s resolves to so that the box can provide a redirect to the parent domain." % domain),
|
||||||
|
("www", "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that www.%s resolves to so that the box can provide a redirect to the parent domain." % domain),
|
||||||
|
]
|
||||||
for qname, rtype, value, explanation in defaults:
|
for qname, rtype, value, explanation in defaults:
|
||||||
if value is None or value.strip() == "": continue # skip IPV6 if not set
|
if value is None or value.strip() == "": continue # skip IPV6 if not set
|
||||||
if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains
|
if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains
|
||||||
if not has_rec(qname, rtype) and not has_rec(qname, "CNAME"):
|
# Set the default record, but not if:
|
||||||
|
# (1) there is not a user-set record of the same type already
|
||||||
|
# (2) there is not a CNAME record already, since you can't set both and who knows what takes precedence
|
||||||
|
# (2) there is not an A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record)
|
||||||
|
if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"):
|
||||||
records.append((qname, rtype, value, explanation))
|
records.append((qname, rtype, value, explanation))
|
||||||
|
|
||||||
|
# Don't pin the list of records that has_rec checks against anymore.
|
||||||
|
has_rec_base = records
|
||||||
|
|
||||||
|
# SPF record: Permit the box ('mx', see above) to send mail on behalf of
|
||||||
|
# the domain, and no one else.
|
||||||
|
# Skip if the user has set a custom SPF record.
|
||||||
|
if not has_rec(None, "TXT", prefix="v=spf1 "):
|
||||||
|
records.append((None, "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain))
|
||||||
|
|
||||||
# Append the DKIM TXT record to the zone as generated by OpenDKIM.
|
# Append the DKIM TXT record to the zone as generated by OpenDKIM.
|
||||||
|
# Skip if the user has set a DKIM record already.
|
||||||
opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
|
opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
|
||||||
with open(opendkim_record_file) as orf:
|
with open(opendkim_record_file) as orf:
|
||||||
m = re.match(r'(\S+)\s+IN\s+TXT\s+\( "([^"]+)"\s+"([^"]+)"\s*\)', orf.read(), re.S)
|
m = re.match(r'(\S+)\s+IN\s+TXT\s+\( ((?:"[^"]+"\s+)+)\)', orf.read(), re.S)
|
||||||
val = m.group(2) + m.group(3)
|
val = "".join(re.findall(r'"([^"]+)"', m.group(2)))
|
||||||
records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain))
|
if not has_rec(m.group(1), "TXT", prefix="v=DKIM1; "):
|
||||||
|
records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain))
|
||||||
|
|
||||||
# Append a DMARC record.
|
# Append a DMARC record.
|
||||||
records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine', "Optional. Specifies that mail that does not originate from the box but claims to be from @%s is suspect and should be quarantined by the recipient's mail system." % domain))
|
# Skip if the user has set a DMARC record already.
|
||||||
|
if not has_rec("_dmarc", "TXT", prefix="v=DMARC1; "):
|
||||||
|
records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine', "Recommended. Specifies that mail that does not originate from the box but claims to be from @%s or which does not have a valid DKIM signature is suspect and should be quarantined by the recipient's mail system." % domain))
|
||||||
|
|
||||||
# For any subdomain with an A record but no SPF or DMARC record, add strict policy records.
|
# For any subdomain with an A record but no SPF or DMARC record, add strict policy records.
|
||||||
all_resolvable_qnames = set(r[0] for r in records if r[1] in ("A", "AAAA"))
|
all_resolvable_qnames = set(r[0] for r in records if r[1] in ("A", "AAAA"))
|
||||||
for qname in all_resolvable_qnames:
|
for qname in all_resolvable_qnames:
|
||||||
if not has_rec(qname, "TXT", prefix="v=spf1 "):
|
if not has_rec(qname, "TXT", prefix="v=spf1 "):
|
||||||
records.append((qname, "TXT", 'v=spf1 a mx -all', "Prevents unauthorized use of this domain name for outbound mail by requiring outbound mail to originate from the indicated host(s)."))
|
records.append((qname, "TXT", 'v=spf1 -all', "Recommended. Prevents use of this domain name for outbound mail by specifying that no servers are valid sources for mail from @%s. If you do send email from this domain name you should either override this record such that the SPF rule does allow the originating server, or, take the recommended approach and have the box handle mail for this domain (simply add any receiving alias at this domain name to make this machine treat the domain name as one of its mail domains)." % (qname + "." + domain)))
|
||||||
dmarc_qname = "_dmarc" + ("" if qname is None else "." + qname)
|
dmarc_qname = "_dmarc" + ("" if qname is None else "." + qname)
|
||||||
if not has_rec(dmarc_qname, "TXT", prefix="v=DMARC1; "):
|
if not has_rec(dmarc_qname, "TXT", prefix="v=DMARC1; "):
|
||||||
records.append((dmarc_qname, "TXT", 'v=DMARC1; p=reject', "Prevents unauthorized use of this domain name for outbound mail by requiring a valid DKIM signature."))
|
records.append((dmarc_qname, "TXT", 'v=DMARC1; p=reject', "Recommended. Prevents use of this domain name for outbound mail by specifying that the SPF rule should be honoured for mail from @%s." % (qname + "." + domain)))
|
||||||
|
|
||||||
|
|
||||||
# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
|
# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
|
||||||
records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
|
records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
|
||||||
@@ -252,48 +277,6 @@ def build_zone(domain, all_domains, additional_records, env, is_zone=True):
|
|||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
def get_custom_records(domain, additional_records, env):
|
|
||||||
for qname, value in additional_records.items():
|
|
||||||
# Is this record for the domain or one of its subdomains?
|
|
||||||
# If `domain` is None, return records for all domains.
|
|
||||||
if domain is not None and qname != domain and not qname.endswith("." + domain): continue
|
|
||||||
|
|
||||||
# Turn the fully qualified domain name in the YAML file into
|
|
||||||
# our short form (None => domain, or a relative QNAME) if
|
|
||||||
# domain is not None.
|
|
||||||
if domain is not None:
|
|
||||||
if qname == domain:
|
|
||||||
qname = None
|
|
||||||
else:
|
|
||||||
qname = qname[0:len(qname)-len("." + domain)]
|
|
||||||
|
|
||||||
# Short form. Mapping a domain name to a string is short-hand
|
|
||||||
# for creating A records.
|
|
||||||
if isinstance(value, str):
|
|
||||||
values = [("A", value)]
|
|
||||||
if value == "local" and env.get("PUBLIC_IPV6"):
|
|
||||||
values.append( ("AAAA", value) )
|
|
||||||
|
|
||||||
# A mapping creates multiple records.
|
|
||||||
elif isinstance(value, dict):
|
|
||||||
values = value.items()
|
|
||||||
|
|
||||||
# No other type of data is allowed.
|
|
||||||
else:
|
|
||||||
raise ValueError()
|
|
||||||
|
|
||||||
for rtype, value2 in values:
|
|
||||||
# The "local" keyword on A/AAAA records are short-hand for our own IP.
|
|
||||||
# This also flags for web configuration that the user wants a website here.
|
|
||||||
if rtype == "A" and value2 == "local":
|
|
||||||
value2 = env["PUBLIC_IP"]
|
|
||||||
if rtype == "AAAA" and value2 == "local":
|
|
||||||
if "PUBLIC_IPV6" not in env: continue # no IPv6 address is available so don't set anything
|
|
||||||
value2 = env["PUBLIC_IPV6"]
|
|
||||||
yield (qname, rtype, value2)
|
|
||||||
|
|
||||||
########################################################################
|
|
||||||
|
|
||||||
def build_tlsa_record(env):
|
def build_tlsa_record(env):
|
||||||
# A DANE TLSA record in DNS specifies that connections on a port
|
# A DANE TLSA record in DNS specifies that connections on a port
|
||||||
# must use TLS and the certificate must match a particular certificate.
|
# must use TLS and the certificate must match a particular certificate.
|
||||||
@@ -353,7 +336,7 @@ def build_sshfp_records():
|
|||||||
# Lots of things can go wrong. Don't let it disturb the DNS
|
# Lots of things can go wrong. Don't let it disturb the DNS
|
||||||
# zone.
|
# zone.
|
||||||
pass
|
pass
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
def write_nsd_zone(domain, zonefile, records, env, force):
|
def write_nsd_zone(domain, zonefile, records, env, force):
|
||||||
@@ -382,26 +365,24 @@ $TTL 1800 ; default time to live
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
# Replace replacement strings.
|
# Replace replacement strings.
|
||||||
zone = zone.format(domain=domain.encode("idna").decode("ascii"), primary_domain=env["PRIMARY_HOSTNAME"].encode("idna").decode("ascii"))
|
zone = zone.format(domain=domain, primary_domain=env["PRIMARY_HOSTNAME"])
|
||||||
|
|
||||||
# Add records.
|
# Add records.
|
||||||
for subdomain, querytype, value, explanation in records:
|
for subdomain, querytype, value, explanation in records:
|
||||||
if subdomain:
|
if subdomain:
|
||||||
zone += subdomain.encode("idna").decode("ascii")
|
zone += subdomain
|
||||||
zone += "\tIN\t" + querytype + "\t"
|
zone += "\tIN\t" + querytype + "\t"
|
||||||
if querytype == "TXT":
|
if querytype == "TXT":
|
||||||
# Quote and escape.
|
# Divide into 255-byte max substrings.
|
||||||
value = value.replace('\\', '\\\\') # escape backslashes
|
v2 = ""
|
||||||
value = value.replace('"', '\\"') # escape quotes
|
while len(value) > 0:
|
||||||
value = '"' + value + '"' # wrap in quotes
|
s = value[0:255]
|
||||||
elif querytype in ("NS", "CNAME"):
|
value = value[255:]
|
||||||
# These records must be IDNA-encoded.
|
s = s.replace('\\', '\\\\') # escape backslashes
|
||||||
value = value.encode("idna").decode("ascii")
|
s = s.replace('"', '\\"') # escape quotes
|
||||||
elif querytype == "MX":
|
s = '"' + s + '"' # wrap in quotes
|
||||||
# Also IDNA-encoded, but must parse first.
|
v2 += s + " "
|
||||||
priority, host = value.split(" ", 1)
|
value = v2
|
||||||
host = host.encode("idna").decode("ascii")
|
|
||||||
value = priority + " " + host
|
|
||||||
zone += value + "\n"
|
zone += value + "\n"
|
||||||
|
|
||||||
# DNSSEC requires re-signing a zone periodically. That requires
|
# DNSSEC requires re-signing a zone periodically. That requires
|
||||||
@@ -469,25 +450,9 @@ $TTL 1800 ; default time to live
|
|||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
def write_nsd_conf(zonefiles, additional_records, env):
|
def write_nsd_conf(zonefiles, additional_records, env):
|
||||||
# Basic header.
|
# Write the list of zones to a configuration file.
|
||||||
nsdconf = """
|
nsd_conf_file = "/etc/nsd/zones.conf"
|
||||||
server:
|
nsdconf = ""
|
||||||
hide-version: yes
|
|
||||||
|
|
||||||
# identify the server (CH TXT ID.SERVER entry).
|
|
||||||
identity: ""
|
|
||||||
|
|
||||||
# The directory for zonefile: files.
|
|
||||||
zonesdir: "/etc/nsd/zones"
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Since we have bind9 listening on localhost for locally-generated
|
|
||||||
# DNS queries that require a recursive nameserver, and the system
|
|
||||||
# might have other network interfaces for e.g. tunnelling, we have
|
|
||||||
# to be specific about the network interfaces that nsd binds to.
|
|
||||||
for ipaddr in (env.get("PRIVATE_IP", "") + " " + env.get("PRIVATE_IPV6", "")).split(" "):
|
|
||||||
if ipaddr == "": continue
|
|
||||||
nsdconf += " ip-address: %s\n" % ipaddr
|
|
||||||
|
|
||||||
# Append the zones.
|
# Append the zones.
|
||||||
for domain, zonefile in zonefiles:
|
for domain, zonefile in zonefiles:
|
||||||
@@ -495,13 +460,13 @@ server:
|
|||||||
zone:
|
zone:
|
||||||
name: %s
|
name: %s
|
||||||
zonefile: %s
|
zonefile: %s
|
||||||
""" % (domain.encode("idna").decode("ascii"), zonefile)
|
""" % (domain, zonefile)
|
||||||
|
|
||||||
# If a custom secondary nameserver has been set, allow zone transfers
|
# If a custom secondary nameserver has been set, allow zone transfers
|
||||||
# and notifies to that nameserver.
|
# and notifies to that nameserver.
|
||||||
if additional_records.get("_secondary_nameserver"):
|
if get_secondary_dns(additional_records):
|
||||||
# Get the IP address of the nameserver by resolving it.
|
# Get the IP address of the nameserver by resolving it.
|
||||||
hostname = additional_records.get("_secondary_nameserver")
|
hostname = get_secondary_dns(additional_records)
|
||||||
resolver = dns.resolver.get_default_resolver()
|
resolver = dns.resolver.get_default_resolver()
|
||||||
response = dns.resolver.query(hostname+'.', "A")
|
response = dns.resolver.query(hostname+'.', "A")
|
||||||
ipaddr = str(response[0])
|
ipaddr = str(response[0])
|
||||||
@@ -509,16 +474,17 @@ zone:
|
|||||||
provide-xfr: %s NOKEY
|
provide-xfr: %s NOKEY
|
||||||
""" % (ipaddr, ipaddr)
|
""" % (ipaddr, ipaddr)
|
||||||
|
|
||||||
|
# Check if the file is changing. If it isn't changing,
|
||||||
# Check if the nsd.conf is changing. If it isn't changing,
|
|
||||||
# return False to flag that no change was made.
|
# return False to flag that no change was made.
|
||||||
with open("/etc/nsd/nsd.conf") as f:
|
if os.path.exists(nsd_conf_file):
|
||||||
if f.read() == nsdconf:
|
with open(nsd_conf_file) as f:
|
||||||
return False
|
if f.read() == nsdconf:
|
||||||
|
return False
|
||||||
|
|
||||||
with open("/etc/nsd/nsd.conf", "w") as f:
|
# Write out new contents and return True to signal that
|
||||||
|
# configuration changed.
|
||||||
|
with open(nsd_conf_file, "w") as f:
|
||||||
f.write(nsdconf)
|
f.write(nsdconf)
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
@@ -540,14 +506,11 @@ def sign_zone(domain, zonefile, env):
|
|||||||
algo = dnssec_choose_algo(domain, env)
|
algo = dnssec_choose_algo(domain, env)
|
||||||
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))
|
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))
|
||||||
|
|
||||||
# From here, use the IDNA encoding of the domain name.
|
|
||||||
domain = domain.encode("idna").decode("ascii")
|
|
||||||
|
|
||||||
# In order to use the same keys for all domains, we have to generate
|
# In order to use the same keys for all domains, we have to generate
|
||||||
# a new .key file with a DNSSEC record for the specific domain. We
|
# a new .key file with a DNSSEC record for the specific domain. We
|
||||||
# can reuse the same key, but it won't validate without a DNSSEC
|
# can reuse the same key, but it won't validate without a DNSSEC
|
||||||
# record specifically for the domain.
|
# record specifically for the domain.
|
||||||
#
|
#
|
||||||
# Copy the .key and .private files to /tmp to patch them up.
|
# Copy the .key and .private files to /tmp to patch them up.
|
||||||
#
|
#
|
||||||
# Use os.umask and open().write() to securely create a copy that only
|
# Use os.umask and open().write() to securely create a copy that only
|
||||||
@@ -612,8 +575,9 @@ def sign_zone(domain, zonefile, env):
|
|||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
def write_opendkim_tables(zonefiles, env):
|
def write_opendkim_tables(domains, env):
|
||||||
# Append a record to OpenDKIM's KeyTable and SigningTable for each domain.
|
# Append a record to OpenDKIM's KeyTable and SigningTable for each domain
|
||||||
|
# that we send mail from (zones and all subdomains).
|
||||||
|
|
||||||
opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
|
opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
|
||||||
|
|
||||||
@@ -632,7 +596,7 @@ def write_opendkim_tables(zonefiles, env):
|
|||||||
"SigningTable":
|
"SigningTable":
|
||||||
"".join(
|
"".join(
|
||||||
"*@{domain} {domain}\n".format(domain=domain)
|
"*@{domain} {domain}\n".format(domain=domain)
|
||||||
for domain, zonefile in zonefiles
|
for domain in domains
|
||||||
),
|
),
|
||||||
|
|
||||||
# The KeyTable specifies the signing domain, the DKIM selector, and the
|
# The KeyTable specifies the signing domain, the DKIM selector, and the
|
||||||
@@ -641,7 +605,7 @@ def write_opendkim_tables(zonefiles, env):
|
|||||||
"KeyTable":
|
"KeyTable":
|
||||||
"".join(
|
"".join(
|
||||||
"{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file)
|
"{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file)
|
||||||
for domain, zonefile in zonefiles
|
for domain in domains
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -664,7 +628,94 @@ def write_opendkim_tables(zonefiles, env):
|
|||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
def set_custom_dns_record(qname, rtype, value, env):
|
def get_custom_dns_config(env):
|
||||||
|
try:
|
||||||
|
custom_dns = rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
|
||||||
|
if not isinstance(custom_dns, dict): raise ValueError() # caught below
|
||||||
|
except:
|
||||||
|
return [ ]
|
||||||
|
|
||||||
|
for qname, value in custom_dns.items():
|
||||||
|
# Short form. Mapping a domain name to a string is short-hand
|
||||||
|
# for creating A records.
|
||||||
|
if isinstance(value, str):
|
||||||
|
values = [("A", value)]
|
||||||
|
|
||||||
|
# A mapping creates multiple records.
|
||||||
|
elif isinstance(value, dict):
|
||||||
|
values = value.items()
|
||||||
|
|
||||||
|
# No other type of data is allowed.
|
||||||
|
else:
|
||||||
|
raise ValueError()
|
||||||
|
|
||||||
|
for rtype, value2 in values:
|
||||||
|
if isinstance(value2, str):
|
||||||
|
yield (qname, rtype, value2)
|
||||||
|
elif isinstance(value2, list):
|
||||||
|
for value3 in value2:
|
||||||
|
yield (qname, rtype, value3)
|
||||||
|
# No other type of data is allowed.
|
||||||
|
else:
|
||||||
|
raise ValueError()
|
||||||
|
|
||||||
|
def filter_custom_records(domain, custom_dns_iter):
|
||||||
|
for qname, rtype, value in custom_dns_iter:
|
||||||
|
# We don't count the secondary nameserver config (if present) as a record - that would just be
|
||||||
|
# confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.
|
||||||
|
if qname == "_secondary_nameserver": continue
|
||||||
|
|
||||||
|
# Is this record for the domain or one of its subdomains?
|
||||||
|
# If `domain` is None, return records for all domains.
|
||||||
|
if domain is not None and qname != domain and not qname.endswith("." + domain): continue
|
||||||
|
|
||||||
|
# Turn the fully qualified domain name in the YAML file into
|
||||||
|
# our short form (None => domain, or a relative QNAME) if
|
||||||
|
# domain is not None.
|
||||||
|
if domain is not None:
|
||||||
|
if qname == domain:
|
||||||
|
qname = None
|
||||||
|
else:
|
||||||
|
qname = qname[0:len(qname)-len("." + domain)]
|
||||||
|
|
||||||
|
yield (qname, rtype, value)
|
||||||
|
|
||||||
|
def write_custom_dns_config(config, env):
|
||||||
|
# We get a list of (qname, rtype, value) triples. Convert this into a
|
||||||
|
# nice dictionary format for storage on disk.
|
||||||
|
from collections import OrderedDict
|
||||||
|
config = list(config)
|
||||||
|
dns = OrderedDict()
|
||||||
|
seen_qnames = set()
|
||||||
|
|
||||||
|
# Process the qnames in the order we see them.
|
||||||
|
for qname in [rec[0] for rec in config]:
|
||||||
|
if qname in seen_qnames: continue
|
||||||
|
seen_qnames.add(qname)
|
||||||
|
|
||||||
|
records = [(rec[1], rec[2]) for rec in config if rec[0] == qname]
|
||||||
|
if len(records) == 1 and records[0][0] == "A":
|
||||||
|
dns[qname] = records[0][1]
|
||||||
|
else:
|
||||||
|
dns[qname] = OrderedDict()
|
||||||
|
seen_rtypes = set()
|
||||||
|
|
||||||
|
# Process the rtypes in the order we see them.
|
||||||
|
for rtype in [rec[0] for rec in records]:
|
||||||
|
if rtype in seen_rtypes: continue
|
||||||
|
seen_rtypes.add(rtype)
|
||||||
|
|
||||||
|
values = [rec[1] for rec in records if rec[0] == rtype]
|
||||||
|
if len(values) == 1:
|
||||||
|
values = values[0]
|
||||||
|
dns[qname][rtype] = values
|
||||||
|
|
||||||
|
# Write.
|
||||||
|
config_yaml = rtyaml.dump(dns)
|
||||||
|
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
|
||||||
|
f.write(config_yaml)
|
||||||
|
|
||||||
|
def set_custom_dns_record(qname, rtype, value, action, env):
|
||||||
# validate qname
|
# validate qname
|
||||||
for zone, fn in get_dns_zones(env):
|
for zone, fn in get_dns_zones(env):
|
||||||
# It must match a zone apex or be a subdomain of a zone
|
# It must match a zone apex or be a subdomain of a zone
|
||||||
@@ -673,85 +724,83 @@ def set_custom_dns_record(qname, rtype, value, env):
|
|||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
# No match.
|
# No match.
|
||||||
raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
|
if qname != "_secondary_nameserver":
|
||||||
|
raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
|
||||||
|
|
||||||
# validate rtype
|
# validate rtype
|
||||||
rtype = rtype.upper()
|
rtype = rtype.upper()
|
||||||
if value is not None:
|
if value is not None and qname != "_secondary_nameserver":
|
||||||
if rtype in ("A", "AAAA"):
|
if rtype in ("A", "AAAA"):
|
||||||
v = ipaddress.ip_address(value)
|
if value != "local": # "local" is a special flag for us
|
||||||
if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.")
|
v = ipaddress.ip_address(value) # raises a ValueError if there's a problem
|
||||||
if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.")
|
if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.")
|
||||||
elif rtype in ("CNAME", "TXT", "SRV"):
|
if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.")
|
||||||
|
elif rtype in ("CNAME", "TXT", "SRV", "MX"):
|
||||||
# anything goes
|
# anything goes
|
||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
raise ValueError("Unknown record type '%s'." % rtype)
|
raise ValueError("Unknown record type '%s'." % rtype)
|
||||||
|
|
||||||
# load existing config
|
# load existing config
|
||||||
config = get_custom_dns_config(env)
|
config = list(get_custom_dns_config(env))
|
||||||
|
|
||||||
# update
|
# update
|
||||||
if qname not in config:
|
newconfig = []
|
||||||
if value is None:
|
made_change = False
|
||||||
# Is asking to delete a record that does not exist.
|
needs_add = True
|
||||||
return False
|
for _qname, _rtype, _value in config:
|
||||||
elif rtype == "A":
|
if action == "add":
|
||||||
# Add this record using the short form 'qname: value'.
|
if (_qname, _rtype, _value) == (qname, rtype, value):
|
||||||
config[qname] = value
|
# Record already exists. Bail.
|
||||||
else:
|
|
||||||
# Add this record. This is the qname's first record.
|
|
||||||
config[qname] = { rtype: value }
|
|
||||||
else:
|
|
||||||
if isinstance(config[qname], str):
|
|
||||||
# This is a short-form 'qname: value' implicit-A record.
|
|
||||||
if value is None and rtype != "A":
|
|
||||||
# Is asking to delete a record that doesn't exist.
|
|
||||||
return False
|
return False
|
||||||
elif value is None and rtype == "A":
|
elif action == "set":
|
||||||
# Delete record.
|
if (_qname, _rtype) == (qname, rtype):
|
||||||
del config[qname]
|
if _value == value:
|
||||||
elif rtype == "A":
|
# Flag that the record already exists, don't
|
||||||
# Update, keeping short form.
|
# need to add it.
|
||||||
if config[qname] == "value":
|
needs_add = False
|
||||||
# No change.
|
|
||||||
return False
|
|
||||||
config[qname] = value
|
|
||||||
else:
|
|
||||||
# Expand short form so we can add a new record type.
|
|
||||||
config[qname] = { "A": config[qname], rtype: value }
|
|
||||||
else:
|
|
||||||
# This is the qname: { ... } (dict) format.
|
|
||||||
if value is None:
|
|
||||||
if rtype not in config[qname]:
|
|
||||||
# Is asking to delete a record that doesn't exist.
|
|
||||||
return False
|
|
||||||
else:
|
else:
|
||||||
# Delete the record. If it's the last record, delete the domain.
|
# Drop any other values for this (qname, rtype).
|
||||||
del config[qname][rtype]
|
made_change = True
|
||||||
if len(config[qname]) == 0:
|
continue
|
||||||
del config[qname]
|
elif action == "remove":
|
||||||
else:
|
if (_qname, _rtype, _value) == (qname, rtype, value):
|
||||||
# Update the record.
|
# Drop this record.
|
||||||
if config[qname].get(rtype) == "value":
|
made_change = True
|
||||||
# No change.
|
continue
|
||||||
return False
|
if value == None and (_qname, _rtype) == (qname, rtype):
|
||||||
config[qname][rtype] = value
|
# Drop all qname-rtype records.
|
||||||
|
made_change = True
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
raise ValueError("Invalid action: " + action)
|
||||||
|
|
||||||
# serialize & save
|
# Preserve this record.
|
||||||
write_custom_dns_config(config, env)
|
newconfig.append((_qname, _rtype, _value))
|
||||||
|
|
||||||
return True
|
if action in ("add", "set") and needs_add and value is not None:
|
||||||
|
newconfig.append((qname, rtype, value))
|
||||||
|
made_change = True
|
||||||
|
|
||||||
|
if made_change:
|
||||||
|
# serialize & save
|
||||||
|
write_custom_dns_config(newconfig, env)
|
||||||
|
|
||||||
|
return made_change
|
||||||
|
|
||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
|
def get_secondary_dns(custom_dns):
|
||||||
|
for qname, rtype, value in custom_dns:
|
||||||
|
if qname == "_secondary_nameserver":
|
||||||
|
return value
|
||||||
|
return None
|
||||||
|
|
||||||
def set_secondary_dns(hostname, env):
|
def set_secondary_dns(hostname, env):
|
||||||
config = get_custom_dns_config(env)
|
|
||||||
|
|
||||||
if hostname in (None, ""):
|
if hostname in (None, ""):
|
||||||
# Clear.
|
# Clear.
|
||||||
if "_secondary_nameserver" in config:
|
set_custom_dns_record("_secondary_nameserver", "A", None, "set", env)
|
||||||
del config["_secondary_nameserver"]
|
|
||||||
else:
|
else:
|
||||||
# Validate.
|
# Validate.
|
||||||
hostname = hostname.strip().lower()
|
hostname = hostname.strip().lower()
|
||||||
@@ -762,10 +811,9 @@ def set_secondary_dns(hostname, env):
|
|||||||
raise ValueError("Could not resolve the IP address of %s." % hostname)
|
raise ValueError("Could not resolve the IP address of %s." % hostname)
|
||||||
|
|
||||||
# Set.
|
# Set.
|
||||||
config["_secondary_nameserver"] = hostname
|
set_custom_dns_record("_secondary_nameserver", "A", hostname, "set", env)
|
||||||
|
|
||||||
# Save and apply.
|
# Apply.
|
||||||
write_custom_dns_config(config, env)
|
|
||||||
return do_dns_update(env)
|
return do_dns_update(env)
|
||||||
|
|
||||||
|
|
||||||
@@ -816,9 +864,11 @@ def build_recommended_dns(env):
|
|||||||
ret = []
|
ret = []
|
||||||
domains = get_dns_domains(env)
|
domains = get_dns_domains(env)
|
||||||
zonefiles = get_dns_zones(env)
|
zonefiles = get_dns_zones(env)
|
||||||
additional_records = get_custom_dns_config(env)
|
additional_records = list(get_custom_dns_config(env))
|
||||||
|
from web_update import get_default_www_redirects
|
||||||
|
www_redirect_domains = get_default_www_redirects(env)
|
||||||
for domain, zonefile in zonefiles:
|
for domain, zonefile in zonefiles:
|
||||||
records = build_zone(domain, domains, additional_records, env)
|
records = build_zone(domain, domains, additional_records, www_redirect_domains, env)
|
||||||
|
|
||||||
# remove records that we don't dislay
|
# remove records that we don't dislay
|
||||||
records = [r for r in records if r[3] is not False]
|
records = [r for r in records if r[3] is not False]
|
||||||
@@ -847,8 +897,11 @@ def build_recommended_dns(env):
|
|||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
from utils import load_environment
|
from utils import load_environment
|
||||||
env = load_environment()
|
env = load_environment()
|
||||||
for zone, records in build_recommended_dns(env):
|
if sys.argv[-1] == "--lint":
|
||||||
for record in records:
|
write_custom_dns_config(get_custom_dns_config(env), env)
|
||||||
print("; " + record['explanation'])
|
else:
|
||||||
print(record['qname'], record['rtype'], record['value'], sep="\t")
|
for zone, records in build_recommended_dns(env):
|
||||||
print()
|
for record in records:
|
||||||
|
print("; " + record['explanation'])
|
||||||
|
print(record['qname'], record['rtype'], record['value'], sep="\t")
|
||||||
|
print()
|
||||||
|
|||||||
@@ -2,69 +2,82 @@
|
|||||||
|
|
||||||
import subprocess, shutil, os, sqlite3, re
|
import subprocess, shutil, os, sqlite3, re
|
||||||
import utils
|
import utils
|
||||||
|
from email_validator import validate_email as validate_email_, EmailNotValidError
|
||||||
|
|
||||||
def validate_email(email, mode=None):
|
def validate_email(email, mode=None):
|
||||||
# There are a lot of characters permitted in email addresses, but
|
# Checks that an email address is syntactically valid. Returns True/False.
|
||||||
# Dovecot's sqlite driver seems to get confused if there are any
|
# Until Postfix supports SMTPUTF8, an email address may contain ASCII
|
||||||
# unusual characters in the address. Bah. Also note that since
|
# characters only; IDNs must be IDNA-encoded.
|
||||||
# the mailbox path name is based on the email address, the address
|
#
|
||||||
# shouldn't be absurdly long and must not have a forward slash.
|
# When mode=="user", we're checking that this can be a user account name.
|
||||||
|
# Dovecot has tighter restrictions - letters, numbers, underscore, and
|
||||||
|
# dash only!
|
||||||
|
#
|
||||||
|
# When mode=="alias", we're allowing anything that can be in a Postfix
|
||||||
|
# alias table, i.e. omitting the local part ("@domain.tld") is OK.
|
||||||
|
|
||||||
if len(email) > 255: return False
|
# Check the syntax of the address.
|
||||||
|
|
||||||
if mode == 'user':
|
|
||||||
# For Dovecot's benefit, only allow basic characters.
|
|
||||||
ATEXT = r'[a-zA-Z0-9_\-]'
|
|
||||||
elif mode in (None, 'alias'):
|
|
||||||
# For aliases, we can allow any valid email address.
|
|
||||||
# Based on RFC 2822 and https://github.com/SyrusAkbary/validate_email/blob/master/validate_email.py,
|
|
||||||
# these characters are permitted in email addresses.
|
|
||||||
ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]' # see 3.2.4
|
|
||||||
else:
|
|
||||||
raise ValueError(mode)
|
|
||||||
|
|
||||||
# per RFC 2822 3.2.4
|
|
||||||
DOT_ATOM_TEXT_LOCAL = ATEXT + r'+(?:\.' + ATEXT + r'+)*'
|
|
||||||
if mode == 'alias':
|
|
||||||
# For aliases, Postfix accepts '@domain.tld' format for
|
|
||||||
# catch-all addresses on the source side and domain aliases
|
|
||||||
# on the destination side. Make the local part optional.
|
|
||||||
DOT_ATOM_TEXT_LOCAL = '(?:' + DOT_ATOM_TEXT_LOCAL + ')?'
|
|
||||||
|
|
||||||
# as above, but we can require that the host part have at least
|
|
||||||
# one period in it, so use a "+" rather than a "*" at the end
|
|
||||||
DOT_ATOM_TEXT_HOST = ATEXT + r'+(?:\.' + ATEXT + r'+)+'
|
|
||||||
|
|
||||||
# per RFC 2822 3.4.1
|
|
||||||
ADDR_SPEC = '^(%s)@(%s)$' % (DOT_ATOM_TEXT_LOCAL, DOT_ATOM_TEXT_HOST)
|
|
||||||
|
|
||||||
# Check the regular expression.
|
|
||||||
m = re.match(ADDR_SPEC, email)
|
|
||||||
if not m: return False
|
|
||||||
|
|
||||||
# Check that the domain part is IDNA-encodable.
|
|
||||||
localpart, domainpart = m.groups()
|
|
||||||
try:
|
try:
|
||||||
domainpart.encode("idna")
|
validate_email_(email,
|
||||||
except:
|
allow_smtputf8=False,
|
||||||
|
check_deliverability=False,
|
||||||
|
allow_empty_local=(mode=="alias")
|
||||||
|
)
|
||||||
|
except EmailNotValidError:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
if mode == 'user':
|
||||||
|
# There are a lot of characters permitted in email addresses, but
|
||||||
|
# Dovecot's sqlite auth driver seems to get confused if there are any
|
||||||
|
# unusual characters in the address. Bah. Also note that since
|
||||||
|
# the mailbox path name is based on the email address, the address
|
||||||
|
# shouldn't be absurdly long and must not have a forward slash.
|
||||||
|
# Our database is case sensitive (oops), which affects mail delivery
|
||||||
|
# (Postfix always queries in lowercase?), so also only permit lowercase
|
||||||
|
# letters.
|
||||||
|
if len(email) > 255: return False
|
||||||
|
if re.search(r'[^\@\.a-z0-9_\-]+', email):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Everything looks good.
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def sanitize_idn_email_address(email):
|
def sanitize_idn_email_address(email):
|
||||||
# Convert an IDNA-encoded email address (domain part) into Unicode
|
# The user may enter Unicode in an email address. Convert the domain part
|
||||||
# before storing in our database. Chrome may IDNA-ize <input type="email">
|
# to IDNA before going into our database. Leave the local part alone ---
|
||||||
# values before POSTing, so we want to normalize before putting
|
# although validate_email will reject non-ASCII characters.
|
||||||
# values into the database.
|
#
|
||||||
|
# The domain name system only exists in ASCII, so it doesn't make sense
|
||||||
|
# to store domain names in Unicode. We want to store what is meaningful
|
||||||
|
# to the underlying protocols.
|
||||||
try:
|
try:
|
||||||
localpart, domainpart = email.split("@")
|
localpart, domainpart = email.split("@")
|
||||||
domainpart = domainpart.encode("ascii").decode("idna")
|
domainpart = domainpart.encode("idna").decode('ascii')
|
||||||
return localpart + "@" + domainpart
|
return localpart + "@" + domainpart
|
||||||
except:
|
except:
|
||||||
# Domain part is already Unicode or not IDNA-valid, so
|
# Domain part is not IDNA-valid, so leave unchanged. If there
|
||||||
# leave unchanged.
|
# are non-ASCII characters it will be filtered out by
|
||||||
|
# validate_email.
|
||||||
return email
|
return email
|
||||||
|
|
||||||
|
def prettify_idn_email_address(email):
|
||||||
|
# This is the opposite of sanitize_idn_email_address. We store domain
|
||||||
|
# names in IDNA in the database, but we want to show Unicode to the user.
|
||||||
|
try:
|
||||||
|
localpart, domainpart = email.split("@")
|
||||||
|
domainpart = domainpart.encode("ascii").decode('idna')
|
||||||
|
return localpart + "@" + domainpart
|
||||||
|
except:
|
||||||
|
# Failed to decode IDNA. Should never happen.
|
||||||
|
return email
|
||||||
|
|
||||||
|
def is_dcv_address(email):
|
||||||
|
email = email.lower()
|
||||||
|
for localpart in ("admin", "administrator", "postmaster", "hostmaster", "webmaster"):
|
||||||
|
if email.startswith(localpart+"@") or email.startswith(localpart+"+"):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
def open_database(env, with_connection=False):
|
def open_database(env, with_connection=False):
|
||||||
conn = sqlite3.connect(env["STORAGE_ROOT"] + "/mail/users.sqlite")
|
conn = sqlite3.connect(env["STORAGE_ROOT"] + "/mail/users.sqlite")
|
||||||
if not with_connection:
|
if not with_connection:
|
||||||
@@ -90,11 +103,7 @@ def get_mail_users_ex(env, with_archived=False, with_slow_info=False):
|
|||||||
# {
|
# {
|
||||||
# email: "name@domain.tld",
|
# email: "name@domain.tld",
|
||||||
# privileges: [ "priv1", "priv2", ... ],
|
# privileges: [ "priv1", "priv2", ... ],
|
||||||
# status: "active",
|
# status: "active" | "inactive",
|
||||||
# aliases: [
|
|
||||||
# ("alias@domain.tld", ["indirect.alias@domain.tld", ...]),
|
|
||||||
# ...
|
|
||||||
# ]
|
|
||||||
# },
|
# },
|
||||||
# ...
|
# ...
|
||||||
# ]
|
# ]
|
||||||
@@ -102,9 +111,6 @@ def get_mail_users_ex(env, with_archived=False, with_slow_info=False):
|
|||||||
# ...
|
# ...
|
||||||
# ]
|
# ]
|
||||||
|
|
||||||
# Pre-load all aliases.
|
|
||||||
aliases = get_mail_alias_map(env)
|
|
||||||
|
|
||||||
# Get users and their privileges.
|
# Get users and their privileges.
|
||||||
users = []
|
users = []
|
||||||
active_accounts = set()
|
active_accounts = set()
|
||||||
@@ -121,10 +127,6 @@ def get_mail_users_ex(env, with_archived=False, with_slow_info=False):
|
|||||||
users.append(user)
|
users.append(user)
|
||||||
|
|
||||||
if with_slow_info:
|
if with_slow_info:
|
||||||
user["aliases"] = [
|
|
||||||
(alias, sorted(evaluate_mail_alias_map(alias, aliases, env)))
|
|
||||||
for alias in aliases.get(email.lower(), [])
|
|
||||||
]
|
|
||||||
user["mailbox_size"] = utils.du(os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes', *reversed(email.split("@"))))
|
user["mailbox_size"] = utils.du(os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes', *reversed(email.split("@"))))
|
||||||
|
|
||||||
# Add in archived accounts.
|
# Add in archived accounts.
|
||||||
@@ -136,7 +138,7 @@ def get_mail_users_ex(env, with_archived=False, with_slow_info=False):
|
|||||||
mbox = os.path.join(root, domain, user)
|
mbox = os.path.join(root, domain, user)
|
||||||
if email in active_accounts: continue
|
if email in active_accounts: continue
|
||||||
user = {
|
user = {
|
||||||
"email": email,
|
"email": email,
|
||||||
"privileges": "",
|
"privileges": "",
|
||||||
"status": "inactive",
|
"status": "inactive",
|
||||||
"mailbox": mbox,
|
"mailbox": mbox,
|
||||||
@@ -193,7 +195,8 @@ def get_mail_aliases_ex(env):
|
|||||||
# domain: "domain.tld",
|
# domain: "domain.tld",
|
||||||
# alias: [
|
# alias: [
|
||||||
# {
|
# {
|
||||||
# source: "name@domain.tld",
|
# source: "name@domain.tld", # IDNA-encoded
|
||||||
|
# source_display: "name@domain.tld", # full Unicode
|
||||||
# destination: ["target1@domain.com", "target2@domain.com", ...],
|
# destination: ["target1@domain.com", "target2@domain.com", ...],
|
||||||
# required: True|False
|
# required: True|False
|
||||||
# },
|
# },
|
||||||
@@ -208,7 +211,7 @@ def get_mail_aliases_ex(env):
|
|||||||
for source, destination in get_mail_aliases(env):
|
for source, destination in get_mail_aliases(env):
|
||||||
# get alias info
|
# get alias info
|
||||||
domain = get_domain(source)
|
domain = get_domain(source)
|
||||||
required = ((source in required_aliases) or (source == get_system_administrator(env)))
|
required = (source in required_aliases)
|
||||||
|
|
||||||
# add to list
|
# add to list
|
||||||
if not domain in domains:
|
if not domain in domains:
|
||||||
@@ -218,7 +221,8 @@ def get_mail_aliases_ex(env):
|
|||||||
}
|
}
|
||||||
domains[domain]["aliases"].append({
|
domains[domain]["aliases"].append({
|
||||||
"source": source,
|
"source": source,
|
||||||
"destination": [d.strip() for d in destination.split(",")],
|
"source_display": prettify_idn_email_address(source),
|
||||||
|
"destination": [prettify_idn_email_address(d.strip()) for d in destination.split(",")],
|
||||||
"required": required,
|
"required": required,
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -230,40 +234,36 @@ def get_mail_aliases_ex(env):
|
|||||||
domain["aliases"].sort(key = lambda alias : (alias["required"], alias["source"]))
|
domain["aliases"].sort(key = lambda alias : (alias["required"], alias["source"]))
|
||||||
return domains
|
return domains
|
||||||
|
|
||||||
def get_mail_alias_map(env):
|
def get_domain(emailaddr, as_unicode=True):
|
||||||
aliases = { }
|
# Gets the domain part of an email address. Turns IDNA
|
||||||
for alias, targets in get_mail_aliases(env):
|
# back to Unicode for display.
|
||||||
for em in targets.split(","):
|
ret = emailaddr.split('@', 1)[1]
|
||||||
em = em.strip().lower()
|
if as_unicode: ret = ret.encode('ascii').decode('idna')
|
||||||
aliases.setdefault(em, []).append(alias)
|
|
||||||
return aliases
|
|
||||||
|
|
||||||
def evaluate_mail_alias_map(email, aliases, env):
|
|
||||||
ret = set()
|
|
||||||
for alias in aliases.get(email.lower(), []):
|
|
||||||
ret.add(alias)
|
|
||||||
ret |= evaluate_mail_alias_map(alias, aliases, env)
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
def get_domain(emailaddr):
|
|
||||||
return emailaddr.split('@', 1)[1]
|
|
||||||
|
|
||||||
def get_mail_domains(env, filter_aliases=lambda alias : True):
|
def get_mail_domains(env, filter_aliases=lambda alias : True):
|
||||||
|
# Returns the domain names (IDNA-encoded) of all of the email addresses
|
||||||
|
# configured on the system.
|
||||||
return set(
|
return set(
|
||||||
[get_domain(addr) for addr in get_mail_users(env)]
|
[get_domain(addr, as_unicode=False) for addr in get_mail_users(env)]
|
||||||
+ [get_domain(source) for source, target in get_mail_aliases(env) if filter_aliases((source, target)) ]
|
+ [get_domain(source, as_unicode=False) for source, target in get_mail_aliases(env) if filter_aliases((source, target)) ]
|
||||||
)
|
)
|
||||||
|
|
||||||
def add_mail_user(email, pw, privs, env):
|
def add_mail_user(email, pw, privs, env):
|
||||||
# accept IDNA domain names but normalize to Unicode before going into database
|
|
||||||
email = sanitize_idn_email_address(email)
|
|
||||||
|
|
||||||
# validate email
|
# validate email
|
||||||
if email.strip() == "":
|
if email.strip() == "":
|
||||||
return ("No email address provided.", 400)
|
return ("No email address provided.", 400)
|
||||||
if not validate_email(email, mode='user'):
|
elif not validate_email(email):
|
||||||
return ("Invalid email address.", 400)
|
return ("Invalid email address.", 400)
|
||||||
|
elif not validate_email(email, mode='user'):
|
||||||
|
return ("User account email addresses may only use the lowercase ASCII letters a-z, the digits 0-9, underscore (_), hyphen (-), and period (.).", 400)
|
||||||
|
elif is_dcv_address(email) and len(get_mail_users(env)) > 0:
|
||||||
|
# Make domain control validation hijacking a little harder to mess up by preventing the usual
|
||||||
|
# addresses used for DCV from being user accounts. Except let it be the first account because
|
||||||
|
# during box setup the user won't know the rules.
|
||||||
|
return ("You may not make a user account for that address because it is frequently used for domain control validation. Use an alias instead if necessary.", 400)
|
||||||
|
|
||||||
|
# validate password
|
||||||
validate_password(pw)
|
validate_password(pw)
|
||||||
|
|
||||||
# validate privileges
|
# validate privileges
|
||||||
@@ -291,9 +291,11 @@ def add_mail_user(email, pw, privs, env):
|
|||||||
# write databasebefore next step
|
# write databasebefore next step
|
||||||
conn.commit()
|
conn.commit()
|
||||||
|
|
||||||
# Create the user's INBOX, Spam, and Drafts folders, and subscribe them.
|
# Create & subscribe the user's INBOX, Trash, Spam, and Drafts folders.
|
||||||
# K-9 mail will poll every 90 seconds if a Drafts folder does not exist, so create it
|
# * Our sieve rule for spam expects that the Spam folder exists.
|
||||||
# to avoid unnecessary polling.
|
# * Roundcube will show an error if the user tries to delete a message before the Trash folder exists (#359).
|
||||||
|
# * K-9 mail will poll every 90 seconds if a Drafts folder does not exist, so create it
|
||||||
|
# to avoid unnecessary polling.
|
||||||
|
|
||||||
# Check if the mailboxes exist before creating them. When creating a user that had previously
|
# Check if the mailboxes exist before creating them. When creating a user that had previously
|
||||||
# been deleted, the mailboxes will still exist because they are still on disk.
|
# been deleted, the mailboxes will still exist because they are still on disk.
|
||||||
@@ -304,7 +306,7 @@ def add_mail_user(email, pw, privs, env):
|
|||||||
conn.commit()
|
conn.commit()
|
||||||
return ("Failed to initialize the user: " + e.output.decode("utf8"), 400)
|
return ("Failed to initialize the user: " + e.output.decode("utf8"), 400)
|
||||||
|
|
||||||
for folder in ("INBOX", "Spam", "Drafts"):
|
for folder in ("INBOX", "Trash", "Spam", "Drafts"):
|
||||||
if folder not in existing_mboxes:
|
if folder not in existing_mboxes:
|
||||||
utils.shell('check_call', ["doveadm", "mailbox", "create", "-u", email, "-s", folder])
|
utils.shell('check_call', ["doveadm", "mailbox", "create", "-u", email, "-s", folder])
|
||||||
|
|
||||||
@@ -312,12 +314,9 @@ def add_mail_user(email, pw, privs, env):
|
|||||||
return kick(env, "mail user added")
|
return kick(env, "mail user added")
|
||||||
|
|
||||||
def set_mail_password(email, pw, env):
|
def set_mail_password(email, pw, env):
|
||||||
# accept IDNA domain names but normalize to Unicode before going into database
|
|
||||||
email = sanitize_idn_email_address(email)
|
|
||||||
|
|
||||||
# validate that password is acceptable
|
# validate that password is acceptable
|
||||||
validate_password(pw)
|
validate_password(pw)
|
||||||
|
|
||||||
# hash the password
|
# hash the password
|
||||||
pw = hash_password(pw)
|
pw = hash_password(pw)
|
||||||
|
|
||||||
@@ -348,9 +347,6 @@ def get_mail_password(email, env):
|
|||||||
return rows[0][0]
|
return rows[0][0]
|
||||||
|
|
||||||
def remove_mail_user(email, env):
|
def remove_mail_user(email, env):
|
||||||
# accept IDNA domain names but normalize to Unicode before going into database
|
|
||||||
email = sanitize_idn_email_address(email)
|
|
||||||
|
|
||||||
# remove
|
# remove
|
||||||
conn, c = open_database(env, with_connection=True)
|
conn, c = open_database(env, with_connection=True)
|
||||||
c.execute("DELETE FROM users WHERE email=?", (email,))
|
c.execute("DELETE FROM users WHERE email=?", (email,))
|
||||||
@@ -364,15 +360,13 @@ def remove_mail_user(email, env):
|
|||||||
def parse_privs(value):
|
def parse_privs(value):
|
||||||
return [p for p in value.split("\n") if p.strip() != ""]
|
return [p for p in value.split("\n") if p.strip() != ""]
|
||||||
|
|
||||||
def get_mail_user_privileges(email, env):
|
def get_mail_user_privileges(email, env, empty_on_error=False):
|
||||||
# accept IDNA domain names but normalize to Unicode before going into database
|
|
||||||
email = sanitize_idn_email_address(email)
|
|
||||||
|
|
||||||
# get privs
|
# get privs
|
||||||
c = open_database(env)
|
c = open_database(env)
|
||||||
c.execute('SELECT privileges FROM users WHERE email=?', (email,))
|
c.execute('SELECT privileges FROM users WHERE email=?', (email,))
|
||||||
rows = c.fetchall()
|
rows = c.fetchall()
|
||||||
if len(rows) != 1:
|
if len(rows) != 1:
|
||||||
|
if empty_on_error: return []
|
||||||
return ("That's not a user (%s)." % email, 400)
|
return ("That's not a user (%s)." % email, 400)
|
||||||
return parse_privs(rows[0][0])
|
return parse_privs(rows[0][0])
|
||||||
|
|
||||||
@@ -382,9 +376,6 @@ def validate_privilege(priv):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
def add_remove_mail_user_privilege(email, priv, action, env):
|
def add_remove_mail_user_privilege(email, priv, action, env):
|
||||||
# accept IDNA domain names but normalize to Unicode before going into database
|
|
||||||
email = sanitize_idn_email_address(email)
|
|
||||||
|
|
||||||
# validate
|
# validate
|
||||||
validation = validate_privilege(priv)
|
validation = validate_privilege(priv)
|
||||||
if validation: return validation
|
if validation: return validation
|
||||||
@@ -412,32 +403,51 @@ def add_remove_mail_user_privilege(email, priv, action, env):
|
|||||||
return "OK"
|
return "OK"
|
||||||
|
|
||||||
def add_mail_alias(source, destination, env, update_if_exists=False, do_kick=True):
|
def add_mail_alias(source, destination, env, update_if_exists=False, do_kick=True):
|
||||||
# accept IDNA domain names but normalize to Unicode before going into database
|
# convert Unicode domain to IDNA
|
||||||
source = sanitize_idn_email_address(source)
|
source = sanitize_idn_email_address(source)
|
||||||
|
|
||||||
|
# Our database is case sensitive (oops), which affects mail delivery
|
||||||
|
# (Postfix always queries in lowercase?), so force lowercase.
|
||||||
|
source = source.lower()
|
||||||
|
|
||||||
# validate source
|
# validate source
|
||||||
if source.strip() == "":
|
source = source.strip()
|
||||||
|
if source == "":
|
||||||
return ("No incoming email address provided.", 400)
|
return ("No incoming email address provided.", 400)
|
||||||
if not validate_email(source, mode='alias'):
|
if not validate_email(source, mode='alias'):
|
||||||
return ("Invalid incoming email address (%s)." % source, 400)
|
return ("Invalid incoming email address (%s)." % source, 400)
|
||||||
|
|
||||||
|
# extra checks for email addresses used in domain control validation
|
||||||
|
is_dcv_source = is_dcv_address(source)
|
||||||
|
|
||||||
# validate destination
|
# validate destination
|
||||||
dests = []
|
dests = []
|
||||||
destination = destination.strip()
|
destination = destination.strip()
|
||||||
if validate_email(destination, mode='alias'):
|
|
||||||
# Oostfix allows a single @domain.tld as the destination, which means
|
# Postfix allows a single @domain.tld as the destination, which means
|
||||||
# the local part on the address is preserved in the rewrite.
|
# the local part on the address is preserved in the rewrite. We must
|
||||||
dests.append(sanitize_idn_email_address(destination))
|
# try to convert Unicode to IDNA first before validating that it's a
|
||||||
|
# legitimate alias address. Don't allow this sort of rewriting for
|
||||||
|
# DCV source addresses.
|
||||||
|
d1 = sanitize_idn_email_address(destination)
|
||||||
|
if validate_email(d1, mode='alias') and not is_dcv_source:
|
||||||
|
dests.append(d1)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
# Parse comma and \n-separated destination emails & validate. In this
|
# Parse comma and \n-separated destination emails & validate. In this
|
||||||
# case, the recipients must be complete email addresses.
|
# case, the recipients must be complete email addresses.
|
||||||
for line in destination.split("\n"):
|
for line in destination.split("\n"):
|
||||||
for email in line.split(","):
|
for email in line.split(","):
|
||||||
email = email.strip()
|
email = email.strip()
|
||||||
email = sanitize_idn_email_address(email) # Unicode => IDNA
|
|
||||||
if email == "": continue
|
if email == "": continue
|
||||||
|
email = sanitize_idn_email_address(email) # Unicode => IDNA
|
||||||
if not validate_email(email):
|
if not validate_email(email):
|
||||||
return ("Invalid destination email address (%s)." % email, 400)
|
return ("Invalid destination email address (%s)." % email, 400)
|
||||||
|
if is_dcv_source and not is_dcv_address(email) and "admin" not in get_mail_user_privileges(email, env, empty_on_error=True):
|
||||||
|
# Make domain control validation hijacking a little harder to mess up by
|
||||||
|
# requiring aliases for email addresses typically used in DCV to forward
|
||||||
|
# only to accounts that are administrators on this system.
|
||||||
|
return ("This alias can only have administrators of this system as destinations because the address is frequently used for domain control validation.", 400)
|
||||||
dests.append(email)
|
dests.append(email)
|
||||||
if len(destination) == 0:
|
if len(destination) == 0:
|
||||||
return ("No destination email address(es) provided.", 400)
|
return ("No destination email address(es) provided.", 400)
|
||||||
@@ -462,7 +472,7 @@ def add_mail_alias(source, destination, env, update_if_exists=False, do_kick=Tru
|
|||||||
return kick(env, return_status)
|
return kick(env, return_status)
|
||||||
|
|
||||||
def remove_mail_alias(source, env, do_kick=True):
|
def remove_mail_alias(source, env, do_kick=True):
|
||||||
# accept IDNA domain names but normalize to Unicode before going into database
|
# convert Unicode domain to IDNA
|
||||||
source = sanitize_idn_email_address(source)
|
source = sanitize_idn_email_address(source)
|
||||||
|
|
||||||
# remove
|
# remove
|
||||||
@@ -483,15 +493,17 @@ def get_required_aliases(env):
|
|||||||
# These are the aliases that must exist.
|
# These are the aliases that must exist.
|
||||||
aliases = set()
|
aliases = set()
|
||||||
|
|
||||||
|
# The system administrator alias is required.
|
||||||
|
aliases.add(get_system_administrator(env))
|
||||||
|
|
||||||
# The hostmaster alias is exposed in the DNS SOA for each zone.
|
# The hostmaster alias is exposed in the DNS SOA for each zone.
|
||||||
aliases.add("hostmaster@" + env['PRIMARY_HOSTNAME'])
|
aliases.add("hostmaster@" + env['PRIMARY_HOSTNAME'])
|
||||||
|
|
||||||
# Get a list of domains we serve mail for, except ones for which the only
|
# Get a list of domains we serve mail for, except ones for which the only
|
||||||
# email on that domain is a postmaster/admin alias to the administrator
|
# email on that domain are the required aliases or a catch-all/domain-forwarder.
|
||||||
# or a wildcard alias (since it will forward postmaster/admin).
|
|
||||||
real_mail_domains = get_mail_domains(env,
|
real_mail_domains = get_mail_domains(env,
|
||||||
filter_aliases = lambda alias :
|
filter_aliases = lambda alias :
|
||||||
((not alias[0].startswith("postmaster@") and not alias[0].startswith("admin@")) or alias[1] != get_system_administrator(env))
|
not alias[0].startswith("postmaster@") and not alias[0].startswith("admin@")
|
||||||
and not alias[0].startswith("@")
|
and not alias[0].startswith("@")
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -508,7 +520,7 @@ def get_required_aliases(env):
|
|||||||
def kick(env, mail_result=None):
|
def kick(env, mail_result=None):
|
||||||
results = []
|
results = []
|
||||||
|
|
||||||
# Inclde the current operation's result in output.
|
# Include the current operation's result in output.
|
||||||
|
|
||||||
if mail_result is not None:
|
if mail_result is not None:
|
||||||
results.append(mail_result + "\n")
|
results.append(mail_result + "\n")
|
||||||
@@ -528,12 +540,12 @@ def kick(env, mail_result=None):
|
|||||||
for s, t in existing_aliases:
|
for s, t in existing_aliases:
|
||||||
if s == source:
|
if s == source:
|
||||||
return
|
return
|
||||||
|
|
||||||
# Doesn't exist.
|
# Doesn't exist.
|
||||||
administrator = get_system_administrator(env)
|
administrator = get_system_administrator(env)
|
||||||
add_mail_alias(source, administrator, env, do_kick=False)
|
add_mail_alias(source, administrator, env, do_kick=False)
|
||||||
results.append("added alias %s (=> %s)\n" % (source, administrator))
|
results.append("added alias %s (=> %s)\n" % (source, administrator))
|
||||||
|
|
||||||
|
|
||||||
for alias in required_aliases:
|
for alias in required_aliases:
|
||||||
ensure_admin_alias_exists(alias)
|
ensure_admin_alias_exists(alias)
|
||||||
|
|
||||||
|
|||||||
@@ -6,23 +6,23 @@
|
|||||||
|
|
||||||
__ALL__ = ['check_certificate']
|
__ALL__ = ['check_certificate']
|
||||||
|
|
||||||
import os, os.path, re, subprocess, datetime, multiprocessing.pool
|
import sys, os, os.path, re, subprocess, datetime, multiprocessing.pool
|
||||||
|
|
||||||
import dns.reversename, dns.resolver
|
import dns.reversename, dns.resolver
|
||||||
import dateutil.parser, dateutil.tz
|
import dateutil.parser, dateutil.tz
|
||||||
|
|
||||||
from dns_update import get_dns_zones, build_tlsa_record, get_custom_dns_config
|
from dns_update import get_dns_zones, build_tlsa_record, get_custom_dns_config, get_secondary_dns
|
||||||
from web_update import get_web_domains, get_domain_ssl_files
|
from web_update import get_web_domains, get_default_www_redirects, get_domain_ssl_files
|
||||||
from mailconfig import get_mail_domains, get_mail_aliases
|
from mailconfig import get_mail_domains, get_mail_aliases
|
||||||
|
|
||||||
from utils import shell, sort_domains, load_env_vars_from_file
|
from utils import shell, sort_domains, load_env_vars_from_file
|
||||||
|
|
||||||
def run_checks(env, output):
|
def run_checks(rounded_values, env, output, pool):
|
||||||
# run systems checks
|
# run systems checks
|
||||||
output.add_heading("System")
|
output.add_heading("System")
|
||||||
|
|
||||||
# check that services are running
|
# check that services are running
|
||||||
if not run_services_checks(env, output):
|
if not run_services_checks(env, output, pool):
|
||||||
# If critical services are not running, stop. If bind9 isn't running,
|
# If critical services are not running, stop. If bind9 isn't running,
|
||||||
# all later DNS checks will timeout and that will take forever to
|
# all later DNS checks will timeout and that will take forever to
|
||||||
# go through, and if running over the web will cause a fastcgi timeout.
|
# go through, and if running over the web will cause a fastcgi timeout.
|
||||||
@@ -33,17 +33,32 @@ def run_checks(env, output):
|
|||||||
# that in run_services checks.)
|
# that in run_services checks.)
|
||||||
shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True)
|
shell('check_call', ["/usr/sbin/rndc", "flush"], trap=True)
|
||||||
|
|
||||||
run_system_checks(env, output)
|
run_system_checks(rounded_values, env, output)
|
||||||
|
|
||||||
# perform other checks asynchronously
|
# perform other checks asynchronously
|
||||||
|
|
||||||
pool = multiprocessing.pool.Pool(processes=1)
|
run_network_checks(env, output)
|
||||||
r1 = pool.apply_async(run_network_checks, [env])
|
run_domain_checks(rounded_values, env, output, pool)
|
||||||
r2 = run_domain_checks(env)
|
|
||||||
r1.get().playback(output)
|
|
||||||
r2.playback(output)
|
|
||||||
|
|
||||||
def run_services_checks(env, output):
|
def get_ssh_port():
|
||||||
|
# Returns ssh port
|
||||||
|
try:
|
||||||
|
output = shell('check_output', ['sshd', '-T'])
|
||||||
|
except FileNotFoundError:
|
||||||
|
# sshd is not installed. That's ok.
|
||||||
|
return None
|
||||||
|
|
||||||
|
returnNext = False
|
||||||
|
for e in output.split():
|
||||||
|
if returnNext:
|
||||||
|
return int(e)
|
||||||
|
if e == "port":
|
||||||
|
returnNext = True
|
||||||
|
|
||||||
|
# Did not find port!
|
||||||
|
return None
|
||||||
|
|
||||||
|
def run_services_checks(env, output, pool):
|
||||||
# Check that system services are running.
|
# Check that system services are running.
|
||||||
|
|
||||||
services = [
|
services = [
|
||||||
@@ -54,11 +69,12 @@ def run_services_checks(env, output):
|
|||||||
{ "name": "Postgrey", "port": 10023, "public": False, },
|
{ "name": "Postgrey", "port": 10023, "public": False, },
|
||||||
{ "name": "Spamassassin", "port": 10025, "public": False, },
|
{ "name": "Spamassassin", "port": 10025, "public": False, },
|
||||||
{ "name": "OpenDKIM", "port": 8891, "public": False, },
|
{ "name": "OpenDKIM", "port": 8891, "public": False, },
|
||||||
|
{ "name": "OpenDMARC", "port": 8893, "public": False, },
|
||||||
{ "name": "Memcached", "port": 11211, "public": False, },
|
{ "name": "Memcached", "port": 11211, "public": False, },
|
||||||
{ "name": "Sieve (dovecot)", "port": 4190, "public": True, },
|
{ "name": "Sieve (dovecot)", "port": 4190, "public": False, },
|
||||||
{ "name": "Mail-in-a-Box Management Daemon", "port": 10222, "public": False, },
|
{ "name": "Mail-in-a-Box Management Daemon", "port": 10222, "public": False, },
|
||||||
|
|
||||||
{ "name": "SSH Login (ssh)", "port": 22, "public": True, },
|
{ "name": "SSH Login (ssh)", "port": get_ssh_port(), "public": True, },
|
||||||
{ "name": "Public DNS (nsd4)", "port": 53, "public": True, },
|
{ "name": "Public DNS (nsd4)", "port": 53, "public": True, },
|
||||||
{ "name": "Incoming Mail (SMTP/postfix)", "port": 25, "public": True, },
|
{ "name": "Incoming Mail (SMTP/postfix)", "port": 25, "public": True, },
|
||||||
{ "name": "Outgoing Mail (SMTP 587/postfix)", "port": 587, "public": True, },
|
{ "name": "Outgoing Mail (SMTP 587/postfix)", "port": 587, "public": True, },
|
||||||
@@ -70,9 +86,9 @@ def run_services_checks(env, output):
|
|||||||
|
|
||||||
all_running = True
|
all_running = True
|
||||||
fatal = False
|
fatal = False
|
||||||
pool = multiprocessing.pool.Pool(processes=10)
|
|
||||||
ret = pool.starmap(check_service, ((i, service, env) for i, service in enumerate(services)), chunksize=1)
|
ret = pool.starmap(check_service, ((i, service, env) for i, service in enumerate(services)), chunksize=1)
|
||||||
for i, running, fatal2, output2 in sorted(ret):
|
for i, running, fatal2, output2 in sorted(ret):
|
||||||
|
if output2 is None: continue # skip check (e.g. no port was set, e.g. no sshd)
|
||||||
all_running = all_running and running
|
all_running = all_running and running
|
||||||
fatal = fatal or fatal2
|
fatal = fatal or fatal2
|
||||||
output2.playback(output)
|
output2.playback(output)
|
||||||
@@ -83,6 +99,10 @@ def run_services_checks(env, output):
|
|||||||
return not fatal
|
return not fatal
|
||||||
|
|
||||||
def check_service(i, service, env):
|
def check_service(i, service, env):
|
||||||
|
if not service["port"]:
|
||||||
|
# Skip check (no port, e.g. no sshd).
|
||||||
|
return (i, None, None, None)
|
||||||
|
|
||||||
import socket
|
import socket
|
||||||
output = BufferedOutput()
|
output = BufferedOutput()
|
||||||
running = False
|
running = False
|
||||||
@@ -90,13 +110,28 @@ def check_service(i, service, env):
|
|||||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
s.settimeout(1)
|
s.settimeout(1)
|
||||||
try:
|
try:
|
||||||
s.connect((
|
try:
|
||||||
"127.0.0.1" if not service["public"] else env['PUBLIC_IP'],
|
s.connect((
|
||||||
service["port"]))
|
"127.0.0.1" if not service["public"] else env['PUBLIC_IP'],
|
||||||
running = True
|
service["port"]))
|
||||||
|
running = True
|
||||||
|
except OSError as e1:
|
||||||
|
if service["public"] and service["port"] != 53:
|
||||||
|
# For public services (except DNS), try the private IP as a fallback.
|
||||||
|
s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
s1.settimeout(1)
|
||||||
|
try:
|
||||||
|
s1.connect(("127.0.0.1", service["port"]))
|
||||||
|
output.print_error("%s is running but is not publicly accessible at %s:%d (%s)." % (service['name'], env['PUBLIC_IP'], service['port'], str(e1)))
|
||||||
|
except:
|
||||||
|
raise e1
|
||||||
|
finally:
|
||||||
|
s1.close()
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
output.print_error("%s is not running (%s)." % (service['name'], str(e)))
|
output.print_error("%s is not running (%s; port %d)." % (service['name'], str(e), service['port']))
|
||||||
|
|
||||||
# Why is nginx not running?
|
# Why is nginx not running?
|
||||||
if service["port"] in (80, 443):
|
if service["port"] in (80, 443):
|
||||||
@@ -110,11 +145,11 @@ def check_service(i, service, env):
|
|||||||
|
|
||||||
return (i, running, fatal, output)
|
return (i, running, fatal, output)
|
||||||
|
|
||||||
def run_system_checks(env, output):
|
def run_system_checks(rounded_values, env, output):
|
||||||
check_ssh_password(env, output)
|
check_ssh_password(env, output)
|
||||||
check_software_updates(env, output)
|
check_software_updates(env, output)
|
||||||
check_system_aliases(env, output)
|
check_system_aliases(env, output)
|
||||||
check_free_disk_space(env, output)
|
check_free_disk_space(rounded_values, env, output)
|
||||||
|
|
||||||
def check_ssh_password(env, output):
|
def check_ssh_password(env, output):
|
||||||
# Check that SSH login with password is disabled. The openssh-server
|
# Check that SSH login with password is disabled. The openssh-server
|
||||||
@@ -147,14 +182,17 @@ def check_software_updates(env, output):
|
|||||||
def check_system_aliases(env, output):
|
def check_system_aliases(env, output):
|
||||||
# Check that the administrator alias exists since that's where all
|
# Check that the administrator alias exists since that's where all
|
||||||
# admin email is automatically directed.
|
# admin email is automatically directed.
|
||||||
check_alias_exists("administrator@" + env['PRIMARY_HOSTNAME'], env, output)
|
check_alias_exists("System administrator address", "administrator@" + env['PRIMARY_HOSTNAME'], env, output)
|
||||||
|
|
||||||
def check_free_disk_space(env, output):
|
def check_free_disk_space(rounded_values, env, output):
|
||||||
# Check free disk space.
|
# Check free disk space.
|
||||||
st = os.statvfs(env['STORAGE_ROOT'])
|
st = os.statvfs(env['STORAGE_ROOT'])
|
||||||
bytes_total = st.f_blocks * st.f_frsize
|
bytes_total = st.f_blocks * st.f_frsize
|
||||||
bytes_free = st.f_bavail * st.f_frsize
|
bytes_free = st.f_bavail * st.f_frsize
|
||||||
disk_msg = "The disk has %s GB space remaining." % str(round(bytes_free/1024.0/1024.0/1024.0*10.0)/10.0)
|
if not rounded_values:
|
||||||
|
disk_msg = "The disk has %s GB space remaining." % str(round(bytes_free/1024.0/1024.0/1024.0*10.0)/10)
|
||||||
|
else:
|
||||||
|
disk_msg = "The disk has less than %s%% space left." % str(round(bytes_free/bytes_total/10 + .5)*10)
|
||||||
if bytes_free > .3 * bytes_total:
|
if bytes_free > .3 * bytes_total:
|
||||||
output.print_ok(disk_msg)
|
output.print_ok(disk_msg)
|
||||||
elif bytes_free > .15 * bytes_total:
|
elif bytes_free > .15 * bytes_total:
|
||||||
@@ -162,10 +200,9 @@ def check_free_disk_space(env, output):
|
|||||||
else:
|
else:
|
||||||
output.print_error(disk_msg)
|
output.print_error(disk_msg)
|
||||||
|
|
||||||
def run_network_checks(env):
|
def run_network_checks(env, output):
|
||||||
# Also see setup/network-checks.sh.
|
# Also see setup/network-checks.sh.
|
||||||
|
|
||||||
output = BufferedOutput()
|
|
||||||
output.add_heading("Network")
|
output.add_heading("Network")
|
||||||
|
|
||||||
# Stop if we cannot make an outbound connection on port 25. Many residential
|
# Stop if we cannot make an outbound connection on port 25. Many residential
|
||||||
@@ -193,9 +230,7 @@ def run_network_checks(env):
|
|||||||
which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
|
which may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/%s."""
|
||||||
% (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))
|
% (env['PUBLIC_IP'], zen, env['PUBLIC_IP']))
|
||||||
|
|
||||||
return output
|
def run_domain_checks(rounded_time, env, output, pool):
|
||||||
|
|
||||||
def run_domain_checks(env):
|
|
||||||
# Get the list of domains we handle mail for.
|
# Get the list of domains we handle mail for.
|
||||||
mail_domains = get_mail_domains(env)
|
mail_domains = get_mail_domains(env)
|
||||||
|
|
||||||
@@ -204,29 +239,27 @@ def run_domain_checks(env):
|
|||||||
dns_domains = set(dns_zonefiles)
|
dns_domains = set(dns_zonefiles)
|
||||||
|
|
||||||
# Get the list of domains we serve HTTPS for.
|
# Get the list of domains we serve HTTPS for.
|
||||||
web_domains = set(get_web_domains(env))
|
web_domains = set(get_web_domains(env) + get_default_www_redirects(env))
|
||||||
|
|
||||||
domains_to_check = mail_domains | dns_domains | web_domains
|
domains_to_check = mail_domains | dns_domains | web_domains
|
||||||
|
|
||||||
# Serial version:
|
# Serial version:
|
||||||
#for domain in sort_domains(domains_to_check, env):
|
#for domain in sort_domains(domains_to_check, env):
|
||||||
# run_domain_checks_on_domain(domain, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
|
# run_domain_checks_on_domain(domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
|
||||||
|
|
||||||
# Parallelize the checks across a worker pool.
|
# Parallelize the checks across a worker pool.
|
||||||
args = ((domain, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
|
args = ((domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains)
|
||||||
for domain in domains_to_check)
|
for domain in domains_to_check)
|
||||||
pool = multiprocessing.pool.Pool(processes=10)
|
|
||||||
ret = pool.starmap(run_domain_checks_on_domain, args, chunksize=1)
|
ret = pool.starmap(run_domain_checks_on_domain, args, chunksize=1)
|
||||||
ret = dict(ret) # (domain, output) => { domain: output }
|
ret = dict(ret) # (domain, output) => { domain: output }
|
||||||
output = BufferedOutput()
|
|
||||||
for domain in sort_domains(ret, env):
|
for domain in sort_domains(ret, env):
|
||||||
ret[domain].playback(output)
|
ret[domain].playback(output)
|
||||||
return output
|
|
||||||
|
|
||||||
def run_domain_checks_on_domain(domain, env, dns_domains, dns_zonefiles, mail_domains, web_domains):
|
def run_domain_checks_on_domain(domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains):
|
||||||
output = BufferedOutput()
|
output = BufferedOutput()
|
||||||
|
|
||||||
output.add_heading(domain)
|
# The domain is IDNA-encoded, but for display use Unicode.
|
||||||
|
output.add_heading(domain.encode('ascii').decode('idna'))
|
||||||
|
|
||||||
if domain == env["PRIMARY_HOSTNAME"]:
|
if domain == env["PRIMARY_HOSTNAME"]:
|
||||||
check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles)
|
check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles)
|
||||||
@@ -238,7 +271,7 @@ def run_domain_checks_on_domain(domain, env, dns_domains, dns_zonefiles, mail_do
|
|||||||
check_mail_domain(domain, env, output)
|
check_mail_domain(domain, env, output)
|
||||||
|
|
||||||
if domain in web_domains:
|
if domain in web_domains:
|
||||||
check_web_domain(domain, env, output)
|
check_web_domain(domain, rounded_time, env, output)
|
||||||
|
|
||||||
if domain in dns_domains:
|
if domain in dns_domains:
|
||||||
check_dns_zone_suggestions(domain, env, output, dns_zonefiles)
|
check_dns_zone_suggestions(domain, env, output, dns_zonefiles)
|
||||||
@@ -247,29 +280,40 @@ def run_domain_checks_on_domain(domain, env, dns_domains, dns_zonefiles, mail_do
|
|||||||
|
|
||||||
def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
|
def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
|
||||||
# If a DS record is set on the zone containing this domain, check DNSSEC now.
|
# If a DS record is set on the zone containing this domain, check DNSSEC now.
|
||||||
|
has_dnssec = False
|
||||||
for zone in dns_domains:
|
for zone in dns_domains:
|
||||||
if zone == domain or domain.endswith("." + zone):
|
if zone == domain or domain.endswith("." + zone):
|
||||||
if query_dns(zone, "DS", nxdomain=None) is not None:
|
if query_dns(zone, "DS", nxdomain=None) is not None:
|
||||||
|
has_dnssec = True
|
||||||
check_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True)
|
check_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True)
|
||||||
|
|
||||||
|
ip = query_dns(domain, "A")
|
||||||
|
ns_ips = query_dns("ns1." + domain, "A") + '/' + query_dns("ns2." + domain, "A")
|
||||||
|
|
||||||
# Check that the ns1/ns2 hostnames resolve to A records. This information probably
|
# Check that the ns1/ns2 hostnames resolve to A records. This information probably
|
||||||
# comes from the TLD since the information is set at the registrar as glue records.
|
# comes from the TLD since the information is set at the registrar as glue records.
|
||||||
# We're probably not actually checking that here but instead checking that we, as
|
# We're probably not actually checking that here but instead checking that we, as
|
||||||
# the nameserver, are reporting the right info --- but if the glue is incorrect this
|
# the nameserver, are reporting the right info --- but if the glue is incorrect this
|
||||||
# will probably fail.
|
# will probably fail.
|
||||||
ip = query_dns("ns1." + domain, "A") + '/' + query_dns("ns2." + domain, "A")
|
if ns_ips == env['PUBLIC_IP'] + '/' + env['PUBLIC_IP']:
|
||||||
if ip == env['PUBLIC_IP'] + '/' + env['PUBLIC_IP']:
|
output.print_ok("Nameserver glue records are correct at registrar. [ns1/ns2.%s ↦ %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
|
||||||
output.print_ok("Nameserver glue records are correct at registrar. [ns1/ns2.%s => %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
|
|
||||||
|
elif ip == env['PUBLIC_IP']:
|
||||||
|
# The NS records are not what we expect, but the domain resolves correctly, so
|
||||||
|
# the user may have set up external DNS. List this discrepancy as a warning.
|
||||||
|
output.print_warning("""Nameserver glue records (ns1.%s and ns2.%s) should be configured at your domain name
|
||||||
|
registrar as having the IP address of this box (%s). They currently report addresses of %s. If you have set up External DNS, this may be OK."""
|
||||||
|
% (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))
|
||||||
|
|
||||||
else:
|
else:
|
||||||
output.print_error("""Nameserver glue records are incorrect. The ns1.%s and ns2.%s nameservers must be configured at your domain name
|
output.print_error("""Nameserver glue records are incorrect. The ns1.%s and ns2.%s nameservers must be configured at your domain name
|
||||||
registrar as having the IP address %s. They currently report addresses of %s. It may take several hours for
|
registrar as having the IP address %s. They currently report addresses of %s. It may take several hours for
|
||||||
public DNS to update after a change."""
|
public DNS to update after a change."""
|
||||||
% (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ip))
|
% (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))
|
||||||
|
|
||||||
# Check that PRIMARY_HOSTNAME resolves to PUBLIC_IP in public DNS.
|
# Check that PRIMARY_HOSTNAME resolves to PUBLIC_IP in public DNS.
|
||||||
ip = query_dns(domain, "A")
|
|
||||||
if ip == env['PUBLIC_IP']:
|
if ip == env['PUBLIC_IP']:
|
||||||
output.print_ok("Domain resolves to box's IP address. [%s => %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
|
output.print_ok("Domain resolves to box's IP address. [%s ↦ %s]" % (env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))
|
||||||
else:
|
else:
|
||||||
output.print_error("""This domain must resolve to your box's IP address (%s) in public DNS but it currently resolves
|
output.print_error("""This domain must resolve to your box's IP address (%s) in public DNS but it currently resolves
|
||||||
to %s. It may take several hours for public DNS to update after a change. This problem may result from other
|
to %s. It may take several hours for public DNS to update after a change. This problem may result from other
|
||||||
@@ -281,7 +325,7 @@ def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
|
|||||||
ipaddr_rev = dns.reversename.from_address(env['PUBLIC_IP'])
|
ipaddr_rev = dns.reversename.from_address(env['PUBLIC_IP'])
|
||||||
existing_rdns = query_dns(ipaddr_rev, "PTR")
|
existing_rdns = query_dns(ipaddr_rev, "PTR")
|
||||||
if existing_rdns == domain:
|
if existing_rdns == domain:
|
||||||
output.print_ok("Reverse DNS is set correctly at ISP. [%s => %s]" % (env['PUBLIC_IP'], env['PRIMARY_HOSTNAME']))
|
output.print_ok("Reverse DNS is set correctly at ISP. [%s ↦ %s]" % (env['PUBLIC_IP'], env['PRIMARY_HOSTNAME']))
|
||||||
else:
|
else:
|
||||||
output.print_error("""Your box's reverse DNS is currently %s, but it should be %s. Your ISP or cloud provider will have instructions
|
output.print_error("""Your box's reverse DNS is currently %s, but it should be %s. Your ISP or cloud provider will have instructions
|
||||||
on setting up reverse DNS for your box at %s.""" % (existing_rdns, domain, env['PUBLIC_IP']) )
|
on setting up reverse DNS for your box at %s.""" % (existing_rdns, domain, env['PUBLIC_IP']) )
|
||||||
@@ -293,19 +337,22 @@ def check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):
|
|||||||
if tlsa25 == tlsa25_expected:
|
if tlsa25 == tlsa25_expected:
|
||||||
output.print_ok("""The DANE TLSA record for incoming mail is correct (%s).""" % tlsa_qname,)
|
output.print_ok("""The DANE TLSA record for incoming mail is correct (%s).""" % tlsa_qname,)
|
||||||
elif tlsa25 is None:
|
elif tlsa25 is None:
|
||||||
output.print_error("""The DANE TLSA record for incoming mail is not set. This is optional.""")
|
if has_dnssec:
|
||||||
|
# Omit a warning about it not being set if DNSSEC isn't enabled,
|
||||||
|
# since TLSA shouldn't be used without DNSSEC.
|
||||||
|
output.print_warning("""The DANE TLSA record for incoming mail is not set. This is optional.""")
|
||||||
else:
|
else:
|
||||||
output.print_error("""The DANE TLSA record for incoming mail (%s) is not correct. It is '%s' but it should be '%s'.
|
output.print_error("""The DANE TLSA record for incoming mail (%s) is not correct. It is '%s' but it should be '%s'.
|
||||||
It may take several hours for public DNS to update after a change."""
|
It may take several hours for public DNS to update after a change."""
|
||||||
% (tlsa_qname, tlsa25, tlsa25_expected))
|
% (tlsa_qname, tlsa25, tlsa25_expected))
|
||||||
|
|
||||||
# Check that the hostmaster@ email address exists.
|
# Check that the hostmaster@ email address exists.
|
||||||
check_alias_exists("hostmaster@" + domain, env, output)
|
check_alias_exists("Hostmaster contact address", "hostmaster@" + domain, env, output)
|
||||||
|
|
||||||
def check_alias_exists(alias, env, output):
|
def check_alias_exists(alias_name, alias, env, output):
|
||||||
mail_alises = dict(get_mail_aliases(env))
|
mail_alises = dict(get_mail_aliases(env))
|
||||||
if alias in mail_alises:
|
if alias in mail_alises:
|
||||||
output.print_ok("%s exists as a mail alias [=> %s]" % (alias, mail_alises[alias]))
|
output.print_ok("%s exists as a mail alias. [%s ↦ %s]" % (alias_name, alias, mail_alises[alias]))
|
||||||
else:
|
else:
|
||||||
output.print_error("""You must add a mail alias for %s and direct email to you or another administrator.""" % alias)
|
output.print_error("""You must add a mail alias for %s and direct email to you or another administrator.""" % alias)
|
||||||
|
|
||||||
@@ -321,14 +368,20 @@ def check_dns_zone(domain, env, output, dns_zonefiles):
|
|||||||
# whois information -- we may be getting the NS records from us rather than
|
# whois information -- we may be getting the NS records from us rather than
|
||||||
# the TLD, and so we're not actually checking the TLD. For that we'd need
|
# the TLD, and so we're not actually checking the TLD. For that we'd need
|
||||||
# to do a DNS trace.
|
# to do a DNS trace.
|
||||||
custom_dns = get_custom_dns_config(env)
|
ip = query_dns(domain, "A")
|
||||||
|
secondary_ns = get_secondary_dns(get_custom_dns_config(env)) or "ns2." + env['PRIMARY_HOSTNAME']
|
||||||
existing_ns = query_dns(domain, "NS")
|
existing_ns = query_dns(domain, "NS")
|
||||||
correct_ns = "; ".join(sorted([
|
correct_ns = "; ".join(sorted([
|
||||||
"ns1." + env['PRIMARY_HOSTNAME'],
|
"ns1." + env['PRIMARY_HOSTNAME'],
|
||||||
custom_dns.get("_secondary_nameserver", "ns2." + env['PRIMARY_HOSTNAME']),
|
secondary_ns,
|
||||||
]))
|
]))
|
||||||
if existing_ns.lower() == correct_ns.lower():
|
if existing_ns.lower() == correct_ns.lower():
|
||||||
output.print_ok("Nameservers are set correctly at registrar. [%s]" % correct_ns)
|
output.print_ok("Nameservers are set correctly at registrar. [%s]" % correct_ns)
|
||||||
|
elif ip == env['PUBLIC_IP']:
|
||||||
|
# The domain resolves correctly, so maybe the user is using External DNS.
|
||||||
|
output.print_warning("""The nameservers set on this domain at your domain name registrar should be %s. They are currently %s.
|
||||||
|
If you are using External DNS, this may be OK."""
|
||||||
|
% (correct_ns, existing_ns) )
|
||||||
else:
|
else:
|
||||||
output.print_error("""The nameservers set on this domain are incorrect. They are currently %s. Use your domain name registrar's
|
output.print_error("""The nameservers set on this domain are incorrect. They are currently %s. Use your domain name registrar's
|
||||||
control panel to set the nameservers to %s."""
|
control panel to set the nameservers to %s."""
|
||||||
@@ -367,7 +420,7 @@ def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
|
|||||||
else:
|
else:
|
||||||
if ds == None:
|
if ds == None:
|
||||||
if is_checking_primary: return
|
if is_checking_primary: return
|
||||||
output.print_error("""This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC.
|
output.print_warning("""This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC.
|
||||||
To set a DS record, you must follow the instructions provided by your domain name registrar and provide to them this information:""")
|
To set a DS record, you must follow the instructions provided by your domain name registrar and provide to them this information:""")
|
||||||
else:
|
else:
|
||||||
if is_checking_primary:
|
if is_checking_primary:
|
||||||
@@ -399,13 +452,17 @@ def check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):
|
|||||||
def check_mail_domain(domain, env, output):
|
def check_mail_domain(domain, env, output):
|
||||||
# Check the MX record.
|
# Check the MX record.
|
||||||
|
|
||||||
|
recommended_mx = "10 " + env['PRIMARY_HOSTNAME']
|
||||||
mx = query_dns(domain, "MX", nxdomain=None)
|
mx = query_dns(domain, "MX", nxdomain=None)
|
||||||
expected_mx = "10 " + env['PRIMARY_HOSTNAME']
|
|
||||||
|
|
||||||
if mx == expected_mx:
|
if mx is None:
|
||||||
output.print_ok("Domain's email is directed to this domain. [%s => %s]" % (domain, mx))
|
mxhost = None
|
||||||
|
else:
|
||||||
|
# query_dns returns a semicolon-delimited list
|
||||||
|
# of priority-host pairs.
|
||||||
|
mxhost = mx.split('; ')[0].split(' ')[1]
|
||||||
|
|
||||||
elif mx == None:
|
if mxhost == None:
|
||||||
# A missing MX record is okay on the primary hostname because
|
# A missing MX record is okay on the primary hostname because
|
||||||
# the primary hostname's A record (the MX fallback) is... itself,
|
# the primary hostname's A record (the MX fallback) is... itself,
|
||||||
# which is what we want the MX to be.
|
# which is what we want the MX to be.
|
||||||
@@ -423,15 +480,22 @@ def check_mail_domain(domain, env, output):
|
|||||||
else:
|
else:
|
||||||
output.print_error("""This domain's DNS MX record is not set. It should be '%s'. Mail will not
|
output.print_error("""This domain's DNS MX record is not set. It should be '%s'. Mail will not
|
||||||
be delivered to this box. It may take several hours for public DNS to update after a
|
be delivered to this box. It may take several hours for public DNS to update after a
|
||||||
change. This problem may result from other issues listed here.""" % (expected_mx,))
|
change. This problem may result from other issues listed here.""" % (recommended_mx,))
|
||||||
|
|
||||||
|
elif mxhost == env['PRIMARY_HOSTNAME']:
|
||||||
|
good_news = "Domain's email is directed to this domain. [%s ↦ %s]" % (domain, mx)
|
||||||
|
if mx != recommended_mx:
|
||||||
|
good_news += " This configuration is non-standard. The recommended configuration is '%s'." % (recommended_mx,)
|
||||||
|
output.print_ok(good_news)
|
||||||
else:
|
else:
|
||||||
output.print_error("""This domain's DNS MX record is incorrect. It is currently set to '%s' but should be '%s'. Mail will not
|
output.print_error("""This domain's DNS MX record is incorrect. It is currently set to '%s' but should be '%s'. Mail will not
|
||||||
be delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from
|
be delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from
|
||||||
other issues listed here.""" % (mx, expected_mx))
|
other issues listed here.""" % (mx, recommended_mx))
|
||||||
|
|
||||||
# Check that the postmaster@ email address exists.
|
# Check that the postmaster@ email address exists. Not required if the domain has a
|
||||||
check_alias_exists("postmaster@" + domain, env, output)
|
# catch-all address or domain alias.
|
||||||
|
if "@" + domain not in dict(get_mail_aliases(env)):
|
||||||
|
check_alias_exists("Postmaster contact address", "postmaster@" + domain, env, output)
|
||||||
|
|
||||||
# Stop if the domain is listed in the Spamhaus Domain Block List.
|
# Stop if the domain is listed in the Spamhaus Domain Block List.
|
||||||
# The user might have chosen a domain that was previously in use by a spammer
|
# The user might have chosen a domain that was previously in use by a spammer
|
||||||
@@ -444,14 +508,14 @@ def check_mail_domain(domain, env, output):
|
|||||||
which may prevent recipients from receiving your mail.
|
which may prevent recipients from receiving your mail.
|
||||||
See http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/%s.""" % (dbl, domain))
|
See http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/%s.""" % (dbl, domain))
|
||||||
|
|
||||||
def check_web_domain(domain, env, output):
|
def check_web_domain(domain, rounded_time, env, output):
|
||||||
# See if the domain's A record resolves to our PUBLIC_IP. This is already checked
|
# See if the domain's A record resolves to our PUBLIC_IP. This is already checked
|
||||||
# for PRIMARY_HOSTNAME, for which it is required for mail specifically. For it and
|
# for PRIMARY_HOSTNAME, for which it is required for mail specifically. For it and
|
||||||
# other domains, it is required to access its website.
|
# other domains, it is required to access its website.
|
||||||
if domain != env['PRIMARY_HOSTNAME']:
|
if domain != env['PRIMARY_HOSTNAME']:
|
||||||
ip = query_dns(domain, "A")
|
ip = query_dns(domain, "A")
|
||||||
if ip == env['PUBLIC_IP']:
|
if ip == env['PUBLIC_IP']:
|
||||||
output.print_ok("Domain resolves to this box's IP address. [%s => %s]" % (domain, env['PUBLIC_IP']))
|
output.print_ok("Domain resolves to this box's IP address. [%s ↦ %s]" % (domain, env['PUBLIC_IP']))
|
||||||
else:
|
else:
|
||||||
output.print_error("""This domain should resolve to your box's IP address (%s) if you would like the box to serve
|
output.print_error("""This domain should resolve to your box's IP address (%s) if you would like the box to serve
|
||||||
webmail or a website on this domain. The domain currently resolves to %s in public DNS. It may take several hours for
|
webmail or a website on this domain. The domain currently resolves to %s in public DNS. It may take several hours for
|
||||||
@@ -460,7 +524,7 @@ def check_web_domain(domain, env, output):
|
|||||||
# We need a SSL certificate for PRIMARY_HOSTNAME because that's where the
|
# We need a SSL certificate for PRIMARY_HOSTNAME because that's where the
|
||||||
# user will log in with IMAP or webmail. Any other domain we serve a
|
# user will log in with IMAP or webmail. Any other domain we serve a
|
||||||
# website for also needs a signed certificate.
|
# website for also needs a signed certificate.
|
||||||
check_ssl_cert(domain, env, output)
|
check_ssl_cert(domain, rounded_time, env, output)
|
||||||
|
|
||||||
def query_dns(qname, rtype, nxdomain='[Not Set]'):
|
def query_dns(qname, rtype, nxdomain='[Not Set]'):
|
||||||
# Make the qname absolute by appending a period. Without this, dns.resolver.query
|
# Make the qname absolute by appending a period. Without this, dns.resolver.query
|
||||||
@@ -487,14 +551,14 @@ def query_dns(qname, rtype, nxdomain='[Not Set]'):
|
|||||||
# can compare to a well known order.
|
# can compare to a well known order.
|
||||||
return "; ".join(sorted(str(r).rstrip('.') for r in response))
|
return "; ".join(sorted(str(r).rstrip('.') for r in response))
|
||||||
|
|
||||||
def check_ssl_cert(domain, env, output):
|
def check_ssl_cert(domain, rounded_time, env, output):
|
||||||
# Check that SSL certificate is signed.
|
# Check that SSL certificate is signed.
|
||||||
|
|
||||||
# Skip the check if the A record is not pointed here.
|
# Skip the check if the A record is not pointed here.
|
||||||
if query_dns(domain, "A", None) not in (env['PUBLIC_IP'], None): return
|
if query_dns(domain, "A", None) not in (env['PUBLIC_IP'], None): return
|
||||||
|
|
||||||
# Where is the SSL stored?
|
# Where is the SSL stored?
|
||||||
ssl_key, ssl_certificate = get_domain_ssl_files(domain, env)
|
ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
|
||||||
|
|
||||||
if not os.path.exists(ssl_certificate):
|
if not os.path.exists(ssl_certificate):
|
||||||
output.print_error("The SSL certificate file for this domain is missing.")
|
output.print_error("The SSL certificate file for this domain is missing.")
|
||||||
@@ -502,11 +566,11 @@ def check_ssl_cert(domain, env, output):
|
|||||||
|
|
||||||
# Check that the certificate is good.
|
# Check that the certificate is good.
|
||||||
|
|
||||||
cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
|
cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key, rounded_time=rounded_time)
|
||||||
|
|
||||||
if cert_status == "OK":
|
if cert_status == "OK":
|
||||||
# The certificate is ok. The details has expiry info.
|
# The certificate is ok. The details has expiry info.
|
||||||
output.print_ok("SSL certificate is signed & valid. " + cert_status_details)
|
output.print_ok("SSL certificate is signed & valid. %s %s" % (ssl_via if ssl_via else "", cert_status_details))
|
||||||
|
|
||||||
elif cert_status == "SELF-SIGNED":
|
elif cert_status == "SELF-SIGNED":
|
||||||
# Offer instructions for purchasing a signed certificate.
|
# Offer instructions for purchasing a signed certificate.
|
||||||
@@ -541,124 +605,156 @@ def check_ssl_cert(domain, env, output):
|
|||||||
output.print_line(cert_status_details)
|
output.print_line(cert_status_details)
|
||||||
output.print_line("")
|
output.print_line("")
|
||||||
|
|
||||||
def check_certificate(domain, ssl_certificate, ssl_private_key):
|
def check_certificate(domain, ssl_certificate, ssl_private_key, warn_if_expiring_soon=True, rounded_time=False, just_check_domain=False):
|
||||||
# Use openssl verify to check the status of a certificate.
|
# Check that the ssl_certificate & ssl_private_key files are good
|
||||||
|
# for the provided domain.
|
||||||
|
|
||||||
# First check that the certificate is for the right domain. The domain
|
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey
|
||||||
# must be found in the Subject Common Name (CN) or be one of the
|
from cryptography.x509 import Certificate, DNSName, ExtensionNotFound, OID_COMMON_NAME, OID_SUBJECT_ALTERNATIVE_NAME
|
||||||
# Subject Alternative Names. A wildcard might also appear as the CN
|
|
||||||
# or in the SAN list, so check for that tool.
|
|
||||||
retcode, cert_dump = shell('check_output', [
|
|
||||||
"openssl", "x509",
|
|
||||||
"-in", ssl_certificate,
|
|
||||||
"-noout", "-text", "-nameopt", "rfc2253",
|
|
||||||
], trap=True)
|
|
||||||
|
|
||||||
# If the certificate is catastrophically bad, catch that now and report it.
|
# The ssl_certificate file may contain a chain of certificates. We'll
|
||||||
# More information was probably written to stderr (which we aren't capturing),
|
# need to split that up before we can pass anything to openssl or
|
||||||
# but it is probably not helpful to the user anyway.
|
# parse them in Python. Parse it with the cryptography library.
|
||||||
if retcode != 0:
|
try:
|
||||||
return ("The SSL certificate appears to be corrupted or not a PEM-formatted SSL certificate file. (%s)" % ssl_certificate, None)
|
ssl_cert_chain = load_cert_chain(ssl_certificate)
|
||||||
|
cert = load_pem(ssl_cert_chain[0])
|
||||||
|
if not isinstance(cert, Certificate): raise ValueError("This is not a certificate file.")
|
||||||
|
except ValueError as e:
|
||||||
|
return ("There is a problem with the certificate file: %s" % str(e), None)
|
||||||
|
|
||||||
cert_dump = cert_dump.split("\n")
|
# First check that the domain name is one of the names allowed by
|
||||||
certificate_names = set()
|
# the certificate.
|
||||||
cert_expiration_date = None
|
if domain is not None:
|
||||||
while len(cert_dump) > 0:
|
# The domain must be found in the Subject Common Name (CN)...
|
||||||
line = cert_dump.pop(0)
|
certificate_names = set()
|
||||||
|
try:
|
||||||
|
certificate_names.add(
|
||||||
|
cert.subject.get_attributes_for_oid(OID_COMMON_NAME)[0].value
|
||||||
|
)
|
||||||
|
except IndexError:
|
||||||
|
# No common name? Certificate is probably generated incorrectly.
|
||||||
|
# But we'll let it error-out when it doesn't find the domain.
|
||||||
|
pass
|
||||||
|
|
||||||
# Grab from the Subject Common Name. We include the indentation
|
# ... or be one of the Subject Alternative Names.
|
||||||
# at the start of the line in case maybe the cert includes the
|
try:
|
||||||
# common name of some other referenced entity (which would be
|
sans = cert.extensions.get_extension_for_oid(OID_SUBJECT_ALTERNATIVE_NAME).value.get_values_for_type(DNSName)
|
||||||
# indented, I hope).
|
for san in sans:
|
||||||
m = re.match(" Subject: CN=([^,]+)", line)
|
certificate_names.add(san)
|
||||||
if m:
|
except ExtensionNotFound:
|
||||||
certificate_names.add(m.group(1))
|
pass
|
||||||
|
|
||||||
# Grab from the Subject Alternative Name, which is a comma-delim
|
|
||||||
# list of names, like DNS:mydomain.com, DNS:otherdomain.com.
|
|
||||||
m = re.match(" X509v3 Subject Alternative Name:", line)
|
|
||||||
if m:
|
|
||||||
names = re.split(",\s*", cert_dump.pop(0).strip())
|
|
||||||
for n in names:
|
|
||||||
m = re.match("DNS:(.*)", n)
|
|
||||||
if m:
|
|
||||||
certificate_names.add(m.group(1))
|
|
||||||
|
|
||||||
m = re.match(" Not After : (.*)", line)
|
# Check that the domain appears among the acceptable names, or a wildcard
|
||||||
if m:
|
# form of the domain name (which is a stricter check than the specs but
|
||||||
cert_expiration_date = dateutil.parser.parse(m.group(1))
|
# should work in normal cases).
|
||||||
|
wildcard_domain = re.sub("^[^\.]+", "*", domain)
|
||||||
|
if domain not in certificate_names and wildcard_domain not in certificate_names:
|
||||||
|
return ("The certificate is for the wrong domain name. It is for %s."
|
||||||
|
% ", ".join(sorted(certificate_names)), None)
|
||||||
|
|
||||||
domain = domain.encode("idna").decode("ascii")
|
# Second, check that the certificate matches the private key.
|
||||||
wildcard_domain = re.sub("^[^\.]+", "*", domain)
|
|
||||||
if domain is not None and domain not in certificate_names and wildcard_domain not in certificate_names:
|
|
||||||
return ("The certificate is for the wrong domain name. It is for %s."
|
|
||||||
% ", ".join(sorted(certificate_names)), None)
|
|
||||||
|
|
||||||
# Second, check that the certificate matches the private key. Get the modulus of the
|
|
||||||
# private key and of the public key in the certificate. They should match. The output
|
|
||||||
# of each command looks like "Modulus=XXXXX".
|
|
||||||
if ssl_private_key is not None:
|
if ssl_private_key is not None:
|
||||||
private_key_modulus = shell('check_output', [
|
priv_key = load_pem(open(ssl_private_key, 'rb').read())
|
||||||
"openssl", "rsa",
|
if not isinstance(priv_key, RSAPrivateKey):
|
||||||
"-inform", "PEM",
|
return ("The private key file %s is not a private key file." % ssl_private_key, None)
|
||||||
"-noout", "-modulus",
|
|
||||||
"-in", ssl_private_key])
|
if priv_key.public_key().public_numbers() != cert.public_key().public_numbers():
|
||||||
cert_key_modulus = shell('check_output', [
|
return ("The certificate does not correspond to the private key at %s." % ssl_private_key, None)
|
||||||
"openssl", "x509",
|
|
||||||
"-in", ssl_certificate,
|
# We could also use the openssl command line tool to get the modulus
|
||||||
"-noout", "-modulus"])
|
# listed in each file. The output of each command below looks like "Modulus=XXXXX".
|
||||||
if private_key_modulus != cert_key_modulus:
|
# $ openssl rsa -inform PEM -noout -modulus -in ssl_private_key
|
||||||
return ("The certificate installed at %s does not correspond to the private key at %s." % (ssl_certificate, ssl_private_key), None)
|
# $ openssl x509 -in ssl_certificate -noout -modulus
|
||||||
|
|
||||||
|
# Third, check if the certificate is self-signed. Return a special flag string.
|
||||||
|
if cert.issuer == cert.subject:
|
||||||
|
return ("SELF-SIGNED", None)
|
||||||
|
|
||||||
|
# When selecting which certificate to use for non-primary domains, we check if the primary
|
||||||
|
# certificate or a www-parent-domain certificate is good for the domain. There's no need
|
||||||
|
# to run extra checks beyond this point.
|
||||||
|
if just_check_domain:
|
||||||
|
return ("OK", None)
|
||||||
|
|
||||||
|
# Check that the certificate hasn't expired. The datetimes returned by the
|
||||||
|
# certificate are 'naive' and in UTC. We need to get the current time in UTC.
|
||||||
|
now = datetime.datetime.utcnow()
|
||||||
|
if not(cert.not_valid_before <= now <= cert.not_valid_after):
|
||||||
|
return ("The certificate has expired or is not yet valid. It is valid from %s to %s." % (cert.not_valid_before, cert.not_valid_after), None)
|
||||||
|
|
||||||
# Next validate that the certificate is valid. This checks whether the certificate
|
# Next validate that the certificate is valid. This checks whether the certificate
|
||||||
# is self-signed, that the chain of trust makes sense, that it is signed by a CA
|
# is self-signed, that the chain of trust makes sense, that it is signed by a CA
|
||||||
# that Ubuntu has installed on this machine's list of CAs, and I think that it hasn't
|
# that Ubuntu has installed on this machine's list of CAs, and I think that it hasn't
|
||||||
# expired.
|
# expired.
|
||||||
|
|
||||||
# In order to verify with openssl, we need to split out any
|
# The certificate chain has to be passed separately and is given via STDIN.
|
||||||
# intermediary certificates in the chain (if any) from our
|
|
||||||
# certificate (at the top). They need to be passed separately.
|
|
||||||
|
|
||||||
cert = open(ssl_certificate).read()
|
|
||||||
m = re.match(r'(-*BEGIN CERTIFICATE-*.*?-*END CERTIFICATE-*)(.*)', cert, re.S)
|
|
||||||
if m == None:
|
|
||||||
return ("The certificate file is an invalid PEM certificate.", None)
|
|
||||||
mycert, chaincerts = m.groups()
|
|
||||||
|
|
||||||
# This command returns a non-zero exit status in most cases, so trap errors.
|
# This command returns a non-zero exit status in most cases, so trap errors.
|
||||||
|
|
||||||
retcode, verifyoutput = shell('check_output', [
|
retcode, verifyoutput = shell('check_output', [
|
||||||
"openssl",
|
"openssl",
|
||||||
"verify", "-verbose",
|
"verify", "-verbose",
|
||||||
"-purpose", "sslserver", "-policy_check",]
|
"-purpose", "sslserver", "-policy_check",]
|
||||||
+ ([] if chaincerts.strip() == "" else ["-untrusted", "/dev/stdin"])
|
+ ([] if len(ssl_cert_chain) == 1 else ["-untrusted", "/dev/stdin"])
|
||||||
+ [ssl_certificate],
|
+ [ssl_certificate],
|
||||||
input=chaincerts.encode('ascii'),
|
input=b"\n\n".join(ssl_cert_chain[1:]),
|
||||||
trap=True)
|
trap=True)
|
||||||
|
|
||||||
if "self signed" in verifyoutput:
|
if "self signed" in verifyoutput:
|
||||||
# Certificate is self-signed.
|
# Certificate is self-signed. Probably we detected this above.
|
||||||
return ("SELF-SIGNED", None)
|
return ("SELF-SIGNED", None)
|
||||||
|
|
||||||
elif retcode != 0:
|
elif retcode != 0:
|
||||||
if "unable to get local issuer certificate" in verifyoutput:
|
if "unable to get local issuer certificate" in verifyoutput:
|
||||||
return ("The certificate is missing an intermediate chain or the intermediate chain is incorrect or incomplete. (%s)" % verifyoutput, None)
|
return ("The certificate is missing an intermediate chain or the intermediate chain is incorrect or incomplete. (%s)" % verifyoutput, None)
|
||||||
|
|
||||||
# There is some unknown problem. Return the `openssl verify` raw output.
|
# There is some unknown problem. Return the `openssl verify` raw output.
|
||||||
return ("There is a problem with the SSL certificate.", verifyoutput.strip())
|
return ("There is a problem with the SSL certificate.", verifyoutput.strip())
|
||||||
|
|
||||||
else:
|
else:
|
||||||
# `openssl verify` returned a zero exit status so the cert is currently
|
# `openssl verify` returned a zero exit status so the cert is currently
|
||||||
# good.
|
# good.
|
||||||
|
|
||||||
# But is it expiring soon?
|
# But is it expiring soon?
|
||||||
now = datetime.datetime.now(dateutil.tz.tzlocal())
|
cert_expiration_date = cert.not_valid_after
|
||||||
ndays = (cert_expiration_date-now).days
|
ndays = (cert_expiration_date-now).days
|
||||||
expiry_info = "The certificate expires in %d days on %s." % (ndays, cert_expiration_date.strftime("%x"))
|
if not rounded_time or ndays < 7:
|
||||||
if ndays <= 31:
|
expiry_info = "The certificate expires in %d days on %s." % (ndays, cert_expiration_date.strftime("%x"))
|
||||||
|
elif ndays <= 14:
|
||||||
|
expiry_info = "The certificate expires in less than two weeks, on %s." % cert_expiration_date.strftime("%x")
|
||||||
|
elif ndays <= 31:
|
||||||
|
expiry_info = "The certificate expires in less than a month, on %s." % cert_expiration_date.strftime("%x")
|
||||||
|
else:
|
||||||
|
expiry_info = "The certificate expires on %s." % cert_expiration_date.strftime("%x")
|
||||||
|
|
||||||
|
if ndays <= 31 and warn_if_expiring_soon:
|
||||||
return ("The certificate is expiring soon: " + expiry_info, None)
|
return ("The certificate is expiring soon: " + expiry_info, None)
|
||||||
|
|
||||||
# Return the special OK code.
|
# Return the special OK code.
|
||||||
return ("OK", expiry_info)
|
return ("OK", expiry_info)
|
||||||
|
|
||||||
|
def load_cert_chain(pemfile):
|
||||||
|
# A certificate .pem file may contain a chain of certificates.
|
||||||
|
# Load the file and split them apart.
|
||||||
|
re_pem = rb"(-+BEGIN (?:.+)-+[\r\n](?:[A-Za-z0-9+/=]{1,64}[\r\n])+-+END (?:.+)-+[\r\n])"
|
||||||
|
with open(pemfile, "rb") as f:
|
||||||
|
pem = f.read() + b"\n" # ensure trailing newline
|
||||||
|
pemblocks = re.findall(re_pem, pem)
|
||||||
|
if len(pemblocks) == 0:
|
||||||
|
raise ValueError("File does not contain valid PEM data.")
|
||||||
|
return pemblocks
|
||||||
|
|
||||||
|
def load_pem(pem):
|
||||||
|
# Parse a "---BEGIN .... END---" PEM string and return a Python object for it
|
||||||
|
# using classes from the cryptography package.
|
||||||
|
from cryptography.x509 import load_pem_x509_certificate
|
||||||
|
from cryptography.hazmat.primitives import serialization
|
||||||
|
from cryptography.hazmat.backends import default_backend
|
||||||
|
pem_type = re.match(b"-+BEGIN (.*?)-+\n", pem).group(1)
|
||||||
|
if pem_type == b"RSA PRIVATE KEY":
|
||||||
|
return serialization.load_pem_private_key(pem, password=None, backend=default_backend())
|
||||||
|
if pem_type == b"CERTIFICATE":
|
||||||
|
return load_pem_x509_certificate(pem, default_backend())
|
||||||
|
raise ValueError("Unsupported PEM object type: " + pem_type.decode("ascii", "replace"))
|
||||||
|
|
||||||
_apt_updates = None
|
_apt_updates = None
|
||||||
def list_apt_updates(apt_update=True):
|
def list_apt_updates(apt_update=True):
|
||||||
# See if we have this information cached recently.
|
# See if we have this information cached recently.
|
||||||
@@ -693,17 +789,123 @@ def list_apt_updates(apt_update=True):
|
|||||||
|
|
||||||
return pkgs
|
return pkgs
|
||||||
|
|
||||||
|
def what_version_is_this(env):
|
||||||
|
# This function runs `git describe` on the Mail-in-a-Box installation directory.
|
||||||
|
# Git may not be installed and Mail-in-a-Box may not have been cloned from github,
|
||||||
|
# so this function may raise all sorts of exceptions.
|
||||||
|
miab_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
tag = shell("check_output", ["/usr/bin/git", "describe"], env={"GIT_DIR": os.path.join(miab_dir, '.git')}).strip()
|
||||||
|
return tag
|
||||||
|
|
||||||
class ConsoleOutput:
|
def get_latest_miab_version():
|
||||||
try:
|
# This pings https://mailinabox.email/bootstrap.sh and extracts the tag named in
|
||||||
terminal_columns = int(shell('check_output', ['stty', 'size']).split()[1])
|
# the script to determine the current product version.
|
||||||
except:
|
import urllib.request
|
||||||
terminal_columns = 76
|
return re.search(b'TAG=(.*)', urllib.request.urlopen("https://mailinabox.email/bootstrap.sh?ping=1").read()).group(1).decode("utf8")
|
||||||
|
|
||||||
|
def run_and_output_changes(env, pool, send_via_email):
|
||||||
|
import json
|
||||||
|
from difflib import SequenceMatcher
|
||||||
|
|
||||||
|
if not send_via_email:
|
||||||
|
out = ConsoleOutput()
|
||||||
|
else:
|
||||||
|
import io
|
||||||
|
out = FileOutput(io.StringIO(""), 70)
|
||||||
|
|
||||||
|
# Run status checks.
|
||||||
|
cur = BufferedOutput()
|
||||||
|
run_checks(True, env, cur, pool)
|
||||||
|
|
||||||
|
# Load previously saved status checks.
|
||||||
|
cache_fn = "/var/cache/mailinabox/status_checks.json"
|
||||||
|
if os.path.exists(cache_fn):
|
||||||
|
prev = json.load(open(cache_fn))
|
||||||
|
|
||||||
|
# Group the serial output into categories by the headings.
|
||||||
|
def group_by_heading(lines):
|
||||||
|
from collections import OrderedDict
|
||||||
|
ret = OrderedDict()
|
||||||
|
k = []
|
||||||
|
ret["No Category"] = k
|
||||||
|
for line_type, line_args, line_kwargs in lines:
|
||||||
|
if line_type == "add_heading":
|
||||||
|
k = []
|
||||||
|
ret[line_args[0]] = k
|
||||||
|
else:
|
||||||
|
k.append((line_type, line_args, line_kwargs))
|
||||||
|
return ret
|
||||||
|
prev_status = group_by_heading(prev)
|
||||||
|
cur_status = group_by_heading(cur.buf)
|
||||||
|
|
||||||
|
# Compare the previous to the current status checks
|
||||||
|
# category by category.
|
||||||
|
for category, cur_lines in cur_status.items():
|
||||||
|
if category not in prev_status:
|
||||||
|
out.add_heading(category + " -- Added")
|
||||||
|
BufferedOutput(with_lines=cur_lines).playback(out)
|
||||||
|
else:
|
||||||
|
# Actual comparison starts here...
|
||||||
|
prev_lines = prev_status[category]
|
||||||
|
def stringify(lines):
|
||||||
|
return [json.dumps(line) for line in lines]
|
||||||
|
diff = SequenceMatcher(None, stringify(prev_lines), stringify(cur_lines)).get_opcodes()
|
||||||
|
for op, i1, i2, j1, j2 in diff:
|
||||||
|
if op == "replace":
|
||||||
|
out.add_heading(category + " -- Previously:")
|
||||||
|
elif op == "delete":
|
||||||
|
out.add_heading(category + " -- Removed")
|
||||||
|
if op in ("replace", "delete"):
|
||||||
|
BufferedOutput(with_lines=prev_lines[i1:i2]).playback(out)
|
||||||
|
|
||||||
|
if op == "replace":
|
||||||
|
out.add_heading(category + " -- Currently:")
|
||||||
|
elif op == "insert":
|
||||||
|
out.add_heading(category + " -- Added")
|
||||||
|
if op in ("replace", "insert"):
|
||||||
|
BufferedOutput(with_lines=cur_lines[j1:j2]).playback(out)
|
||||||
|
|
||||||
|
for category, prev_lines in prev_status.items():
|
||||||
|
if category not in cur_status:
|
||||||
|
out.add_heading(category)
|
||||||
|
out.print_warning("This section was removed.")
|
||||||
|
|
||||||
|
if send_via_email:
|
||||||
|
# If there were changes, send off an email.
|
||||||
|
buf = out.buf.getvalue()
|
||||||
|
if len(buf) > 0:
|
||||||
|
# create MIME message
|
||||||
|
from email.message import Message
|
||||||
|
msg = Message()
|
||||||
|
msg['From'] = "\"%s\" <administrator@%s>" % (env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'])
|
||||||
|
msg['To'] = "administrator@%s" % env['PRIMARY_HOSTNAME']
|
||||||
|
msg['Subject'] = "[%s] Status Checks Change Notice" % env['PRIMARY_HOSTNAME']
|
||||||
|
msg.set_payload(buf, "UTF-8")
|
||||||
|
|
||||||
|
# send to administrator@
|
||||||
|
import smtplib
|
||||||
|
mailserver = smtplib.SMTP('localhost', 25)
|
||||||
|
mailserver.ehlo()
|
||||||
|
mailserver.sendmail(
|
||||||
|
"administrator@%s" % env['PRIMARY_HOSTNAME'], # MAIL FROM
|
||||||
|
"administrator@%s" % env['PRIMARY_HOSTNAME'], # RCPT TO
|
||||||
|
msg.as_string())
|
||||||
|
mailserver.quit()
|
||||||
|
|
||||||
|
# Store the current status checks output for next time.
|
||||||
|
os.makedirs(os.path.dirname(cache_fn), exist_ok=True)
|
||||||
|
with open(cache_fn, "w") as f:
|
||||||
|
json.dump(cur.buf, f, indent=True)
|
||||||
|
|
||||||
|
class FileOutput:
|
||||||
|
def __init__(self, buf, width):
|
||||||
|
self.buf = buf
|
||||||
|
self.width = width
|
||||||
|
|
||||||
def add_heading(self, heading):
|
def add_heading(self, heading):
|
||||||
print()
|
print(file=self.buf)
|
||||||
print(heading)
|
print(heading, file=self.buf)
|
||||||
print("=" * len(heading))
|
print("=" * len(heading), file=self.buf)
|
||||||
|
|
||||||
def print_ok(self, message):
|
def print_ok(self, message):
|
||||||
self.print_block(message, first_line="✓ ")
|
self.print_block(message, first_line="✓ ")
|
||||||
@@ -715,28 +917,36 @@ class ConsoleOutput:
|
|||||||
self.print_block(message, first_line="? ")
|
self.print_block(message, first_line="? ")
|
||||||
|
|
||||||
def print_block(self, message, first_line=" "):
|
def print_block(self, message, first_line=" "):
|
||||||
print(first_line, end='')
|
print(first_line, end='', file=self.buf)
|
||||||
message = re.sub("\n\s*", " ", message)
|
message = re.sub("\n\s*", " ", message)
|
||||||
words = re.split("(\s+)", message)
|
words = re.split("(\s+)", message)
|
||||||
linelen = 0
|
linelen = 0
|
||||||
for w in words:
|
for w in words:
|
||||||
if linelen + len(w) > self.terminal_columns-1-len(first_line):
|
if linelen + len(w) > self.width-1-len(first_line):
|
||||||
print()
|
print(file=self.buf)
|
||||||
print(" ", end="")
|
print(" ", end="", file=self.buf)
|
||||||
linelen = 0
|
linelen = 0
|
||||||
if linelen == 0 and w.strip() == "": continue
|
if linelen == 0 and w.strip() == "": continue
|
||||||
print(w, end="")
|
print(w, end="", file=self.buf)
|
||||||
linelen += len(w)
|
linelen += len(w)
|
||||||
print()
|
print(file=self.buf)
|
||||||
|
|
||||||
def print_line(self, message, monospace=False):
|
def print_line(self, message, monospace=False):
|
||||||
for line in message.split("\n"):
|
for line in message.split("\n"):
|
||||||
self.print_block(line)
|
self.print_block(line)
|
||||||
|
|
||||||
|
class ConsoleOutput(FileOutput):
|
||||||
|
def __init__(self):
|
||||||
|
self.buf = sys.stdout
|
||||||
|
try:
|
||||||
|
self.width = int(shell('check_output', ['stty', 'size']).split()[1])
|
||||||
|
except:
|
||||||
|
self.width = 76
|
||||||
|
|
||||||
class BufferedOutput:
|
class BufferedOutput:
|
||||||
# Record all of the instance method calls so we can play them back later.
|
# Record all of the instance method calls so we can play them back later.
|
||||||
def __init__(self):
|
def __init__(self, with_lines=None):
|
||||||
self.buf = []
|
self.buf = [] if not with_lines else with_lines
|
||||||
def __getattr__(self, attr):
|
def __getattr__(self, attr):
|
||||||
if attr not in ("add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"):
|
if attr not in ("add_heading", "print_ok", "print_error", "print_warning", "print_block", "print_line"):
|
||||||
raise AttributeError
|
raise AttributeError
|
||||||
@@ -748,21 +958,31 @@ class BufferedOutput:
|
|||||||
for attr, args, kwargs in self.buf:
|
for attr, args, kwargs in self.buf:
|
||||||
getattr(output, attr)(*args, **kwargs)
|
getattr(output, attr)(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import sys
|
|
||||||
from utils import load_environment
|
from utils import load_environment
|
||||||
|
|
||||||
env = load_environment()
|
env = load_environment()
|
||||||
|
pool = multiprocessing.pool.Pool(processes=10)
|
||||||
|
|
||||||
if len(sys.argv) == 1:
|
if len(sys.argv) == 1:
|
||||||
run_checks(env, ConsoleOutput())
|
run_checks(False, env, ConsoleOutput(), pool)
|
||||||
|
|
||||||
|
elif sys.argv[1] == "--show-changes":
|
||||||
|
run_and_output_changes(env, pool, sys.argv[-1] == "--smtp")
|
||||||
|
|
||||||
elif sys.argv[1] == "--check-primary-hostname":
|
elif sys.argv[1] == "--check-primary-hostname":
|
||||||
# See if the primary hostname appears resolvable and has a signed certificate.
|
# See if the primary hostname appears resolvable and has a signed certificate.
|
||||||
domain = env['PRIMARY_HOSTNAME']
|
domain = env['PRIMARY_HOSTNAME']
|
||||||
if query_dns(domain, "A") != env['PUBLIC_IP']:
|
if query_dns(domain, "A") != env['PUBLIC_IP']:
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
ssl_key, ssl_certificate = get_domain_ssl_files(domain, env)
|
ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
|
||||||
if not os.path.exists(ssl_certificate):
|
if not os.path.exists(ssl_certificate):
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
|
cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key, warn_if_expiring_soon=False)
|
||||||
if cert_status != "OK":
|
if cert_status != "OK":
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|
||||||
|
elif sys.argv[1] == "--version":
|
||||||
|
print(what_version_is_this(env))
|
||||||
|
|||||||
@@ -27,7 +27,7 @@
|
|||||||
<label for="addaliasEmail" class="col-sm-1 control-label">Alias</label>
|
<label for="addaliasEmail" class="col-sm-1 control-label">Alias</label>
|
||||||
<div class="col-sm-10">
|
<div class="col-sm-10">
|
||||||
<input type="email" class="form-control" id="addaliasEmail">
|
<input type="email" class="form-control" id="addaliasEmail">
|
||||||
<div style="margin-top: 3px; padding-left: 3px; font-size: 90%" class="text-muted">You may use international (non-ASCII) characters, but this has not yet been well tested.</div>
|
<div style="margin-top: 3px; padding-left: 3px; font-size: 90%" class="text-muted">You may use international (non-ASCII) characters for the domain part of the email address only.</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
@@ -57,7 +57,7 @@
|
|||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
<p style="margin-top: 1.5em"><small>Hostmaster@, postmaster@, and admin@ email addresses are required on some domains.</small></p>
|
<p style="margin-top: 1.5em"><small>hostmaster@, postmaster@, and admin@ email addresses are required on some domains.</small></p>
|
||||||
|
|
||||||
<div style="display: none">
|
<div style="display: none">
|
||||||
<table>
|
<table>
|
||||||
@@ -98,8 +98,8 @@ function show_aliases() {
|
|||||||
n.attr('id', '');
|
n.attr('id', '');
|
||||||
|
|
||||||
if (alias.required) n.addClass('alias-required');
|
if (alias.required) n.addClass('alias-required');
|
||||||
n.attr('data-email', alias.source);
|
n.attr('data-email', alias.source_display); // this is decoded from IDNA, but will get re-coded to IDNA on the backend
|
||||||
n.find('td.email').text(alias.source)
|
n.find('td.email').text(alias.source_display)
|
||||||
for (var j = 0; j < alias.destination.length; j++)
|
for (var j = 0; j < alias.destination.length; j++)
|
||||||
n.find('td.target').append($("<div></div>").text(alias.destination[j]))
|
n.find('td.target').append($("<div></div>").text(alias.destination[j]))
|
||||||
$('#alias_table tbody').append(n);
|
$('#alias_table tbody').append(n);
|
||||||
|
|||||||
@@ -35,6 +35,7 @@
|
|||||||
<option value="AAAA" data-hint="Enter an IPv6 address.">AAAA (IPv6 address)</option>
|
<option value="AAAA" data-hint="Enter an IPv6 address.">AAAA (IPv6 address)</option>
|
||||||
<option value="CNAME" data-hint="Enter another domain name followed by a period at the end (e.g. mypage.github.io.).">CNAME (DNS forwarding)</option>
|
<option value="CNAME" data-hint="Enter another domain name followed by a period at the end (e.g. mypage.github.io.).">CNAME (DNS forwarding)</option>
|
||||||
<option value="TXT" data-hint="Enter arbitrary text.">TXT (text record)</option>
|
<option value="TXT" data-hint="Enter arbitrary text.">TXT (text record)</option>
|
||||||
|
<option value="MX" data-hint="Enter record in the form of PRIORIY DOMAIN., including trailing period (e.g. 20 mx.example.com.).">MX (mail exchanger)</option>
|
||||||
</select>
|
</select>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -92,44 +93,56 @@
|
|||||||
|
|
||||||
<p>Use your box’s DNS API to set custom DNS records on domains hosted here. For instance, you can create your own dynamic DNS service.</p>
|
<p>Use your box’s DNS API to set custom DNS records on domains hosted here. For instance, you can create your own dynamic DNS service.</p>
|
||||||
|
|
||||||
<p>Send a POST request like this:</p>
|
<p>Usage:</p>
|
||||||
|
|
||||||
<pre>curl -d "" --user {email}:{password} https://{{hostname}}/admin/dns/set/<b>qname</b>[/<b>rtype</b>[/<b>value</b>]]</pre>
|
<pre>curl -X <b>VERB</b> [-d "<b>value</b>"] --user {email}:{password} https://{{hostname}}/admin/dns/custom[/<b>qname</b>[/<b>rtype</b>]]</pre>
|
||||||
|
|
||||||
<h4>HTTP POST parameters</h4>
|
<p>(Brackets denote an optional argument.)</p>
|
||||||
|
|
||||||
|
<h4>Verbs</h4>
|
||||||
|
|
||||||
|
<table class="table">
|
||||||
|
<thead><th>Verb</th> <th>Usage</th></thead>
|
||||||
|
<tr><td>GET</td> <td>Returns matching custom DNS records as a JSON array of objects. Each object has the keys <code>qname</code>, <code>rtype</code>, and <code>value</code>. The optional <code>qname</code> and <code>rtype</code> parameters in the request URL filter the records returned in the response. The request body (<code>-d "..."</code>) must be omitted.</td></tr>
|
||||||
|
<tr><td>PUT</td> <td>Sets a custom DNS record replacing any existing records with the same <code>qname</code> and <code>rtype</code>. Use PUT (instead of POST) when you only have one value for a <code>qname</code> and <code>rtype</code>, such as typical <code>A</code> records (without round-robin).</td></tr>
|
||||||
|
<tr><td>POST</td> <td>Adds a new custom DNS record. Use POST when you have multiple <code>TXT</code> records or round-robin <code>A</code> records. (PUT would delete previously added records.)</td></tr>
|
||||||
|
<tr><td>DELETE</td> <td>Deletes custom DNS records. If the request body (<code>-d "..."</code>) is empty or omitted, deletes all records matching the <code>qname</code> and <code>rtype</code>. If the request body is present, deletes only the record matching the <code>qname</code>, <code>rtype</code> and value.</td></tr>
|
||||||
|
</table>
|
||||||
|
|
||||||
|
<h4>Parameters</h4>
|
||||||
|
|
||||||
<table class="table">
|
<table class="table">
|
||||||
<thead><th>Parameter</th> <th>Value</th></thead>
|
<thead><th>Parameter</th> <th>Value</th></thead>
|
||||||
<tr><td>email</td> <td>The email address of any administrative user here.</td></tr>
|
<tr><td>email</td> <td>The email address of any administrative user here.</td></tr>
|
||||||
<tr><td>password</td> <td>That user’s password.</td></tr>
|
<tr><td>password</td> <td>That user’s password.</td></tr>
|
||||||
<tr><td>qname</td> <td>The fully qualified domain name for the record you are trying to set.</td></tr>
|
<tr><td>qname</td> <td>The fully qualified domain name for the record you are trying to set. It must be one of the domain names or a subdomain of one of the domain names hosted on this box. (Add mail users or aliases to add new domains.)</td></tr>
|
||||||
<tr><td>rtype</td> <td>The resource type. <code>A</code> if omitted. Possible values: <code>A</code> (an IPv4 address), <code>AAAA</code> (an IPv6 address), <code>TXT</code> (a text string), or <code>CNAME</code> (an alias, which is a fully qualified domain name).</td></tr>
|
<tr><td>rtype</td> <td>The resource type. Defaults to <code>A</code> if omitted. Possible values: <code>A</code> (an IPv4 address), <code>AAAA</code> (an IPv6 address), <code>TXT</code> (a text string), <code>CNAME</code> (an alias, which is a fully qualified domain name — don’t forget the final period), <code>MX</code>, or <code>SRV</code>.</td></tr>
|
||||||
<tr><td>value</td> <td>The new record’s value. If omitted, the IPv4 address of the remote host is used. This is handy for dynamic DNS! To delete a record, use “__delete__”.</td></tr>
|
<tr><td>value</td> <td>For PUT, POST, and DELETE, the record’s value. If the <code>rtype</code> is <code>A</code> or <code>AAAA</code> and <code>value</code> is empty or omitted, the IPv4 or IPv6 address of the remote host is used (be sure to use the <code>-4</code> or <code>-6</code> options to curl). This is handy for dynamic DNS!</td></tr>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
<p style="margin-top: 1em">Note that <code>-d ""</code> is merely to ensure curl sends a POST request. You do not need to put anything inside the quotes. You can also pass the value using typical form encoding in the POST body.</p>
|
|
||||||
|
|
||||||
<p>Strict <a href="http://tools.ietf.org/html/rfc4408">SPF</a> and <a href="https://datatracker.ietf.org/doc/draft-kucherawy-dmarc-base/?include_text=1">DMARC</a> records will be added to all custom domains unless you override them.</p>
|
<p>Strict <a href="http://tools.ietf.org/html/rfc4408">SPF</a> and <a href="https://datatracker.ietf.org/doc/draft-kucherawy-dmarc-base/?include_text=1">DMARC</a> records will be added to all custom domains unless you override them.</p>
|
||||||
|
|
||||||
<h4>Examples:</h4>
|
<h4>Examples:</h4>
|
||||||
|
|
||||||
|
<p>Try these examples. For simplicity the examples omit the <code>--user me@mydomain.com:yourpassword</code> command line argument which you must fill in with your email address and password.</p>
|
||||||
|
|
||||||
<pre># sets laptop.mydomain.com to point to the IP address of the machine you are executing curl on
|
<pre># sets laptop.mydomain.com to point to the IP address of the machine you are executing curl on
|
||||||
curl -d "" --user me@mydomain.com:###### https://{{hostname}}/admin/dns/set/laptop.mydomain.com
|
curl -X PUT https://{{hostname}}/admin/dns/custom/laptop.mydomain.com
|
||||||
|
|
||||||
# sets an alias
|
# deletes that record and all A records for that domain name
|
||||||
curl -d "" --user me@mydomain.com:###### https://{{hostname}}/admin/dns/set/foo.mydomain.com/cname/bar.mydomain.com
|
curl -X DELETE https://{{hostname}}/admin/dns/custom/laptop.mydomain.com
|
||||||
|
|
||||||
# clears the alias
|
# sets a CNAME alias
|
||||||
curl -d "" --user me@mydomain.com:###### https://{{hostname}}/admin/dns/set/bar.mydomain.com/cname/__delete__
|
curl -X PUT -d "bar.mydomain.com." https://{{hostname}}/admin/dns/custom/foo.mydomain.com/cname
|
||||||
|
|
||||||
# sets a TXT record using the alternate value syntax
|
# deletes that CNAME and all CNAME records for that domain name
|
||||||
curl -d "value=something%20here" --user me@mydomain.com:###### https://{{hostname}}/admin/dns/set/foo.mydomain.com/txt
|
curl -X DELETE https://{{hostname}}/admin/dns/custom/foo.mydomain.com/cname
|
||||||
|
|
||||||
# sets a <a href="http://en.wikipedia.org/wiki/SRV_record">SRV record</a> for the "service" and "protocol" hosted on "target" server
|
# adds a TXT record using POST to preserve any previous TXT records
|
||||||
curl -d "" --user me@mydomain.com:###### https://{{hostname}}/admin/dns/set/_service._protocol.{{hostname}}/srv/"priority weight port target"
|
curl -X POST -d "some text here" https://{{hostname}}/admin/dns/custom/foo.mydomain.com/txt
|
||||||
|
|
||||||
# sets a SRV record using the value syntax
|
# deletes that one TXT record while preserving other TXT records
|
||||||
curl -d "value=priority weight port target" --user me@mydomain.com:###### https://{{hostname}}/admin/dns/set/_service._protocol.host/srv
|
curl -X DELETE -d "some text here" https://{{hostname}}/admin/dns/custom/foo.mydomain.com/txt
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
@@ -160,7 +173,7 @@ function show_custom_dns() {
|
|||||||
|
|
||||||
function show_current_custom_dns() {
|
function show_current_custom_dns() {
|
||||||
api(
|
api(
|
||||||
"/dns/set",
|
"/dns/custom",
|
||||||
"GET",
|
"GET",
|
||||||
{ },
|
{ },
|
||||||
function(data) {
|
function(data) {
|
||||||
@@ -175,6 +188,7 @@ function show_current_custom_dns() {
|
|||||||
$('#custom-dns-current').find("tbody").append(tr);
|
$('#custom-dns-current').find("tbody").append(tr);
|
||||||
tr.attr('data-qname', data[i].qname);
|
tr.attr('data-qname', data[i].qname);
|
||||||
tr.attr('data-rtype', data[i].rtype);
|
tr.attr('data-rtype', data[i].rtype);
|
||||||
|
tr.attr('data-value', data[i].value);
|
||||||
tr.append($('<td class="long"/>').text(data[i].qname));
|
tr.append($('<td class="long"/>').text(data[i].qname));
|
||||||
tr.append($('<td/>').text(data[i].rtype));
|
tr.append($('<td/>').text(data[i].rtype));
|
||||||
tr.append($('<td class="long"/>').text(data[i].value));
|
tr.append($('<td class="long"/>').text(data[i].value));
|
||||||
@@ -186,7 +200,8 @@ function show_current_custom_dns() {
|
|||||||
function delete_custom_dns_record(elem) {
|
function delete_custom_dns_record(elem) {
|
||||||
var qname = $(elem).parents('tr').attr('data-qname');
|
var qname = $(elem).parents('tr').attr('data-qname');
|
||||||
var rtype = $(elem).parents('tr').attr('data-rtype');
|
var rtype = $(elem).parents('tr').attr('data-rtype');
|
||||||
do_set_custom_dns(qname, rtype, "__delete__");
|
var value = $(elem).parents('tr').attr('data-value');
|
||||||
|
do_set_custom_dns(qname, rtype, value, "DELETE");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -207,7 +222,7 @@ function do_set_secondary_dns() {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
function do_set_custom_dns(qname, rtype, value) {
|
function do_set_custom_dns(qname, rtype, value, method) {
|
||||||
if (!qname) {
|
if (!qname) {
|
||||||
if ($('#customdnsQname').val() != '')
|
if ($('#customdnsQname').val() != '')
|
||||||
qname = $('#customdnsQname').val() + '.' + $('#customdnsZone').val();
|
qname = $('#customdnsQname').val() + '.' + $('#customdnsZone').val();
|
||||||
@@ -215,21 +230,20 @@ function do_set_custom_dns(qname, rtype, value) {
|
|||||||
qname = $('#customdnsZone').val();
|
qname = $('#customdnsZone').val();
|
||||||
rtype = $('#customdnsType').val();
|
rtype = $('#customdnsType').val();
|
||||||
value = $('#customdnsValue').val();
|
value = $('#customdnsValue').val();
|
||||||
|
method = 'POST';
|
||||||
}
|
}
|
||||||
|
|
||||||
api(
|
api(
|
||||||
"/dns/set/" + qname + "/" + rtype,
|
"/dns/custom/" + qname + "/" + rtype,
|
||||||
"POST",
|
method,
|
||||||
{
|
value,
|
||||||
value: value
|
|
||||||
},
|
|
||||||
function(data) {
|
function(data) {
|
||||||
if (data == "") return; // nothing updated
|
if (data == "") return; // nothing updated
|
||||||
show_modal_error("Custom DNS", $("<pre/>").text(data));
|
show_modal_error("Custom DNS", $("<pre/>").text(data));
|
||||||
show_current_custom_dns();
|
show_current_custom_dns();
|
||||||
},
|
},
|
||||||
function(err) {
|
function(err) {
|
||||||
show_modal_error("Custom DNS", $("<pre/>").text(err));
|
show_modal_error("Custom DNS (Error)", $("<pre/>").text(err));
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,18 +1,15 @@
|
|||||||
<!DOCTYPE html>
|
<!DOCTYPE html>
|
||||||
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
|
<html lang="en">
|
||||||
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
|
|
||||||
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
|
|
||||||
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
|
|
||||||
<head>
|
<head>
|
||||||
<meta charset="utf-8">
|
|
||||||
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
|
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
|
||||||
<meta name="viewport" content="width=device-width">
|
<meta charset="utf-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||||
|
|
||||||
<title>{{hostname}} - Mail-in-a-Box Control Panel</title>
|
<title>{{hostname}} - Mail-in-a-Box Control Panel</title>
|
||||||
|
|
||||||
<meta name="robots" content="noindex, nofollow">
|
<meta name="robots" content="noindex, nofollow">
|
||||||
|
|
||||||
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">
|
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css">
|
||||||
<style>
|
<style>
|
||||||
@import url(https://fonts.googleapis.com/css?family=Raleway:400,700);
|
@import url(https://fonts.googleapis.com/css?family=Raleway:400,700);
|
||||||
@import url(https://fonts.googleapis.com/css?family=Ubuntu:300);
|
@import url(https://fonts.googleapis.com/css?family=Ubuntu:300);
|
||||||
@@ -73,18 +70,19 @@
|
|||||||
margin-bottom: 1em;
|
margin-bottom: 1em;
|
||||||
}
|
}
|
||||||
</style>
|
</style>
|
||||||
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css">
|
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap-theme.min.css">
|
||||||
<style>
|
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css">
|
||||||
</style>
|
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
<!--[if lt IE 7]>
|
|
||||||
<p class="chromeframe">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> or <a href="http://www.google.com/chromeframe/?redirect=true">activate Google Chrome Frame</a> to improve your experience.</p>
|
<!--[if lt IE 8]><p>Internet Explorer version 8 or any modern web browser is required to use this website, sorry.<![endif]-->
|
||||||
<![endif]-->
|
<!--[if gt IE 7]><!-->
|
||||||
<div class="navbar navbar-inverse navbar-fixed-top">
|
|
||||||
|
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
|
||||||
<div class="container">
|
<div class="container">
|
||||||
<div class="navbar-header">
|
<div class="navbar-header">
|
||||||
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
|
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target=".navbar-collapse">
|
||||||
|
<span class="sr-only">Toggle navigation</span>
|
||||||
<span class="icon-bar"></span>
|
<span class="icon-bar"></span>
|
||||||
<span class="icon-bar"></span>
|
<span class="icon-bar"></span>
|
||||||
<span class="icon-bar"></span>
|
<span class="icon-bar"></span>
|
||||||
@@ -100,9 +98,10 @@
|
|||||||
<li><a href="#ssl" onclick="return show_panel(this);">SSL Certificates</a></li>
|
<li><a href="#ssl" onclick="return show_panel(this);">SSL Certificates</a></li>
|
||||||
<li><a href="#system_backup" onclick="return show_panel(this);">Backup Status</a></li>
|
<li><a href="#system_backup" onclick="return show_panel(this);">Backup Status</a></li>
|
||||||
<li class="divider"></li>
|
<li class="divider"></li>
|
||||||
<li class="dropdown-header">Advanced Options</li>
|
<li class="dropdown-header">Advanced Pages</li>
|
||||||
<li><a href="#custom_dns" onclick="return show_panel(this);">Custom DNS</a></li>
|
<li><a href="#custom_dns" onclick="return show_panel(this);">Custom DNS</a></li>
|
||||||
<li><a href="#external_dns" onclick="return show_panel(this);">External DNS</a></li>
|
<li><a href="#external_dns" onclick="return show_panel(this);">External DNS</a></li>
|
||||||
|
<li><a href="/admin/munin">Munin Monitoring</a></li>
|
||||||
</ul>
|
</ul>
|
||||||
</li>
|
</li>
|
||||||
<li class="dropdown">
|
<li class="dropdown">
|
||||||
@@ -115,6 +114,7 @@
|
|||||||
</li>
|
</li>
|
||||||
<li><a href="#sync_guide" onclick="return show_panel(this);">Contacts/Calendar</a></li>
|
<li><a href="#sync_guide" onclick="return show_panel(this);">Contacts/Calendar</a></li>
|
||||||
<li><a href="#web" onclick="return show_panel(this);">Web</a></li>
|
<li><a href="#web" onclick="return show_panel(this);">Web</a></li>
|
||||||
|
<li><a href="#version" onclick="return show_panel(this);">Version</a></li>
|
||||||
</ul>
|
</ul>
|
||||||
<ul class="nav navbar-nav navbar-right">
|
<ul class="nav navbar-nav navbar-right">
|
||||||
<li><a href="#" onclick="do_logout(); return false;" style="color: white">Log out?</a></li>
|
<li><a href="#" onclick="do_logout(); return false;" style="color: white">Log out?</a></li>
|
||||||
@@ -168,6 +168,10 @@
|
|||||||
{% include "ssl.html" %}
|
{% include "ssl.html" %}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
<div id="panel_version" class="admin_panel">
|
||||||
|
{% include "version.html" %}
|
||||||
|
</div>
|
||||||
|
|
||||||
<hr>
|
<hr>
|
||||||
|
|
||||||
<footer>
|
<footer>
|
||||||
@@ -175,9 +179,9 @@
|
|||||||
</footer>
|
</footer>
|
||||||
</div> <!-- /container -->
|
</div> <!-- /container -->
|
||||||
|
|
||||||
<div id="ajax_loading_indicator" style="display: none; position: fixed; left: 0; top: 0; width: 100%; height: 100%; text-align: center; background-color: rgba(255,255,255,.75)">
|
<div id="ajax_loading_indicator" style="display: none; position: fixed; left: 0; top: 0; width: 100%; height: 100%; z-index: 100000; text-align: center; background-color: rgba(255,255,255,.75)">
|
||||||
<div style="margin: 20% auto">
|
<div style="margin: 20% auto">
|
||||||
<div><span class="glyphicon glyphicon-time"></span></div>
|
<div><span class="fa fa-spinner fa-pulse"></span></div>
|
||||||
<div>Loading...</div>
|
<div>Loading...</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -200,8 +204,8 @@
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.1/jquery.min.js"></script>
|
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
|
||||||
<script src="//maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
|
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/js/bootstrap.min.js"></script>
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
var global_modal_state = null;
|
var global_modal_state = null;
|
||||||
@@ -233,7 +237,7 @@ $(function() {
|
|||||||
function show_modal_error(title, message, callback) {
|
function show_modal_error(title, message, callback) {
|
||||||
$('#global_modal h4').text(title);
|
$('#global_modal h4').text(title);
|
||||||
$('#global_modal .modal-body').html("<p/>");
|
$('#global_modal .modal-body').html("<p/>");
|
||||||
if (typeof question == String) {
|
if (typeof question == 'string') {
|
||||||
$('#global_modal p').text(message);
|
$('#global_modal p').text(message);
|
||||||
$('#global_modal .modal-dialog').addClass("modal-sm");
|
$('#global_modal .modal-dialog').addClass("modal-sm");
|
||||||
} else {
|
} else {
|
||||||
@@ -245,11 +249,12 @@ function show_modal_error(title, message, callback) {
|
|||||||
global_modal_funcs = [callback, callback];
|
global_modal_funcs = [callback, callback];
|
||||||
global_modal_state = null;
|
global_modal_state = null;
|
||||||
$('#global_modal').modal({});
|
$('#global_modal').modal({});
|
||||||
|
return false; // handy when called from onclick
|
||||||
}
|
}
|
||||||
|
|
||||||
function show_modal_confirm(title, question, verb, yes_callback, cancel_callback) {
|
function show_modal_confirm(title, question, verb, yes_callback, cancel_callback) {
|
||||||
$('#global_modal h4').text(title);
|
$('#global_modal h4').text(title);
|
||||||
if (typeof question == String) {
|
if (typeof question == 'string') {
|
||||||
$('#global_modal .modal-dialog').addClass("modal-sm");
|
$('#global_modal .modal-dialog').addClass("modal-sm");
|
||||||
$('#global_modal .modal-body').html("<p/>");
|
$('#global_modal .modal-body').html("<p/>");
|
||||||
$('#global_modal p').text(question);
|
$('#global_modal p').text(question);
|
||||||
@@ -257,11 +262,17 @@ function show_modal_confirm(title, question, verb, yes_callback, cancel_callback
|
|||||||
$('#global_modal .modal-dialog').removeClass("modal-sm");
|
$('#global_modal .modal-dialog').removeClass("modal-sm");
|
||||||
$('#global_modal .modal-body').html("").append(question);
|
$('#global_modal .modal-body').html("").append(question);
|
||||||
}
|
}
|
||||||
$('#global_modal .btn-default').show().text("Cancel");
|
if (typeof verb == 'string') {
|
||||||
$('#global_modal .btn-danger').show().text(verb);
|
$('#global_modal .btn-default').show().text("Cancel");
|
||||||
|
$('#global_modal .btn-danger').show().text(verb);
|
||||||
|
} else {
|
||||||
|
$('#global_modal .btn-default').show().text(verb[1]);
|
||||||
|
$('#global_modal .btn-danger').show().text(verb[0]);
|
||||||
|
}
|
||||||
global_modal_funcs = [yes_callback, cancel_callback];
|
global_modal_funcs = [yes_callback, cancel_callback];
|
||||||
global_modal_state = null;
|
global_modal_state = null;
|
||||||
$('#global_modal').modal({});
|
$('#global_modal').modal({});
|
||||||
|
return false; // handy when called from onclick
|
||||||
}
|
}
|
||||||
|
|
||||||
var ajax_num_executing_requests = 0;
|
var ajax_num_executing_requests = 0;
|
||||||
@@ -270,7 +281,7 @@ function ajax(options) {
|
|||||||
function hide_loading_indicator() {
|
function hide_loading_indicator() {
|
||||||
ajax_num_executing_requests--;
|
ajax_num_executing_requests--;
|
||||||
if (ajax_num_executing_requests == 0)
|
if (ajax_num_executing_requests == 0)
|
||||||
$('#ajax_loading_indicator').stop().hide(); // stop() prevents an ongoing fade from causing the thing to be shown again after this call
|
$('#ajax_loading_indicator').stop(true).hide(); // stop() prevents an ongoing fade from causing the thing to be shown again after this call
|
||||||
}
|
}
|
||||||
var old_success = options.success;
|
var old_success = options.success;
|
||||||
var old_error = options.error;
|
var old_error = options.error;
|
||||||
@@ -290,6 +301,7 @@ function ajax(options) {
|
|||||||
};
|
};
|
||||||
ajax_num_executing_requests++;
|
ajax_num_executing_requests++;
|
||||||
$.ajax(options);
|
$.ajax(options);
|
||||||
|
return false; // handy when called from onclick
|
||||||
}
|
}
|
||||||
|
|
||||||
var api_credentials = ["", ""];
|
var api_credentials = ["", ""];
|
||||||
@@ -329,7 +341,13 @@ function api(url, method, data, callback, callback_error) {
|
|||||||
ajax({
|
ajax({
|
||||||
url: "/admin" + url,
|
url: "/admin" + url,
|
||||||
method: method,
|
method: method,
|
||||||
|
cache: false,
|
||||||
data: data,
|
data: data,
|
||||||
|
|
||||||
|
// the custom DNS api sends raw POST/PUT bodies --- prevent URL-encoding
|
||||||
|
processData: typeof data != "string",
|
||||||
|
mimeType: typeof data == "string" ? "text/plain; charset=ascii" : null,
|
||||||
|
|
||||||
beforeSend: function(xhr) {
|
beforeSend: function(xhr) {
|
||||||
// We don't store user credentials in a cookie to avoid the hassle of CSRF
|
// We don't store user credentials in a cookie to avoid the hassle of CSRF
|
||||||
// attacks. The Authorization header only gets set in our AJAX calls triggered
|
// attacks. The Authorization header only gets set in our AJAX calls triggered
|
||||||
|
|||||||
@@ -1,12 +1,20 @@
|
|||||||
<h1 style="margin: 1em; text-align: center">{{hostname}}</h1>
|
<h1 style="margin: 1em; text-align: center">{{hostname}}</h1>
|
||||||
|
|
||||||
{% if no_admins_exist %}
|
{% if no_users_exist or no_admins_exist %}
|
||||||
<div class="row">
|
<div class="row">
|
||||||
<div class="col-md-offset-2 col-md-8">
|
<div class="col-md-offset-2 col-md-8">
|
||||||
|
{% if no_users_exist %}
|
||||||
|
<p class="text-danger">There are no users on this system! To make an administrative user,
|
||||||
|
log into this machine using SSH (like when you first set it up) and run:</p>
|
||||||
|
<pre>cd mailinabox
|
||||||
|
sudo tools/mail.py user add me@{{hostname}}
|
||||||
|
sudo tools/mail.py user make-admin me@{{hostname}}</pre>
|
||||||
|
{% else %}
|
||||||
<p class="text-danger">There are no administrative users on this system! To make an administrative user,
|
<p class="text-danger">There are no administrative users on this system! To make an administrative user,
|
||||||
log into this machine using SSH (like when you first set it up) and run:</p>
|
log into this machine using SSH (like when you first set it up) and run:</p>
|
||||||
<pre>cd mailinabox
|
<pre>cd mailinabox
|
||||||
sudo tools/mail.py user make-admin your@emailaddress.com</pre>
|
sudo tools/mail.py user make-admin me@{{hostname}}</pre>
|
||||||
|
{% endif %}
|
||||||
<hr>
|
<hr>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@@ -40,7 +40,7 @@
|
|||||||
|
|
||||||
<h4>Exchange/ActiveSync settings</h4>
|
<h4>Exchange/ActiveSync settings</h4>
|
||||||
|
|
||||||
<p>On iOS devices and devices on this <a href="http://z-push.org/compatibility/">compatibility list</a>, you may set up your mail as an Exchange or ActiveSync server. However, we’ve found this to be more buggy than using IMAP. If you encounter any problems, please use the manual settings above.</p>
|
<p>On iOS devices, devices on this <a href="http://z-push.org/compatibility/">compatibility list</a>, or using Outlook 2007 or later on Windows 7 and later, you may set up your mail as an Exchange or ActiveSync server. However, we’ve found this to be more buggy than using IMAP as described above. If you encounter any problems, please use the manual settings above.</p>
|
||||||
|
|
||||||
<table class="table">
|
<table class="table">
|
||||||
<tr><th>Server</th> <td>{{hostname}}</td></tr>
|
<tr><th>Server</th> <td>{{hostname}}</td></tr>
|
||||||
@@ -60,7 +60,7 @@
|
|||||||
<p>Your box using a technique called greylisting to cut down on spam. Greylisting works by delaying mail from people you haven’t received mail from before for up to about 10 minutes. The vast majority of spam gets tricked by this. If you are waiting for an email from someone new, such as if you are registering on a new website and are waiting for an email confirmation, please give it up to 10-15 minutes to arrive.</p>
|
<p>Your box using a technique called greylisting to cut down on spam. Greylisting works by delaying mail from people you haven’t received mail from before for up to about 10 minutes. The vast majority of spam gets tricked by this. If you are waiting for an email from someone new, such as if you are registering on a new website and are waiting for an email confirmation, please give it up to 10-15 minutes to arrive.</p>
|
||||||
|
|
||||||
<h4>+tag addresses</h4>
|
<h4>+tag addresses</h4>
|
||||||
<p>Every incoming email address also receives mail for <code>+tag</code> addresses. If your email address is <code>you@yourdomain.com</code>, you can also accept mail at <code>you+anythinghere@yourdomain.com</code>. Use this as a fast way to create aliases or to segment incoming mail for your own filtering rules.</p>
|
<p>Every incoming email address also receives mail for <code>+tag</code> addresses. If your email address is <code>you@yourdomain.com</code>, you’ll also automatically get mail sent to <code>you+anythinghere@yourdomain.com</code>. Use this as a fast way to segment incoming mail for your own filtering rules without having to create aliases in this control panel.</p>
|
||||||
|
|
||||||
<h4>Use only this box to send as you</h4>
|
<h4>Use only this box to send as you</h4>
|
||||||
<p>Your box sets strict email sending policies for your domain names to make it harder for spam and other fraudulent mail to claim to be you. Only this machine is authorized to send email on behalf of your domain names. If you use any other service to send email as you, it will likely get spam filtered by recipients.</p>
|
<p>Your box sets strict email sending policies for your domain names to make it harder for spam and other fraudulent mail to claim to be you. Only this machine is authorized to send email on behalf of your domain names. If you use any other service to send email as you, it will likely get spam filtered by recipients.</p>
|
||||||
|
|||||||
@@ -110,7 +110,8 @@ function install_cert() {
|
|||||||
chain: $('#ssl_paste_chain').val()
|
chain: $('#ssl_paste_chain').val()
|
||||||
},
|
},
|
||||||
function(status) {
|
function(status) {
|
||||||
if (status == "") {
|
if (/^OK($|\n)/.test(status)) {
|
||||||
|
console.log(status)
|
||||||
show_modal_error("SSL Certificate Installation", "Certificate has been installed. Check that you have no connection problems to the domain.", function() { show_ssl(); $('#csr_info').slideUp(); });
|
show_modal_error("SSL Certificate Installation", "Certificate has been installed. Check that you have no connection problems to the domain.", function() { show_ssl(); $('#csr_info').slideUp(); });
|
||||||
} else {
|
} else {
|
||||||
show_modal_error("SSL Certificate Installation", status);
|
show_modal_error("SSL Certificate Installation", status);
|
||||||
|
|||||||
@@ -11,11 +11,11 @@
|
|||||||
|
|
||||||
<p>Many cloud providers make this easy by allowing you to take snapshots of the machine's disk.</p>
|
<p>Many cloud providers make this easy by allowing you to take snapshots of the machine's disk.</p>
|
||||||
|
|
||||||
<p>You can also use SFTP (FTP over SSH) to copy files from <tt id="backup-location"></tt>. These files are encrpyted, so they are safe to store anywhere. Copy the encryption password from <tt id="backup-encpassword-file"></tt> also but keep it in a safe location.</p>
|
<p>You can also use SFTP (FTP over SSH) to copy files from <tt id="backup-location"></tt>. These files are encrypted, so they are safe to store anywhere. Copy the encryption password from <tt id="backup-encpassword-file"></tt> also but keep it in a safe location.</p>
|
||||||
|
|
||||||
<h3>Current Backups</h3>
|
<h3>Current Backups</h3>
|
||||||
|
|
||||||
<p>The backup directory currently contains the backups listed below. The total size on disk of the backups is <span id="backup-total-size"></span>.</p>
|
<p>The backup directory currently contains the backups listed below. The total size on disk of the backups is currently <span id="backup-total-size"></span>.</p>
|
||||||
|
|
||||||
<table id="backup-status" class="table" style="width: auto">
|
<table id="backup-status" class="table" style="width: auto">
|
||||||
<thead>
|
<thead>
|
||||||
@@ -28,8 +28,6 @@
|
|||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
<p style="margin-top: 2em"><small>The size column in the table indicates the size of the encrypted backup, but the total size on disk shown above includes storage for unencrypted intermediate files.</small></p>
|
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
function nice_size(bytes) {
|
function nice_size(bytes) {
|
||||||
var powers = ['bytes', 'KB', 'MB', 'GB', 'TB'];
|
var powers = ['bytes', 'KB', 'MB', 'GB', 'TB'];
|
||||||
@@ -54,7 +52,7 @@ function show_system_backup() {
|
|||||||
"GET",
|
"GET",
|
||||||
{ },
|
{ },
|
||||||
function(r) {
|
function(r) {
|
||||||
$('#backup-location').text(r.encdirectory);
|
$('#backup-location').text(r.directory);
|
||||||
$('#backup-encpassword-file').text(r.encpwfile);
|
$('#backup-encpassword-file').text(r.encpwfile);
|
||||||
|
|
||||||
$('#backup-status tbody').html("");
|
$('#backup-status tbody').html("");
|
||||||
@@ -72,15 +70,14 @@ function show_system_backup() {
|
|||||||
tr.append( $('<td/>').text(b.date_str + " " + r.tz) );
|
tr.append( $('<td/>').text(b.date_str + " " + r.tz) );
|
||||||
tr.append( $('<td/>').text(b.date_delta + " ago") );
|
tr.append( $('<td/>').text(b.date_delta + " ago") );
|
||||||
tr.append( $('<td/>').text(b.full ? "full" : "increment") );
|
tr.append( $('<td/>').text(b.full ? "full" : "increment") );
|
||||||
tr.append( $('<td style="text-align: right"/>').text( nice_size(b.encsize)) );
|
tr.append( $('<td style="text-align: right"/>').text( nice_size(b.size)) );
|
||||||
if (b.deleted_in)
|
if (b.deleted_in)
|
||||||
tr.append( $('<td/>').text(b.deleted_in) );
|
tr.append( $('<td/>').text(b.deleted_in) );
|
||||||
else
|
else
|
||||||
tr.append( $('<td class="text-muted">n/a</td>') );
|
tr.append( $('<td class="text-muted">unknown</td>') );
|
||||||
$('#backup-status tbody').append(tr);
|
$('#backup-status tbody').append(tr);
|
||||||
|
|
||||||
total_disk_size += b.size;
|
total_disk_size += b.size;
|
||||||
total_disk_size += b.encsize;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
$('#backup-total-size').text(nice_size(total_disk_size));
|
$('#backup-total-size').text(nice_size(total_disk_size));
|
||||||
|
|||||||
@@ -1,13 +1,12 @@
|
|||||||
<h2>Users</h2>
|
<h2>Users</h2>
|
||||||
|
|
||||||
<style>
|
<style>
|
||||||
|
#user_table h4 { margin: 1em 0 0 0; }
|
||||||
#user_table tr.account_inactive td.address { color: #888; text-decoration: line-through; }
|
#user_table tr.account_inactive td.address { color: #888; text-decoration: line-through; }
|
||||||
#user_table .aliases { font-size: 90%; }
|
|
||||||
#user_table .aliases div:before { content: "⇖ "; }
|
|
||||||
#user_table .aliases div { }
|
|
||||||
#user_table .actions { margin-top: .33em; font-size: 95%; }
|
#user_table .actions { margin-top: .33em; font-size: 95%; }
|
||||||
#user_table .account_inactive .if_active { display: none; }
|
#user_table .account_inactive .if_active { display: none; }
|
||||||
#user_table .account_active .if_inactive { display: none; }
|
#user_table .account_active .if_inactive { display: none; }
|
||||||
|
#user_table .account_active.if_inactive { display: none; }
|
||||||
</style>
|
</style>
|
||||||
|
|
||||||
<h3>Add a mail user</h3>
|
<h3>Add a mail user</h3>
|
||||||
@@ -77,11 +76,9 @@
|
|||||||
<td class='mailboxsize'>
|
<td class='mailboxsize'>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
<tr id="user-extra-template">
|
<tr id="user-extra-template" class="if_inactive">
|
||||||
<td colspan="3" style="border-top: 0; padding-top: 0">
|
<td colspan="3" style="border: 0; padding-top: 0">
|
||||||
<div class='if_inactive restore_info' style='color: #888; font-size: 90%'>To restore account, create a new account with this email address. Or to permanently delete the mailbox, delete the directory <tt></tt> on the machine.</div>
|
<div class='restore_info' style='color: #888; font-size: 90%'>To restore account, create a new account with this email address. Or to permanently delete the mailbox, delete the directory <tt></tt> on the machine.</div>
|
||||||
|
|
||||||
<div class='aliases' style='display: none'> </div>
|
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
</table>
|
</table>
|
||||||
@@ -98,7 +95,7 @@ function show_users() {
|
|||||||
function(r) {
|
function(r) {
|
||||||
$('#user_table tbody').html("");
|
$('#user_table tbody').html("");
|
||||||
for (var i = 0; i < r.length; i++) {
|
for (var i = 0; i < r.length; i++) {
|
||||||
var hdr = $("<tr><td><h4/></td></tr>");
|
var hdr = $("<tr><td colspan='3'><h4/></td></tr>");
|
||||||
hdr.find('h4').text(r[i].domain);
|
hdr.find('h4').text(r[i].domain);
|
||||||
$('#user_table tbody').append(hdr);
|
$('#user_table tbody').append(hdr);
|
||||||
|
|
||||||
@@ -137,16 +134,6 @@ function show_users() {
|
|||||||
p.find('span.name').text(add_privs[j]);
|
p.find('span.name').text(add_privs[j]);
|
||||||
n.find('.add-privs').append(p);
|
n.find('.add-privs').append(p);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (user.aliases && user.aliases.length > 0) {
|
|
||||||
n2.find('.aliases').show();
|
|
||||||
for (var j = 0; j < user.aliases.length; j++) {
|
|
||||||
n2.find('.aliases').append($("<div/>").text(
|
|
||||||
user.aliases[j][0]
|
|
||||||
+ (user.aliases[j][1].length > 0 ? " ⇐ " + user.aliases[j][1].join(", ") : "")
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@@ -177,9 +164,14 @@ function do_add_user() {
|
|||||||
|
|
||||||
function users_set_password(elem) {
|
function users_set_password(elem) {
|
||||||
var email = $(elem).parents('tr').attr('data-email');
|
var email = $(elem).parents('tr').attr('data-email');
|
||||||
|
|
||||||
|
var yourpw = "";
|
||||||
|
if (api_credentials != null && email == api_credentials[0])
|
||||||
|
yourpw = "<p class='text-danger'>If you change your own password, you will be logged out of this control panel and will need to log in again.</p>";
|
||||||
|
|
||||||
show_modal_confirm(
|
show_modal_confirm(
|
||||||
"Archive User",
|
"Archive User",
|
||||||
$("<p>Set a new password for <b>" + email + "</b>?</p> <p><label for='users_set_password_pw' style='display: block; font-weight: normal'>New Password:</label><input type='password' id='users_set_password_pw'></p><p><small>Passwords must be at least four characters and may not contain spaces.</small></p>"),
|
$("<p>Set a new password for <b>" + email + "</b>?</p> <p><label for='users_set_password_pw' style='display: block; font-weight: normal'>New Password:</label><input type='password' id='users_set_password_pw'></p><p><small>Passwords must be at least four characters and may not contain spaces.</small>" + yourpw + "</p>"),
|
||||||
"Set Password",
|
"Set Password",
|
||||||
function() {
|
function() {
|
||||||
api(
|
api(
|
||||||
@@ -201,6 +193,13 @@ function users_set_password(elem) {
|
|||||||
|
|
||||||
function users_remove(elem) {
|
function users_remove(elem) {
|
||||||
var email = $(elem).parents('tr').attr('data-email');
|
var email = $(elem).parents('tr').attr('data-email');
|
||||||
|
|
||||||
|
// can't remove yourself
|
||||||
|
if (api_credentials != null && email == api_credentials[0]) {
|
||||||
|
show_modal_error("Archive User", "You cannot archive your own account.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
show_modal_confirm(
|
show_modal_confirm(
|
||||||
"Archive User",
|
"Archive User",
|
||||||
$("<p>Are you sure you want to archive <b>" + email + "</b>?</p> <p>The user's mailboxes will not be deleted (you can do that later), but the user will no longer be able to log into any services on this machine.</p>"),
|
$("<p>Are you sure you want to archive <b>" + email + "</b>?</p> <p>The user's mailboxes will not be deleted (you can do that later), but the user will no longer be able to log into any services on this machine.</p>"),
|
||||||
@@ -236,7 +235,7 @@ function mod_priv(elem, add_remove) {
|
|||||||
var add_remove1 = add_remove.charAt(0).toUpperCase() + add_remove.substring(1);
|
var add_remove1 = add_remove.charAt(0).toUpperCase() + add_remove.substring(1);
|
||||||
show_modal_confirm(
|
show_modal_confirm(
|
||||||
"Modify Privileges",
|
"Modify Privileges",
|
||||||
"Are you sure you want to " + add_remove + " the " + priv + " privilege for <b>" + email + "</b>?",
|
$("<p>Are you sure you want to " + add_remove + " the " + priv + " privilege for <b>" + email + "</b>?</p>"),
|
||||||
add_remove1,
|
add_remove1,
|
||||||
function() {
|
function() {
|
||||||
api(
|
api(
|
||||||
|
|||||||
36
management/templates/version.html
Normal file
36
management/templates/version.html
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
<style>
|
||||||
|
</style>
|
||||||
|
|
||||||
|
<h2>Mail-in-a-Box Version</h2>
|
||||||
|
|
||||||
|
<p>You are running Mail-in-a-Box version <span id="miab-version" style="font-weight: bold">...</span>.</p>
|
||||||
|
|
||||||
|
<p>The latest version of Mail-in-a-Box is <button id="miab-get-latest-upstream" onclick="check_latest_version()">Check</button>.</p>
|
||||||
|
|
||||||
|
<p>To find the latest version and for upgrade instructions, see <a href="https://mailinabox.email/">https://mailinabox.email/</a>, <a href="https://github.com/mail-in-a-box/mailinabox/blob/master/CHANGELOG.md">release notes</a>, and <a href="https://mailinabox.email/maintenance.html#updating-mail-in-a-box">upgrade instructions</a>.</p>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
function show_version() {
|
||||||
|
$('#miab-version').text('loading...');
|
||||||
|
api(
|
||||||
|
"/system/version",
|
||||||
|
"GET",
|
||||||
|
{
|
||||||
|
},
|
||||||
|
function(version) {
|
||||||
|
$('#miab-version').text(version);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function check_latest_version() {
|
||||||
|
$('#miab-get-latest-upstream').text('loading...');
|
||||||
|
api(
|
||||||
|
"/system/latest-upstream-version",
|
||||||
|
"POST",
|
||||||
|
{
|
||||||
|
},
|
||||||
|
function(version) {
|
||||||
|
$('#miab-get-latest-upstream').text(version);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
</script>
|
||||||
@@ -7,7 +7,7 @@
|
|||||||
|
|
||||||
<h3>Uploading web files</h3>
|
<h3>Uploading web files</h3>
|
||||||
|
|
||||||
<p>You can replace the default website with your own HTML pages and other static files. This control panel won’t help you design a website, but once you have <tt>.html</tt> files you can upload it following these instructions:</p>
|
<p>You can replace the default website with your own HTML pages and other static files. This control panel won’t help you design a website, but once you have <tt>.html</tt> files you can upload them following these instructions:</p>
|
||||||
|
|
||||||
<ol>
|
<ol>
|
||||||
<li>Ensure that any domains you are publishing a website for have no problems on the <a href="#system_status" onclick="return show_panel(this);">Status Checks</a> page.</li>
|
<li>Ensure that any domains you are publishing a website for have no problems on the <a href="#system_status" onclick="return show_panel(this);">Status Checks</a> page.</li>
|
||||||
@@ -18,41 +18,24 @@
|
|||||||
|
|
||||||
<li>Upload your <tt>.html</tt> or other files to the directory <tt>{{storage_root}}/www/default</tt> on this machine. They will appear directly and immediately on the web.</li>
|
<li>Upload your <tt>.html</tt> or other files to the directory <tt>{{storage_root}}/www/default</tt> on this machine. They will appear directly and immediately on the web.</li>
|
||||||
|
|
||||||
<li>The websites set up on this machine are listed in the table below with where to put the files for each website (if you have customized that, see next section).</li>
|
<li>The websites set up on this machine are listed in the table below with where to put the files for each website.</li>
|
||||||
|
|
||||||
<table id="web_domains_existing" class="table" style="margin-bottom: 2em; width: auto;">
|
<table id="web_domains_existing" class="table" style="margin-bottom: 1em; width: auto;">
|
||||||
<thead>
|
<thead>
|
||||||
<tr>
|
<tr>
|
||||||
<th>Site</th>
|
<th>Site</th>
|
||||||
<th>Directory for Files</th>
|
<th>Directory for Files</th>
|
||||||
|
<th/>
|
||||||
</tr>
|
</tr>
|
||||||
</thead>
|
</thead>
|
||||||
<tbody>
|
<tbody>
|
||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
<li>If you want to have this box host a static website on a domain that is not listed in the table, create a dummy <a href="#users" onclick="return show_panel(this);">mail user</a> or <a href="#aliases" onclick="return show_panel(this);">alias</a> on the domain first.</li>
|
<p>To add a domain to this table, create a dummy <a href="#users" onclick="return show_panel(this);">mail user</a> or <a href="#aliases" onclick="return show_panel(this);">alias</a> on the domain first and see the <a href="https://mailinabox.email/guide.html#domain-name-configuration">setup guide</a> for adding nameserver records to the new domain at your registrar (but <i>not</i> glue records).</p>
|
||||||
|
|
||||||
</ol>
|
</ol>
|
||||||
|
|
||||||
<h3>Different sites for different domains</h3>
|
|
||||||
|
|
||||||
<p>Create one of the directories shown in the table below to create a space for different files for one of the websites.</p>
|
|
||||||
|
|
||||||
<p>After you create one of these directories, click <button id="web_update" class="btn btn-primary" onclick="do_web_update()">Web Update</button> to restart the box’s web server so that it sees the new website file location.</p>
|
|
||||||
|
|
||||||
<table id="web_domains_custom" class="table" style="margin-bottom: 2em; width: auto;">
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Site</th>
|
|
||||||
<th>Create Directory</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
function show_web() {
|
function show_web() {
|
||||||
api(
|
api(
|
||||||
@@ -64,24 +47,18 @@ function show_web() {
|
|||||||
var tb = $('#web_domains_existing tbody');
|
var tb = $('#web_domains_existing tbody');
|
||||||
tb.text('');
|
tb.text('');
|
||||||
for (var i = 0; i < domains.length; i++) {
|
for (var i = 0; i < domains.length; i++) {
|
||||||
var row = $("<tr><th class='domain'><a href=''></a></th><td class='directory'><tt/></td></tr>");
|
if (!domains[i].static_enabled) continue;
|
||||||
|
var row = $("<tr><th class='domain'><a href=''></a></th><td class='directory'><tt/></td> <td class='change-root hidden'><button class='btn btn-default btn-xs' onclick='show_change_web_root(this)'>Change</button></td></tr>");
|
||||||
tb.append(row);
|
tb.append(row);
|
||||||
|
row.attr('data-domain', domains[i].domain);
|
||||||
|
row.attr('data-custom-web-root', domains[i].custom_root);
|
||||||
row.find('.domain a').text('https://' + domains[i].domain);
|
row.find('.domain a').text('https://' + domains[i].domain);
|
||||||
row.find('.domain a').attr('href', 'https://' + domains[i].domain);
|
row.find('.domain a').attr('href', 'https://' + domains[i].domain);
|
||||||
row.find('.directory tt').text(domains[i].root);
|
row.find('.directory tt').text(domains[i].root);
|
||||||
|
if (domains[i].root != domains[i].custom_root)
|
||||||
|
row.find('.change-root').removeClass('hidden');
|
||||||
}
|
}
|
||||||
|
|
||||||
tb = $('#web_domains_custom tbody');
|
|
||||||
tb.text('');
|
|
||||||
for (var i = 0; i < domains.length; i++) {
|
|
||||||
if (domains[i].root != domains[i].custom_root) {
|
|
||||||
var row = $("<tr><th class='domain'><a href=''></a></th><td class='directory'><tt></td></tr>");
|
|
||||||
tb.append(row);
|
|
||||||
row.find('.domain a').text('https://' + domains[i].domain);
|
|
||||||
row.find('.domain a').attr('href', 'https://' + domains[i].domain);
|
|
||||||
row.find('.directory tt').text(domains[i].custom_root);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -99,4 +76,14 @@ function do_web_update() {
|
|||||||
show_modal_error("Web Update", data, function() { show_web() });
|
show_modal_error("Web Update", data, function() { show_web() });
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function show_change_web_root(elem) {
|
||||||
|
var domain = $(elem).parents('tr').attr('data-domain');
|
||||||
|
var root = $(elem).parents('tr').attr('data-custom-web-root');
|
||||||
|
show_modal_confirm(
|
||||||
|
'Change Root Directory for ' + domain,
|
||||||
|
$('<p>You can change the static directory for <tt>' + domain + '</tt> to:</p> <p><tt>' + root + '</tt></p> <p>First create this directory on the server. Then click Update to scan for the directory and update web settings.'),
|
||||||
|
'Update',
|
||||||
|
function() { do_web_update(); });
|
||||||
|
}
|
||||||
</script>
|
</script>
|
||||||
|
|||||||
@@ -184,3 +184,19 @@ def du(path):
|
|||||||
seen.add(stat.st_ino)
|
seen.add(stat.st_ino)
|
||||||
total_size += stat.st_size
|
total_size += stat.st_size
|
||||||
return total_size
|
return total_size
|
||||||
|
|
||||||
|
def wait_for_service(port, public, env, timeout):
|
||||||
|
# Block until a service on a given port (bound privately or publicly)
|
||||||
|
# is taking connections, with a maximum timeout.
|
||||||
|
import socket, time
|
||||||
|
start = time.perf_counter()
|
||||||
|
while True:
|
||||||
|
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
s.settimeout(timeout/3)
|
||||||
|
try:
|
||||||
|
s.connect(("127.0.0.1" if not public else env['PUBLIC_IP'], port))
|
||||||
|
return True
|
||||||
|
except OSError:
|
||||||
|
if time.perf_counter() > start+timeout:
|
||||||
|
return False
|
||||||
|
time.sleep(min(timeout/4, 1))
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
import os, os.path, shutil, re, tempfile, rtyaml
|
import os, os.path, shutil, re, tempfile, rtyaml
|
||||||
|
|
||||||
from mailconfig import get_mail_domains
|
from mailconfig import get_mail_domains
|
||||||
from dns_update import get_custom_dns_config, do_dns_update
|
from dns_update import get_custom_dns_config, do_dns_update, get_dns_zones
|
||||||
from utils import shell, safe_domain_name, sort_domains
|
from utils import shell, safe_domain_name, sort_domains
|
||||||
|
|
||||||
def get_web_domains(env):
|
def get_web_domains(env):
|
||||||
@@ -17,36 +17,73 @@ def get_web_domains(env):
|
|||||||
domains.add(env['PRIMARY_HOSTNAME'])
|
domains.add(env['PRIMARY_HOSTNAME'])
|
||||||
|
|
||||||
# Also serve web for all mail domains so that we might at least
|
# Also serve web for all mail domains so that we might at least
|
||||||
# provide Webfinger and ActiveSync auto-discover of email settings
|
# provide auto-discover of email settings, and also a static website
|
||||||
# (though the latter isn't really working). These will require that
|
# if the user wants to make one. These will require an SSL cert.
|
||||||
# an SSL cert be installed.
|
|
||||||
domains |= get_mail_domains(env)
|
|
||||||
|
|
||||||
# ...Unless the domain has an A/AAAA record that maps it to a different
|
# ...Unless the domain has an A/AAAA record that maps it to a different
|
||||||
# IP address than this box. Remove those domains from our list.
|
# IP address than this box. Remove those domains from our list.
|
||||||
dns = get_custom_dns_config(env)
|
domains |= (get_mail_domains(env) - get_domains_with_a_records(env))
|
||||||
for domain, value in dns.items():
|
|
||||||
if domain not in domains: continue
|
|
||||||
if (isinstance(value, str) and (value != "local")) \
|
|
||||||
or (isinstance(value, dict) and ("A" in value) and (value["A"] != "local")) \
|
|
||||||
or (isinstance(value, dict) and ("AAAA" in value) and (value["AAAA"] != "local")):
|
|
||||||
domains.remove(domain)
|
|
||||||
|
|
||||||
# Sort the list. Put PRIMARY_HOSTNAME first so it becomes the
|
# Sort the list so the nginx conf gets written in a stable order.
|
||||||
# default server (nginx's default_server).
|
|
||||||
domains = sort_domains(domains, env)
|
domains = sort_domains(domains, env)
|
||||||
|
|
||||||
return domains
|
return domains
|
||||||
|
|
||||||
def do_web_update(env, ok_status="web updated\n"):
|
def get_domains_with_a_records(env):
|
||||||
|
domains = set()
|
||||||
|
dns = get_custom_dns_config(env)
|
||||||
|
for domain, rtype, value in dns:
|
||||||
|
if rtype == "CNAME" or (rtype in ("A", "AAAA") and value != "local"):
|
||||||
|
domains.add(domain)
|
||||||
|
return domains
|
||||||
|
|
||||||
|
def get_web_domains_with_root_overrides(env):
|
||||||
|
# Load custom settings so we can tell what domains have a redirect or proxy set up on '/',
|
||||||
|
# which means static hosting is not happening.
|
||||||
|
root_overrides = { }
|
||||||
|
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
|
||||||
|
if os.path.exists(nginx_conf_custom_fn):
|
||||||
|
custom_settings = rtyaml.load(open(nginx_conf_custom_fn))
|
||||||
|
for domain, settings in custom_settings.items():
|
||||||
|
for type, value in [('redirect', settings.get('redirects', {}).get('/')),
|
||||||
|
('proxy', settings.get('proxies', {}).get('/'))]:
|
||||||
|
if value:
|
||||||
|
root_overrides[domain] = (type, value)
|
||||||
|
return root_overrides
|
||||||
|
|
||||||
|
|
||||||
|
def get_default_www_redirects(env):
|
||||||
|
# Returns a list of www subdomains that we want to provide default redirects
|
||||||
|
# for, i.e. any www's that aren't domains the user has actually configured
|
||||||
|
# to serve for real. Which would be unusual.
|
||||||
|
web_domains = set(get_web_domains(env))
|
||||||
|
www_domains = set('www.' + zone for zone, zonefile in get_dns_zones(env))
|
||||||
|
return sort_domains(www_domains - web_domains - get_domains_with_a_records(env), env)
|
||||||
|
|
||||||
|
def do_web_update(env):
|
||||||
# Build an nginx configuration file.
|
# Build an nginx configuration file.
|
||||||
nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read()
|
nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read()
|
||||||
|
|
||||||
# Add configuration for each web domain.
|
# Load the templates.
|
||||||
template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
|
template0 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
|
||||||
|
template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-alldomains.conf")).read()
|
||||||
template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read()
|
template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read()
|
||||||
|
template3 = "\trewrite ^(.*) https://$REDIRECT_DOMAIN$1 permanent;\n"
|
||||||
|
|
||||||
|
# Add the PRIMARY_HOST configuration first so it becomes nginx's default server.
|
||||||
|
nginx_conf += make_domain_config(env['PRIMARY_HOSTNAME'], [template0, template1, template2], env)
|
||||||
|
|
||||||
|
# Add configuration all other web domains.
|
||||||
|
has_root_proxy_or_redirect = get_web_domains_with_root_overrides(env)
|
||||||
for domain in get_web_domains(env):
|
for domain in get_web_domains(env):
|
||||||
nginx_conf += make_domain_config(domain, template1, template2, env)
|
if domain == env['PRIMARY_HOSTNAME']: continue # handled above
|
||||||
|
if domain not in has_root_proxy_or_redirect:
|
||||||
|
nginx_conf += make_domain_config(domain, [template0, template1], env)
|
||||||
|
else:
|
||||||
|
nginx_conf += make_domain_config(domain, [template0], env)
|
||||||
|
|
||||||
|
# Add default www redirects.
|
||||||
|
for domain in get_default_www_redirects(env):
|
||||||
|
nginx_conf += make_domain_config(domain, [template0, template3], env)
|
||||||
|
|
||||||
# Did the file change? If not, don't bother writing & restarting nginx.
|
# Did the file change? If not, don't bother writing & restarting nginx.
|
||||||
nginx_conf_fn = "/etc/nginx/conf.d/local.conf"
|
nginx_conf_fn = "/etc/nginx/conf.d/local.conf"
|
||||||
@@ -65,34 +102,24 @@ def do_web_update(env, ok_status="web updated\n"):
|
|||||||
# enough and doesn't break any open connections.
|
# enough and doesn't break any open connections.
|
||||||
shell('check_call', ["/usr/sbin/service", "nginx", "reload"])
|
shell('check_call', ["/usr/sbin/service", "nginx", "reload"])
|
||||||
|
|
||||||
return ok_status
|
return "web updated\n"
|
||||||
|
|
||||||
def make_domain_config(domain, template, template_for_primaryhost, env):
|
def make_domain_config(domain, templates, env):
|
||||||
# How will we configure this domain.
|
# GET SOME VARIABLES
|
||||||
|
|
||||||
# Where will its root directory be for static files?
|
# Where will its root directory be for static files?
|
||||||
|
|
||||||
root = get_web_root(domain, env)
|
root = get_web_root(domain, env)
|
||||||
|
|
||||||
# What private key and SSL certificate will we use for this domain?
|
# What private key and SSL certificate will we use for this domain?
|
||||||
ssl_key, ssl_certificate = get_domain_ssl_files(domain, env)
|
ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
|
||||||
|
|
||||||
# For hostnames created after the initial setup, ensure we have an SSL certificate
|
# For hostnames created after the initial setup, ensure we have an SSL certificate
|
||||||
# available. Make a self-signed one now if one doesn't exist.
|
# available. Make a self-signed one now if one doesn't exist.
|
||||||
ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, env)
|
ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, env)
|
||||||
|
|
||||||
# Put pieces together.
|
# ADDITIONAL DIRECTIVES.
|
||||||
nginx_conf_parts = re.split("\s*# ADDITIONAL DIRECTIVES HERE\s*", template)
|
|
||||||
nginx_conf = nginx_conf_parts[0] + "\n"
|
|
||||||
if domain == env['PRIMARY_HOSTNAME']:
|
|
||||||
nginx_conf += template_for_primaryhost + "\n"
|
|
||||||
|
|
||||||
# Replace substitution strings in the template & return.
|
nginx_conf_extra = ""
|
||||||
nginx_conf = nginx_conf.replace("$STORAGE_ROOT", env['STORAGE_ROOT'])
|
|
||||||
nginx_conf = nginx_conf.replace("$HOSTNAME", domain.encode("idna").decode("ascii"))
|
|
||||||
nginx_conf = nginx_conf.replace("$ROOT", root)
|
|
||||||
nginx_conf = nginx_conf.replace("$SSL_KEY", ssl_key)
|
|
||||||
nginx_conf = nginx_conf.replace("$SSL_CERTIFICATE", ssl_certificate)
|
|
||||||
|
|
||||||
# Because the certificate may change, we should recognize this so we
|
# Because the certificate may change, we should recognize this so we
|
||||||
# can trigger an nginx update.
|
# can trigger an nginx update.
|
||||||
@@ -105,7 +132,7 @@ def make_domain_config(domain, template, template_for_primaryhost, env):
|
|||||||
finally:
|
finally:
|
||||||
f.close()
|
f.close()
|
||||||
return sha1.hexdigest()
|
return sha1.hexdigest()
|
||||||
nginx_conf += "# ssl files sha1: %s / %s\n" % (hashfile(ssl_key), hashfile(ssl_certificate))
|
nginx_conf_extra += "# ssl files sha1: %s / %s\n" % (hashfile(ssl_key), hashfile(ssl_certificate))
|
||||||
|
|
||||||
# Add in any user customizations in YAML format.
|
# Add in any user customizations in YAML format.
|
||||||
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
|
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
|
||||||
@@ -114,17 +141,29 @@ def make_domain_config(domain, template, template_for_primaryhost, env):
|
|||||||
if domain in yaml:
|
if domain in yaml:
|
||||||
yaml = yaml[domain]
|
yaml = yaml[domain]
|
||||||
for path, url in yaml.get("proxies", {}).items():
|
for path, url in yaml.get("proxies", {}).items():
|
||||||
nginx_conf += "\tlocation %s {\n\t\tproxy_pass %s;\n\t}\n" % (path, url)
|
nginx_conf_extra += "\tlocation %s {\n\t\tproxy_pass %s;\n\t}\n" % (path, url)
|
||||||
for path, url in yaml.get("redirects", {}).items():
|
for path, url in yaml.get("redirects", {}).items():
|
||||||
nginx_conf += "\trewrite %s %s permanent;\n" % (path, url)
|
nginx_conf_extra += "\trewrite %s %s permanent;\n" % (path, url)
|
||||||
|
|
||||||
# Add in any user customizations in the includes/ folder.
|
# Add in any user customizations in the includes/ folder.
|
||||||
nginx_conf_custom_include = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(domain) + ".conf")
|
nginx_conf_custom_include = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(domain) + ".conf")
|
||||||
if os.path.exists(nginx_conf_custom_include):
|
if os.path.exists(nginx_conf_custom_include):
|
||||||
nginx_conf += "\tinclude %s;\n" % (nginx_conf_custom_include)
|
nginx_conf_extra += "\tinclude %s;\n" % (nginx_conf_custom_include)
|
||||||
|
# PUT IT ALL TOGETHER
|
||||||
|
|
||||||
# Ending.
|
# Combine the pieces. Iteratively place each template into the "# ADDITIONAL DIRECTIVES HERE" placeholder
|
||||||
nginx_conf += nginx_conf_parts[1]
|
# of the previous template.
|
||||||
|
nginx_conf = "# ADDITIONAL DIRECTIVES HERE\n"
|
||||||
|
for t in templates + [nginx_conf_extra]:
|
||||||
|
nginx_conf = re.sub("[ \t]*# ADDITIONAL DIRECTIVES HERE *\n", t, nginx_conf)
|
||||||
|
|
||||||
|
# Replace substitution strings in the template & return.
|
||||||
|
nginx_conf = nginx_conf.replace("$STORAGE_ROOT", env['STORAGE_ROOT'])
|
||||||
|
nginx_conf = nginx_conf.replace("$HOSTNAME", domain)
|
||||||
|
nginx_conf = nginx_conf.replace("$ROOT", root)
|
||||||
|
nginx_conf = nginx_conf.replace("$SSL_KEY", ssl_key)
|
||||||
|
nginx_conf = nginx_conf.replace("$SSL_CERTIFICATE", ssl_certificate)
|
||||||
|
nginx_conf = nginx_conf.replace("$REDIRECT_DOMAIN", re.sub(r"^www\.", "", domain)) # for default www redirects to parent domain
|
||||||
|
|
||||||
return nginx_conf
|
return nginx_conf
|
||||||
|
|
||||||
@@ -149,6 +188,7 @@ def get_domain_ssl_files(domain, env, allow_shared_cert=True):
|
|||||||
|
|
||||||
# What SSL certificate will we use?
|
# What SSL certificate will we use?
|
||||||
ssl_certificate_primary = os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem')
|
ssl_certificate_primary = os.path.join(env["STORAGE_ROOT"], 'ssl/ssl_certificate.pem')
|
||||||
|
ssl_via = None
|
||||||
if domain == env['PRIMARY_HOSTNAME']:
|
if domain == env['PRIMARY_HOSTNAME']:
|
||||||
# For PRIMARY_HOSTNAME, use the one we generated at set-up time.
|
# For PRIMARY_HOSTNAME, use the one we generated at set-up time.
|
||||||
ssl_certificate = ssl_certificate_primary
|
ssl_certificate = ssl_certificate_primary
|
||||||
@@ -161,10 +201,18 @@ def get_domain_ssl_files(domain, env, allow_shared_cert=True):
|
|||||||
# the user has uploaded a different private key for this domain.
|
# the user has uploaded a different private key for this domain.
|
||||||
if not ssl_key_is_alt and allow_shared_cert:
|
if not ssl_key_is_alt and allow_shared_cert:
|
||||||
from status_checks import check_certificate
|
from status_checks import check_certificate
|
||||||
if check_certificate(domain, ssl_certificate_primary, None)[0] == "OK":
|
if check_certificate(domain, ssl_certificate_primary, None, just_check_domain=True)[0] == "OK":
|
||||||
ssl_certificate = ssl_certificate_primary
|
ssl_certificate = ssl_certificate_primary
|
||||||
|
ssl_via = "Using multi/wildcard certificate of %s." % env['PRIMARY_HOSTNAME']
|
||||||
|
|
||||||
return ssl_key, ssl_certificate
|
# For a 'www.' domain, see if we can reuse the cert of the parent.
|
||||||
|
elif domain.startswith('www.'):
|
||||||
|
ssl_certificate_parent = os.path.join(env["STORAGE_ROOT"], 'ssl/%s/ssl_certificate.pem' % safe_domain_name(domain[4:]))
|
||||||
|
if os.path.exists(ssl_certificate_parent) and check_certificate(domain, ssl_certificate_parent, None, just_check_domain=True)[0] == "OK":
|
||||||
|
ssl_certificate = ssl_certificate_parent
|
||||||
|
ssl_via = "Using multi/wildcard certificate of %s." % domain[4:]
|
||||||
|
|
||||||
|
return ssl_key, ssl_certificate, ssl_via
|
||||||
|
|
||||||
def ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, env):
|
def ensure_ssl_certificate_exists(domain, ssl_key, ssl_certificate, env):
|
||||||
# For domains besides PRIMARY_HOSTNAME, generate a self-signed certificate if
|
# For domains besides PRIMARY_HOSTNAME, generate a self-signed certificate if
|
||||||
@@ -202,9 +250,8 @@ def create_csr(domain, ssl_key, env):
|
|||||||
return shell("check_output", [
|
return shell("check_output", [
|
||||||
"openssl", "req", "-new",
|
"openssl", "req", "-new",
|
||||||
"-key", ssl_key,
|
"-key", ssl_key,
|
||||||
"-out", "/dev/stdout",
|
|
||||||
"-sha256",
|
"-sha256",
|
||||||
"-subj", "/C=%s/ST=/L=/O=/CN=%s" % (env["CSR_COUNTRY"], domain.encode("idna").decode("ascii"))])
|
"-subj", "/C=%s/ST=/L=/O=/CN=%s" % (env["CSR_COUNTRY"], domain)])
|
||||||
|
|
||||||
def install_cert(domain, ssl_cert, ssl_chain, env):
|
def install_cert(domain, ssl_cert, ssl_chain, env):
|
||||||
if domain not in get_web_domains(env):
|
if domain not in get_web_domains(env):
|
||||||
@@ -219,7 +266,7 @@ def install_cert(domain, ssl_cert, ssl_chain, env):
|
|||||||
|
|
||||||
# Do validation on the certificate before installing it.
|
# Do validation on the certificate before installing it.
|
||||||
from status_checks import check_certificate
|
from status_checks import check_certificate
|
||||||
ssl_key, ssl_certificate = get_domain_ssl_files(domain, env, allow_shared_cert=False)
|
ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env, allow_shared_cert=False)
|
||||||
cert_status, cert_status_details = check_certificate(domain, fn, ssl_key)
|
cert_status, cert_status_details = check_certificate(domain, fn, ssl_key)
|
||||||
if cert_status != "OK":
|
if cert_status != "OK":
|
||||||
if cert_status == "SELF-SIGNED":
|
if cert_status == "SELF-SIGNED":
|
||||||
@@ -233,7 +280,7 @@ def install_cert(domain, ssl_cert, ssl_chain, env):
|
|||||||
os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)
|
os.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)
|
||||||
shutil.move(fn, ssl_certificate)
|
shutil.move(fn, ssl_certificate)
|
||||||
|
|
||||||
ret = []
|
ret = ["OK"]
|
||||||
|
|
||||||
# When updating the cert for PRIMARY_HOSTNAME, also update DNS because it is
|
# When updating the cert for PRIMARY_HOSTNAME, also update DNS because it is
|
||||||
# used in the DANE TLSA record and restart postfix and dovecot which use
|
# used in the DANE TLSA record and restart postfix and dovecot which use
|
||||||
@@ -246,22 +293,25 @@ def install_cert(domain, ssl_cert, ssl_chain, env):
|
|||||||
ret.append("mail services restarted")
|
ret.append("mail services restarted")
|
||||||
|
|
||||||
# Kick nginx so it sees the cert.
|
# Kick nginx so it sees the cert.
|
||||||
ret.append( do_web_update(env, ok_status="") )
|
ret.append( do_web_update(env) )
|
||||||
return "\n".join(r for r in ret if r.strip() != "")
|
return "\n".join(ret)
|
||||||
|
|
||||||
def get_web_domains_info(env):
|
def get_web_domains_info(env):
|
||||||
|
has_root_proxy_or_redirect = get_web_domains_with_root_overrides(env)
|
||||||
|
|
||||||
|
# for the SSL config panel, get cert status
|
||||||
def check_cert(domain):
|
def check_cert(domain):
|
||||||
from status_checks import check_certificate
|
from status_checks import check_certificate
|
||||||
ssl_key, ssl_certificate = get_domain_ssl_files(domain, env)
|
ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
|
||||||
if not os.path.exists(ssl_certificate):
|
if not os.path.exists(ssl_certificate):
|
||||||
return ("danger", "No Certificate Installed")
|
return ("danger", "No Certificate Installed")
|
||||||
cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
|
cert_status, cert_status_details = check_certificate(domain, ssl_certificate, ssl_key)
|
||||||
if cert_status == "OK":
|
if cert_status == "OK":
|
||||||
if domain == env['PRIMARY_HOSTNAME'] or ssl_certificate != get_domain_ssl_files(env['PRIMARY_HOSTNAME'], env)[1]:
|
if not ssl_via:
|
||||||
return ("success", "Signed & valid. " + cert_status_details)
|
return ("success", "Signed & valid. " + cert_status_details)
|
||||||
else:
|
else:
|
||||||
# This is an alternate domain but using the same cert as the primary domain.
|
# This is an alternate domain but using the same cert as the primary domain.
|
||||||
return ("success", "Signed & valid. Using multi/wildcard certificate of %s." % env['PRIMARY_HOSTNAME'])
|
return ("success", "Signed & valid. " + ssl_via)
|
||||||
elif cert_status == "SELF-SIGNED":
|
elif cert_status == "SELF-SIGNED":
|
||||||
return ("warning", "Self-signed. Get a signed certificate to stop warnings.")
|
return ("warning", "Self-signed. Get a signed certificate to stop warnings.")
|
||||||
else:
|
else:
|
||||||
@@ -273,6 +323,15 @@ def get_web_domains_info(env):
|
|||||||
"root": get_web_root(domain, env),
|
"root": get_web_root(domain, env),
|
||||||
"custom_root": get_web_root(domain, env, test_exists=False),
|
"custom_root": get_web_root(domain, env, test_exists=False),
|
||||||
"ssl_certificate": check_cert(domain),
|
"ssl_certificate": check_cert(domain),
|
||||||
|
"static_enabled": domain not in has_root_proxy_or_redirect,
|
||||||
}
|
}
|
||||||
for domain in get_web_domains(env)
|
for domain in get_web_domains(env)
|
||||||
|
] + \
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"domain": domain,
|
||||||
|
"ssl_certificate": check_cert(domain),
|
||||||
|
"static_enabled": False,
|
||||||
|
}
|
||||||
|
for domain in get_default_www_redirects(env)
|
||||||
]
|
]
|
||||||
|
|||||||
62
ppa/Makefile
Executable file
62
ppa/Makefile
Executable file
@@ -0,0 +1,62 @@
|
|||||||
|
POSTGREY_VERSION=1.35-1+miab1
|
||||||
|
DOVECOT_VERSION=2.2.9-1ubuntu2.1+miab1
|
||||||
|
|
||||||
|
all: clean build_postgrey build_dovecot_lucene
|
||||||
|
|
||||||
|
clean:
|
||||||
|
# Clean.
|
||||||
|
rm -rf /tmp/build
|
||||||
|
mkdir -p /tmp/build
|
||||||
|
|
||||||
|
build_postgrey: clean
|
||||||
|
# Download the latest Debian postgrey package. It is ahead of Ubuntu,
|
||||||
|
# and we might as well jump ahead.
|
||||||
|
git clone git://git.debian.org/git/collab-maint/postgrey.git /tmp/build/postgrey
|
||||||
|
|
||||||
|
# Download the corresponding upstream package.
|
||||||
|
wget -O /tmp/build/postgrey_1.35.orig.tar.gz http://postgrey.schweikert.ch/pub/postgrey-1.35.tar.gz
|
||||||
|
|
||||||
|
# Add our source patch to the debian packaging listing.
|
||||||
|
cp postgrey_sources.diff /tmp/build/postgrey/debian/patches/mailinabox
|
||||||
|
|
||||||
|
# Patch the packaging to give it a new version.
|
||||||
|
patch -p1 -d /tmp/build/postgrey < postgrey.diff
|
||||||
|
|
||||||
|
# Build the source package.
|
||||||
|
(cd /tmp/build/postgrey; dpkg-buildpackage -S -us -uc -nc)
|
||||||
|
|
||||||
|
# Sign the packages.
|
||||||
|
debsign /tmp/build/postgrey_$(POSTGREY_VERSION)_source.changes
|
||||||
|
|
||||||
|
# Upload to PPA.
|
||||||
|
dput ppa:mail-in-a-box/ppa /tmp/build/postgrey_$(POSTGREY_VERSION)_source.changes
|
||||||
|
|
||||||
|
# Clear the intermediate files.
|
||||||
|
rm -rf /tmp/build/postgrey
|
||||||
|
|
||||||
|
# TESTING BINARY PACKAGE
|
||||||
|
#sudo apt-get build-dep -y postgrey
|
||||||
|
#(cd /tmp/build/postgrey; dpkg-buildpackage -us -uc -nc)
|
||||||
|
|
||||||
|
build_dovecot_lucene: clean
|
||||||
|
# Get the upstream source.
|
||||||
|
(cd /tmp/build; apt-get source dovecot)
|
||||||
|
|
||||||
|
# Patch it so that we build dovecot-lucene (and nothing else).
|
||||||
|
patch -p1 -d /tmp/build/dovecot-2.2.9 < dovecot_lucene.diff
|
||||||
|
|
||||||
|
# Build the source package.
|
||||||
|
(cd /tmp/build/dovecot-2.2.9; dpkg-buildpackage -S -us -uc -nc)
|
||||||
|
|
||||||
|
# Sign the packages.
|
||||||
|
debsign /tmp/build/dovecot_$(DOVECOT_VERSION)_source.changes
|
||||||
|
|
||||||
|
# Upload it.
|
||||||
|
dput ppa:mail-in-a-box/ppa /tmp/build/dovecot_$(DOVECOT_VERSION)_source.changes
|
||||||
|
|
||||||
|
# TESTING BINARY PACKAGE
|
||||||
|
# Install build dependencies and build dependencies we've added in our patch,
|
||||||
|
# and then build the binary package.
|
||||||
|
#sudo apt-get build-dep -y dovecot
|
||||||
|
#sudo apt-get install libclucene-dev liblzma-dev libexttextcat-dev libstemmer-dev
|
||||||
|
#(cd /tmp/build/dovecot-2.2.9; dpkg-buildpackage -us -uc -nc)
|
||||||
40
ppa/README.md
Normal file
40
ppa/README.md
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
ppa instructions
|
||||||
|
================
|
||||||
|
|
||||||
|
Mail-in-a-Box maintains a Launchpad.net PPA ([Mail-in-a-Box PPA](https://launchpad.net/~mail-in-a-box/+archive/ubuntu/ppa)) for additional deb's that we want to have installed on systems.
|
||||||
|
|
||||||
|
Packages
|
||||||
|
--------
|
||||||
|
|
||||||
|
* postgrey, a fork of [postgrey](http://postgrey.schweikert.ch/) based on the [latest Debian package](http://git.debian.org/?p=collab-maint/postgrey.git), with a modification to whitelist senders that are whitelisted by [dnswl.org](https://www.dnswl.org/) (i.e. don't greylist mail from known good senders).
|
||||||
|
|
||||||
|
* dovecot-lucene, [dovecot's lucene full text search plugin](http://wiki2.dovecot.org/Plugins/FTS/Lucene), which isn't built by Ubuntu's dovecot package maintainer unfortunately.
|
||||||
|
|
||||||
|
Building
|
||||||
|
--------
|
||||||
|
|
||||||
|
To rebuild the packages in the PPA, you'll need to be @JoshData.
|
||||||
|
|
||||||
|
First:
|
||||||
|
|
||||||
|
* You should have an account on Launchpad.net.
|
||||||
|
* Your account should have your GPG key set (to the fingerprint of a GPG key on your system matching the identity at the top of the debian/changelog files).
|
||||||
|
* You should have write permission to the PPA.
|
||||||
|
|
||||||
|
To build:
|
||||||
|
|
||||||
|
# Start a clean VM.
|
||||||
|
vagrant up
|
||||||
|
|
||||||
|
# Put your signing keys (on the host machine) into the VM (so it can sign the debs).
|
||||||
|
gpg --export-secret-keys | vagrant ssh -- gpg --import
|
||||||
|
|
||||||
|
# Build & upload to launchpad.
|
||||||
|
vagrant ssh -- "cd /vagrant && make"
|
||||||
|
|
||||||
|
Mail-in-a-Box adds our PPA during setup, but if you need to do that yourself for testing:
|
||||||
|
|
||||||
|
apt-add-repository ppa:mail-in-a-box/ppa
|
||||||
|
apt-get update
|
||||||
|
apt-get install postgrey dovecot-lucene
|
||||||
|
|
||||||
12
ppa/Vagrantfile
vendored
Normal file
12
ppa/Vagrantfile
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
# -*- mode: ruby -*-
|
||||||
|
# vi: set ft=ruby :
|
||||||
|
|
||||||
|
Vagrant.configure("2") do |config|
|
||||||
|
config.vm.box = "ubuntu14.04"
|
||||||
|
config.vm.box_url = "http://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box"
|
||||||
|
|
||||||
|
config.vm.provision :shell, :inline => <<-SH
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y git dpkg-dev devscripts dput
|
||||||
|
SH
|
||||||
|
end
|
||||||
319
ppa/dovecot_lucene.diff
Normal file
319
ppa/dovecot_lucene.diff
Normal file
@@ -0,0 +1,319 @@
|
|||||||
|
--- a/debian/control
|
||||||
|
+++ b/debian/control
|
||||||
|
@@ -1,210 +1,23 @@
|
||||||
|
Source: dovecot
|
||||||
|
Section: mail
|
||||||
|
Priority: optional
|
||||||
|
-Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
|
||||||
|
-XSBC-Original-Maintainer: Dovecot Maintainers <jaldhar-dovecot@debian.org>
|
||||||
|
-Uploaders: Jaldhar H. Vyas <jaldhar@debian.org>, Fabio Tranchitella <kobold@debian.org>, Joel Johnson <mrjoel@lixil.net>, Marco Nenciarini <mnencia@debian.org>
|
||||||
|
-Build-Depends: debhelper (>= 7.2.3~), dpkg-dev (>= 1.16.1), pkg-config, libssl-dev, libpam0g-dev, libldap2-dev, libpq-dev, libmysqlclient-dev, libsqlite3-dev, libsasl2-dev, zlib1g-dev, libkrb5-dev, drac-dev (>= 1.12-5), libbz2-dev, libdb-dev, libcurl4-gnutls-dev, libexpat-dev, libwrap0-dev, dh-systemd, po-debconf, lsb-release, hardening-wrapper, dh-autoreconf, autotools-dev
|
||||||
|
+Maintainer: Joshua Tauberer <jt@occams.info>
|
||||||
|
+XSBC-Original-Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
|
||||||
|
+Build-Depends: debhelper (>= 7.2.3~), dpkg-dev (>= 1.16.1), pkg-config, libssl-dev, libpam0g-dev, libldap2-dev, libpq-dev, libmysqlclient-dev, libsqlite3-dev, libsasl2-dev, zlib1g-dev, libkrb5-dev, drac-dev (>= 1.12-5), libbz2-dev, libdb-dev, libcurl4-gnutls-dev, libexpat-dev, libwrap0-dev, dh-systemd, po-debconf, lsb-release, libclucene-dev (>= 2.3), liblzma-dev, libexttextcat-dev, libstemmer-dev, hardening-wrapper, dh-autoreconf, autotools-dev
|
||||||
|
Standards-Version: 3.9.4
|
||||||
|
Homepage: http://dovecot.org/
|
||||||
|
-Vcs-Git: git://git.debian.org/git/collab-maint/dovecot.git
|
||||||
|
-Vcs-Browser: http://git.debian.org/?p=collab-maint/dovecot.git
|
||||||
|
+Vcs-Git: https://github.com/mail-in-a-box/mailinabox
|
||||||
|
+Vcs-Browser: https://github.com/mail-in-a-box/mailinabox
|
||||||
|
|
||||||
|
-Package: dovecot-core
|
||||||
|
+Package: dovecot-lucene
|
||||||
|
Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, libpam-runtime (>= 0.76-13.1), openssl, adduser, ucf (>= 2.0020), ssl-cert (>= 1.0-11ubuntu1), lsb-base (>= 3.2-12ubuntu3)
|
||||||
|
-Suggests: ntp, dovecot-gssapi, dovecot-sieve, dovecot-pgsql, dovecot-mysql, dovecot-sqlite, dovecot-ldap, dovecot-imapd, dovecot-pop3d, dovecot-lmtpd, dovecot-managesieved, dovecot-solr, ufw
|
||||||
|
-Recommends: ntpdate
|
||||||
|
-Provides: dovecot-common
|
||||||
|
-Replaces: dovecot-common (<< 1:2.0.14-2~), mailavenger (<< 0.8.1-4)
|
||||||
|
-Breaks: dovecot-common (<< 1:2.0.14-2~), mailavenger (<< 0.8.1-4)
|
||||||
|
-Description: secure POP3/IMAP server - core files
|
||||||
|
+Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (>= 1:2.2.9-1ubuntu2.1)
|
||||||
|
+Description: secure POP3/IMAP server - Lucene support
|
||||||
|
Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
fast, extensible, and portable.
|
||||||
|
.
|
||||||
|
- This package contains the Dovecot main server and its command line utility.
|
||||||
|
-
|
||||||
|
-Package: dovecot-dev
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version})
|
||||||
|
-Replaces: dovecot-common (<< 1:2.0.14-2~)
|
||||||
|
-Breaks: dovecot-common (<< 1:2.0.14-2~)
|
||||||
|
-Description: secure POP3/IMAP server - header files
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package contains header files needed to compile plugins for the Dovecot
|
||||||
|
- mail server.
|
||||||
|
-
|
||||||
|
-Package: dovecot-imapd
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version}), ucf (>= 2.0020)
|
||||||
|
-Provides: imap-server
|
||||||
|
-Description: secure POP3/IMAP server - IMAP daemon
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package contains the Dovecot IMAP server.
|
||||||
|
-
|
||||||
|
-Package: dovecot-pop3d
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version}), ucf (>= 2.0020)
|
||||||
|
-Provides: pop3-server
|
||||||
|
-Description: secure POP3/IMAP server - POP3 daemon
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package contains the Dovecot POP3 server.
|
||||||
|
-
|
||||||
|
-Package: dovecot-lmtpd
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version}), ucf (>= 2.0020)
|
||||||
|
-Replaces: dovecot-common (<< 1:2.0.14-2~)
|
||||||
|
-Breaks: dovecot-common (<< 1:2.0.14-2~)
|
||||||
|
-Description: secure POP3/IMAP server - LMTP server
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package contains the Dovecot LMTP server.
|
||||||
|
-
|
||||||
|
-Package: dovecot-managesieved
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version}), dovecot-sieve (= ${binary:Version}), ucf (>= 2.0020)
|
||||||
|
-Replaces: dovecot-common (<< 1:2.0.14-2~)
|
||||||
|
-Breaks: dovecot-common (<< 1:2.0.14-2~)
|
||||||
|
-Description: secure POP3/IMAP server - ManageSieve server
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package contains the Dovecot ManageSieve server.
|
||||||
|
-
|
||||||
|
-Package: dovecot-pgsql
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version})
|
||||||
|
-Description: secure POP3/IMAP server - PostgreSQL support
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package provides PostgreSQL support for Dovecot.
|
||||||
|
-
|
||||||
|
-Package: dovecot-mysql
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version})
|
||||||
|
-Description: secure POP3/IMAP server - MySQL support
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package provides MySQL support for Dovecot.
|
||||||
|
-
|
||||||
|
-Package: dovecot-sqlite
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version})
|
||||||
|
-Description: secure POP3/IMAP server - SQLite support
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package provides SQLite support for Dovecot.
|
||||||
|
-
|
||||||
|
-Package: dovecot-ldap
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version}), ucf (>= 2.0020)
|
||||||
|
-Description: secure POP3/IMAP server - LDAP support
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package provides LDAP support for Dovecot.
|
||||||
|
-
|
||||||
|
-Package: dovecot-gssapi
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version})
|
||||||
|
-Description: secure POP3/IMAP server - GSSAPI support
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package provides GSSAPI authentication support for Dovecot.
|
||||||
|
-
|
||||||
|
-Package: dovecot-sieve
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version}), ucf (>= 2.0020)
|
||||||
|
-Description: secure POP3/IMAP server - Sieve filters support
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package provides Sieve filters support for Dovecot.
|
||||||
|
-
|
||||||
|
-Package: dovecot-solr
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${shlibs:Depends}, ${misc:Depends}, dovecot-core (= ${binary:Version})
|
||||||
|
-Description: secure POP3/IMAP server - Solr support
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package provides Solr full text search support for Dovecot.
|
||||||
|
-
|
||||||
|
-Package: dovecot-dbg
|
||||||
|
-Section: debug
|
||||||
|
-Priority: extra
|
||||||
|
-Architecture: any
|
||||||
|
-Depends: ${misc:Depends}, dovecot-core (= ${binary:Version})
|
||||||
|
-Description: secure POP3/IMAP server - debug symbols
|
||||||
|
- Dovecot is a mail server whose major goals are security and extreme
|
||||||
|
- reliability. It tries very hard to handle all error conditions and verify
|
||||||
|
- that all data is valid, making it nearly impossible to crash. It supports
|
||||||
|
- mbox/Maildir and its own dbox/mdbox formats, and should also be pretty
|
||||||
|
- fast, extensible, and portable.
|
||||||
|
- .
|
||||||
|
- This package contains debug symbols for Dovecot.
|
||||||
|
-
|
||||||
|
-Package: mail-stack-delivery
|
||||||
|
-Architecture: all
|
||||||
|
-Depends: dovecot-core, dovecot-imapd, dovecot-pop3d, dovecot-managesieved,
|
||||||
|
- postfix, ${misc:Depends}
|
||||||
|
-Replaces: dovecot-postfix (<< 1:1.2.12-0ubuntu1~)
|
||||||
|
-Description: mail server delivery agent stack provided by Ubuntu server team
|
||||||
|
- Ubuntu's mail stack provides fully operational delivery with
|
||||||
|
- safe defaults and additional options. Out of the box it supports IMAP,
|
||||||
|
- POP3 and SMTP services with SASL authentication and Maildir as default
|
||||||
|
- storage engine.
|
||||||
|
- .
|
||||||
|
- This package contains configuration files for dovecot.
|
||||||
|
- .
|
||||||
|
- This package modifies postfix's configuration to integrate with dovecot
|
||||||
|
+ This package provides Lucene full text search support for Dovecot. It has been modified by Mail-in-a-Box
|
||||||
|
+ to supply a dovecot-lucene package compatible with the official ubuntu trusty dovecot-core.
|
||||||
|
|
||||||
|
diff --git a/debian/dovecot-lucene.links b/debian/dovecot-lucene.links
|
||||||
|
new file mode 100644
|
||||||
|
index 0000000..6ffcbeb
|
||||||
|
--- /dev/null
|
||||||
|
+++ b/debian/dovecot-lucene.links
|
||||||
|
@@ -0,0 +1 @@
|
||||||
|
+/usr/share/bug/dovecot-core /usr/share/bug/dovecot-lucene
|
||||||
|
diff --git a/debian/dovecot-lucene.lintian-overrides b/debian/dovecot-lucene.lintian-overrides
|
||||||
|
new file mode 100644
|
||||||
|
index 0000000..60d90fd
|
||||||
|
--- /dev/null
|
||||||
|
+++ b/debian/dovecot-lucene.lintian-overrides
|
||||||
|
@@ -0,0 +1,2 @@
|
||||||
|
+dovecot-lucene: hardening-no-fortify-functions usr/lib/dovecot/modules/lib21_fts_lucene_plugin.so
|
||||||
|
+
|
||||||
|
diff --git a/debian/dovecot-lucene.substvars b/debian/dovecot-lucene.substvars
|
||||||
|
new file mode 100644
|
||||||
|
index 0000000..ed54f36
|
||||||
|
--- /dev/null
|
||||||
|
+++ b/debian/dovecot-lucene.substvars
|
||||||
|
@@ -0,0 +1,2 @@
|
||||||
|
+shlibs:Depends=libc6 (>= 2.4), libclucene-core1 (>= 2.3.3.4), libgcc1 (>= 1:4.1.1), libstdc++6 (>= 4.1.1), libstemmer0d (>= 0+svn527)
|
||||||
|
+misc:Depends=
|
||||||
|
diff --git a/debian/dovecot-lucene.triggers b/debian/dovecot-lucene.triggers
|
||||||
|
new file mode 100644
|
||||||
|
index 0000000..3d933a5
|
||||||
|
--- /dev/null
|
||||||
|
+++ b/debian/dovecot-lucene.triggers
|
||||||
|
@@ -0,0 +1 @@
|
||||||
|
+activate register-dovecot-plugin
|
||||||
|
--- a/debian/rules
|
||||||
|
+++ b/debian/rules
|
||||||
|
@@ -40,6 +40,7 @@
|
||||||
|
--with-solr \
|
||||||
|
--with-ioloop=best \
|
||||||
|
--with-libwrap \
|
||||||
|
+ --with-lucene \
|
||||||
|
--host=$(DEB_HOST_GNU_TYPE) \
|
||||||
|
--build=$(DEB_BUILD_GNU_TYPE) \
|
||||||
|
--prefix=/usr \
|
||||||
|
@@ -95,6 +96,10 @@
|
||||||
|
dh_testroot
|
||||||
|
dh_clean -k
|
||||||
|
dh_installdirs
|
||||||
|
+ mkdir -p $(CURDIR)/debian/dovecot-lucene/usr/lib/dovecot/modules
|
||||||
|
+ mv $(CURDIR)/src/plugins/fts-lucene/.libs/* $(CURDIR)/debian/dovecot-lucene/usr/lib/dovecot/modules/
|
||||||
|
+
|
||||||
|
+rest_disabled_by_miab:
|
||||||
|
$(MAKE) install DESTDIR=$(CURDIR)/debian/dovecot-core
|
||||||
|
$(MAKE) -C $(PIGEONHOLE_DIR) install DESTDIR=$(CURDIR)/debian/dovecot-core
|
||||||
|
rm `find $(CURDIR)/debian -name '*.la'`
|
||||||
|
@@ -209,7 +214,7 @@
|
||||||
|
dh_installdocs -a
|
||||||
|
dh_installexamples -a
|
||||||
|
dh_installpam -a
|
||||||
|
- mv $(CURDIR)/debian/dovecot-core/etc/pam.d/dovecot-core $(CURDIR)/debian/dovecot-core/etc/pam.d/dovecot
|
||||||
|
+ # mv $(CURDIR)/debian/dovecot-core/etc/pam.d/dovecot-core $(CURDIR)/debian/dovecot-core/etc/pam.d/dovecot
|
||||||
|
dh_systemd_enable
|
||||||
|
dh_installinit -pdovecot-core --name=dovecot
|
||||||
|
dh_systemd_start
|
||||||
|
@@ -220,10 +225,10 @@
|
||||||
|
dh_lintian -a
|
||||||
|
dh_installchangelogs -a ChangeLog
|
||||||
|
dh_link -a
|
||||||
|
- dh_strip -a --dbg-package=dovecot-dbg
|
||||||
|
+ #dh_strip -a --dbg-package=dovecot-dbg
|
||||||
|
dh_compress -a
|
||||||
|
dh_fixperms -a
|
||||||
|
- chmod 0700 debian/dovecot-core/etc/dovecot/private
|
||||||
|
+ #chmod 0700 debian/dovecot-core/etc/dovecot/private
|
||||||
|
dh_makeshlibs -a -n
|
||||||
|
dh_installdeb -a
|
||||||
|
dh_shlibdeps -a
|
||||||
|
--- a/debian/changelog
|
||||||
|
+++ a/debian/changelog
|
||||||
|
@@ -1,3 +1,9 @@
|
||||||
|
+dovecot (1:2.2.9-1ubuntu2.1+miab1) trusty; urgency=low
|
||||||
|
+
|
||||||
|
+ * Changed to just build dovecot-lucene for Mail-in-a-box PPA
|
||||||
|
+
|
||||||
|
+ -- Joshua Tauberer <jt@occams.info> Sat, 14 May 2015 16:13:00 -0400
|
||||||
|
+
|
||||||
|
dovecot (1:2.2.9-1ubuntu2.1) trusty-security; urgency=medium
|
||||||
|
|
||||||
|
* SECURITY UPDATE: denial of service via SSL connection exhaustion
|
||||||
|
--- a/debian/copyright 2014-03-07 07:26:37.000000000 -0500
|
||||||
|
+++ b/debian/copyright 2015-05-23 18:17:42.668005535 -0400
|
||||||
|
@@ -1,3 +1,7 @@
|
||||||
|
+This package is a fork by Mail-in-a-box (https://mailinabox.email). Original
|
||||||
|
+copyright statement follows:
|
||||||
|
+----------------------------------------------------------------------------
|
||||||
|
+
|
||||||
|
This package was debianized by Jaldhar H. Vyas <jaldhar@debian.org> on
|
||||||
|
Tue, 3 Dec 2002 01:10:07 -0500.
|
||||||
|
|
||||||
80
ppa/postgrey.diff
Normal file
80
ppa/postgrey.diff
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
diff --git a/debian/NEWS b/debian/NEWS
|
||||||
|
index dd09744..de7b640 100644
|
||||||
|
--- a/debian/NEWS
|
||||||
|
+++ b/debian/NEWS
|
||||||
|
@@ -1,3 +1,9 @@
|
||||||
|
+postgrey (1.35-1+miab1)
|
||||||
|
+
|
||||||
|
+ Added DNSWL.org whitelisting.
|
||||||
|
+
|
||||||
|
+ -- Joshua Tauberer <jt@occams.info> Mon May 18 18:58:40 EDT 2015
|
||||||
|
+
|
||||||
|
postgrey (1.32-1) unstable; urgency=low
|
||||||
|
|
||||||
|
Postgrey is now listening to port 10023 and not 60000. The latter was an
|
||||||
|
diff --git a/debian/changelog b/debian/changelog
|
||||||
|
index 1058e15..e5e3557 100644
|
||||||
|
--- a/debian/changelog
|
||||||
|
+++ b/debian/changelog
|
||||||
|
@@ -1,3 +1,9 @@
|
||||||
|
+postgrey (1.35-1+miab1) trusty; urgency=low
|
||||||
|
+
|
||||||
|
+ * Added DNSWL.org whitelisting.
|
||||||
|
+
|
||||||
|
+ -- Joshua Tauberer <jt@occams.info> Mon, 18 May 2015 21:58:40 +0000
|
||||||
|
+
|
||||||
|
postgrey (1.35-1) unstable; urgency=low
|
||||||
|
|
||||||
|
* New upstream release (Closes: 756486)
|
||||||
|
diff --git a/debian/control b/debian/control
|
||||||
|
index ce12ba6..0a82855 100644
|
||||||
|
--- a/debian/control
|
||||||
|
+++ b/debian/control
|
||||||
|
@@ -1,14 +1,11 @@
|
||||||
|
Source: postgrey
|
||||||
|
Section: mail
|
||||||
|
Priority: optional
|
||||||
|
-Maintainer: Antonio Radici <antonio@debian.org>
|
||||||
|
-Uploaders: Jon Daley <jondaley-guest@alioth.debian.org>
|
||||||
|
+Maintainer: Joshua Tauberer <jt@occams.info>
|
||||||
|
Build-Depends: debhelper (>= 7), quilt
|
||||||
|
Build-Depends-Indep: po-debconf
|
||||||
|
Standards-Version: 3.9.6
|
||||||
|
Homepage: http://postgrey.schweikert.ch/
|
||||||
|
-Vcs-Browser: http://git.debian.org/?p=collab-maint/postgrey.git
|
||||||
|
-Vcs-Git: git://git.debian.org/git/collab-maint/postgrey.git
|
||||||
|
|
||||||
|
Package: postgrey
|
||||||
|
Architecture: all
|
||||||
|
@@ -25,3 +22,6 @@ Description: greylisting implementation for Postfix
|
||||||
|
.
|
||||||
|
While Postgrey is designed for use with Postfix, it can also be used
|
||||||
|
with Exim.
|
||||||
|
+ .
|
||||||
|
+ This version has been modified by Mail-in-a-Box to whitelist senders
|
||||||
|
+ in the DNSWL.org list. See https://mailinabox.email.
|
||||||
|
diff --git a/debian/copyright b/debian/copyright
|
||||||
|
index 3cbe377..bf09b89 100644
|
||||||
|
--- a/debian/copyright
|
||||||
|
+++ b/debian/copyright
|
||||||
|
@@ -1,6 +1,10 @@
|
||||||
|
+This package is a fork by Mail-in-a-Box (https://mailinabox.email). Original
|
||||||
|
+copyright statement follows:
|
||||||
|
+----------------------------------------------------------------------------
|
||||||
|
+
|
||||||
|
This Debian package was prepared by Adrian von Bidder <cmot@debian.org> in
|
||||||
|
July 2004, then the package was adopted by Antonio Radici <antonio@dyne.org>
|
||||||
|
-in Sept 2009
|
||||||
|
+in Sept 2009.
|
||||||
|
|
||||||
|
It was downloaded from http://postgrey.schweikert.ch/
|
||||||
|
|
||||||
|
diff --git a/debian/patches/series b/debian/patches/series
|
||||||
|
index f4c5e31..3cd62b8 100644
|
||||||
|
--- a/debian/patches/series
|
||||||
|
+++ b/debian/patches/series
|
||||||
|
@@ -1,3 +1,3 @@
|
||||||
|
imported-upstream-diff
|
||||||
|
disable-transaction-logic
|
||||||
|
-
|
||||||
|
+mailinabox
|
||||||
100
ppa/postgrey_sources.diff
Normal file
100
ppa/postgrey_sources.diff
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
Description: whitelist whatever dnswl.org whitelists
|
||||||
|
.
|
||||||
|
postgrey (1.35-1+miab1) unstable; urgency=low
|
||||||
|
.
|
||||||
|
* Added DNSWL.org whitelisting.
|
||||||
|
Author: Joshua Tauberer <jt@occams.info>
|
||||||
|
|
||||||
|
--- postgrey-1.35.orig/README
|
||||||
|
+++ postgrey-1.35/README
|
||||||
|
@@ -13,7 +13,7 @@ Requirements
|
||||||
|
- BerkeleyDB (Perl Module)
|
||||||
|
- Berkeley DB >= 4.1 (Library)
|
||||||
|
- Digest::SHA (Perl Module, only for --privacy option)
|
||||||
|
-
|
||||||
|
+- Net::DNS (Perl Module)
|
||||||
|
|
||||||
|
Documentation
|
||||||
|
-------------
|
||||||
|
--- postgrey-1.35.orig/postgrey
|
||||||
|
+++ postgrey-1.35/postgrey
|
||||||
|
@@ -18,6 +18,7 @@ use Fcntl ':flock'; # import LOCK_* cons
|
||||||
|
use Sys::Hostname;
|
||||||
|
use Sys::Syslog; # used only to find out which version we use
|
||||||
|
use POSIX qw(strftime setlocale LC_ALL);
|
||||||
|
+use Net::DNS; # for DNSWL.org whitelisting
|
||||||
|
|
||||||
|
use vars qw(@ISA);
|
||||||
|
@ISA = qw(Net::Server::Multiplex);
|
||||||
|
@@ -26,6 +27,8 @@ my $VERSION = '1.35';
|
||||||
|
my $DEFAULT_DBDIR = '/var/lib/postgrey';
|
||||||
|
my $CONFIG_DIR = '/etc/postgrey';
|
||||||
|
|
||||||
|
+my $dns_resolver = Net::DNS::Resolver->new;
|
||||||
|
+
|
||||||
|
sub cidr_parse($)
|
||||||
|
{
|
||||||
|
defined $_[0] or return undef;
|
||||||
|
@@ -48,6 +51,36 @@ sub cidr_match($$$)
|
||||||
|
return ($addr & $mask) == $net;
|
||||||
|
}
|
||||||
|
|
||||||
|
+sub reverseDottedQuad {
|
||||||
|
+ # This is the sub _chkValidPublicIP from Net::DNSBL by PJ Goodwin
|
||||||
|
+ # at http://www.the42.net/net-dnsbl.
|
||||||
|
+ my ($quad) = @_;
|
||||||
|
+ if ($quad =~ /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/) {
|
||||||
|
+ my ($ip1,$ip2,$ip3,$ip4) = ($1, $2, $3, $4);
|
||||||
|
+ if (
|
||||||
|
+ $ip1 == 10 || #10.0.0.0/8 (10/8)
|
||||||
|
+ ($ip1 == 172 && $ip2 >= 16 && $ip2 <= 31) || #172.16.0.0/12 (172.16/12)
|
||||||
|
+ ($ip1 == 192 && $ip2 == 168) || #192.168.0.0/16 (192.168/16)
|
||||||
|
+ $quad eq '127.0.0.1' # localhost
|
||||||
|
+ ) {
|
||||||
|
+ # toss the RFC1918 specified privates
|
||||||
|
+ return undef;
|
||||||
|
+ } elsif (
|
||||||
|
+ ($ip1 <= 1 || $ip1 > 254) ||
|
||||||
|
+ ($ip2 < 0 || $ip2 > 255) ||
|
||||||
|
+ ($ip3 < 0 || $ip3 > 255) ||
|
||||||
|
+ ($ip4 < 0 || $ip4 > 255)
|
||||||
|
+ ) {
|
||||||
|
+ #invalid oct, toss it;
|
||||||
|
+ return undef;
|
||||||
|
+ }
|
||||||
|
+ my $revquad = $ip4 . "." . $ip3 . "." . $ip2 . "." . $ip1;
|
||||||
|
+ return $revquad;
|
||||||
|
+ } else { # invalid quad
|
||||||
|
+ return undef;
|
||||||
|
+ }
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
sub read_clients_whitelists($)
|
||||||
|
{
|
||||||
|
my ($self) = @_;
|
||||||
|
@@ -361,6 +394,25 @@ sub smtpd_access_policy($$)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
+ # whitelist clients in dnswl.org
|
||||||
|
+ my $revip = reverseDottedQuad($attr->{client_address});
|
||||||
|
+ if ($revip) { # valid IP / plausibly in DNSWL
|
||||||
|
+ my $answer = $dns_resolver->send($revip . '.list.dnswl.org');
|
||||||
|
+ if ($answer && scalar($answer->answer) > 0) {
|
||||||
|
+ my @rrs = $answer->answer;
|
||||||
|
+ if ($rrs[0]->type eq 'A' && $rrs[0]->address ne '127.0.0.255') {
|
||||||
|
+ # Address appears in DNSWL. (127.0.0.255 means we were rate-limited.)
|
||||||
|
+ my $code = $rrs[0]->address;
|
||||||
|
+ if ($code =~ /^127.0.(\d+)\.([0-3])$/) {
|
||||||
|
+ my %dnswltrust = (0 => 'legitimate', 1 => 'occasional spam', 2 => 'rare spam', 3 => 'highly unlikely to send spam');
|
||||||
|
+ $code = $2 . '/' . $dnswltrust{$2};
|
||||||
|
+ }
|
||||||
|
+ $self->mylog_action($attr, 'pass', 'client whitelisted by dnswl.org (' . $code . ')');
|
||||||
|
+ return 'DUNNO';
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
# auto whitelist clients (see below for explanation)
|
||||||
|
my ($cawl_db, $cawl_key, $cawl_count, $cawl_last);
|
||||||
|
if($self->{postgrey}{awl_clients}) {
|
||||||
107
security.md
Normal file
107
security.md
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
Mail-in-a-Box Security Guide
|
||||||
|
============================
|
||||||
|
|
||||||
|
Mail-in-a-Box turns a fresh Ubuntu 14.04 LTS 64-bit machine into a mail server appliance by installing and configuring various components.
|
||||||
|
|
||||||
|
This page documents the security features of Mail-in-a-Box. The term “box” is used below to mean a configured Mail-in-a-Box.
|
||||||
|
|
||||||
|
Threat Model
|
||||||
|
------------
|
||||||
|
|
||||||
|
Nothing is perfectly secure, and an adversary with sufficient resources can always penetrate a system.
|
||||||
|
|
||||||
|
The primary goal of Mail-in-a-Box is to make deploying a good mail server easy, so we balance ― as everyone does ― privacy and security concerns with the practicality of actually deploying the system. That means we make certain assumptions about adversaries. We assume that adversaries . . .
|
||||||
|
|
||||||
|
* Do not have physical access to the box (i.e., we do not aim to protect the box from physical access).
|
||||||
|
* Have not been given Unix accounts on the box (i.e., we assume all users with shell access are trusted).
|
||||||
|
|
||||||
|
On the other hand, we do assume that adversaries are performing passive surveillance and, possibly, active man-in-the-middle attacks. And so:
|
||||||
|
|
||||||
|
* User credentials are always sent through SSH/TLS, never in the clear.
|
||||||
|
* Outbound mail is sent with the highest level of TLS possible (more on that below).
|
||||||
|
|
||||||
|
User Credentials
|
||||||
|
----------------
|
||||||
|
|
||||||
|
The box's administrator and its (non-administrative) mail users must sometimes communicate their credentials to the box.
|
||||||
|
|
||||||
|
### Services behind TLS
|
||||||
|
|
||||||
|
These services are protected by [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security):
|
||||||
|
|
||||||
|
* SMTP Submission (port 587). Mail users submit outbound mail through SMTP with STARTTLS on port 587.
|
||||||
|
* IMAP/POP (ports 993, 995). Mail users check for incoming mail through IMAP or POP over TLS.
|
||||||
|
* HTTPS (port 443). Webmail, the Exchange/ActiveSync protocol, the administrative control panel, and any static hosted websites are accessed over HTTPS.
|
||||||
|
|
||||||
|
The services all follow these rules:
|
||||||
|
|
||||||
|
* SSL certificates are generated with 2048-bit RSA keys and SHA-256 fingerprints. The box provides a self-signed certificate by default. The [setup guide](https://mailinabox.email/guide.html) explains how to verify the certificate fingerprint on first login. Users are encouraged to replace the certificate with a proper CA-signed one. ([source](setup/ssl.sh))
|
||||||
|
* Only TLSv1, TLSv1.1 and TLSv1.2 are offered (the older SSL protocols are not offered).
|
||||||
|
* Export-grade ciphers, the anonymous DH/ECDH algorithms (aNULL), and clear-text ciphers (eNULL) are not offered.
|
||||||
|
* The minimum cipher key length offered is 112 bits. The maximum is 256 bits. Diffie-Hellman ciphers use a 2048-bit key for forward secrecy.
|
||||||
|
|
||||||
|
Additionally:
|
||||||
|
|
||||||
|
* SMTP Submission (port 587) will not accept user credentials without STARTTLS (true also of SMTP on port 25 in case of client misconfiguration), and the submission port won't accept mail without encryption. The minimum cipher key length is 128 bits. (The box is of course configured not to be an open relay. User credentials are required to send outbound mail.) ([source](setup/mail-postfix.sh))
|
||||||
|
* HTTPS (port 443): The HTTPS Strict Transport Security header is set. A redirect from HTTP to HTTPS is offered. The [Qualys SSL Labs test](https://www.ssllabs.com/ssltest) should report an A+ grade. ([source 1](conf/nginx-ssl.conf), [source 2](conf/nginx.conf))
|
||||||
|
|
||||||
|
For more details, see the [output of SSLyze for these ports](tests/tls_results.txt).
|
||||||
|
|
||||||
|
The cipher and protocol selection are chosen to support the following clients:
|
||||||
|
|
||||||
|
* For HTTPS: Firefox 1, Chrome 1, IE 7, Opera 5, Safari 1, Windows XP IE8, Android 2.3, Java 7.
|
||||||
|
* For other protocols: TBD.
|
||||||
|
|
||||||
|
### Password Storage
|
||||||
|
|
||||||
|
The passwords for mail users are stored on disk using the [SHA512-CRYPT](http://man7.org/linux/man-pages/man3/crypt.3.html) hashing scheme. ([source](management/mailconfig.py))
|
||||||
|
|
||||||
|
When using the web-based administrative control panel, after logging in an API key is placed in the browser's local storage (rather than, say, the user's actual password). The API key is an HMAC based on the user's email address and current password, and it is keyed by a secret known only to the control panel service. By resetting an administrator's password, any HMACs previously generated for that user will expire.
|
||||||
|
|
||||||
|
### Console access
|
||||||
|
|
||||||
|
Console access (e.g. via SSH) is configured by the system image used to create the box, typically from by a cloud virtual machine provider (e.g. Digital Ocean). Mail-in-a-Box does not set any console access settings, although it will warn the administrator in the System Status Checks if password-based login is turned on.
|
||||||
|
|
||||||
|
The [setup guide video](https://mailinabox.email/) explains how to verify the host key fingerprint on first login.
|
||||||
|
|
||||||
|
If DNSSEC is enabled at the box's domain name's registrar, the SSHFP record that the box automatically puts into DNS can also be used to verify the host key fingerprint by setting `VerifyHostKeyDNS yes` in your `ssh/.config` file or by logging in with `ssh -o VerifyHostKeyDNS=yes`. ([source](management/dns_update.py))
|
||||||
|
|
||||||
|
Outbound Mail
|
||||||
|
-------------
|
||||||
|
|
||||||
|
The basic protocols of email delivery did not plan for the presence of adversaries on the network. For a number of reasons it is not possible in most cases to guarantee that a connection to a recipient server is secure.
|
||||||
|
|
||||||
|
### DNSSEC
|
||||||
|
|
||||||
|
The first step in resolving the destination server for an email address is performing a DNS look-up for the MX record of the domain name. The box uses a locally-running [DNSSEC](https://en.wikipedia.org/wiki/DNSSEC)-aware nameserver to perform the lookup. If the domain name has DNSSEC enabled, DNSSEC guards against DNS records being tampered with.
|
||||||
|
|
||||||
|
### Encryption
|
||||||
|
|
||||||
|
The box (along with the vast majority of mail servers) uses [opportunistic encryption](https://en.wikipedia.org/wiki/Opportunistic_encryption), meaning the mail is encrypted in transit and protected from passive eavesdropping, but it is not protected from an active man-in-the-middle attack. Modern encryption settings will be used to the extent the recipient server supports them. ([source](setup/mail-postfix.sh))
|
||||||
|
|
||||||
|
### DANE
|
||||||
|
|
||||||
|
If the recipient's domain name supports DNSSEC and has published a [DANE TLSA](https://en.wikipedia.org/wiki/DNS-based_Authentication_of_Named_Entities) record, then on-the-wire encryption is forced between the box and the recipient MTA and this encryption is not subject to a man-in-the-middle attack. The TLSA record contains a certificate fingerprint which the receiving MTA (server) must present to the box. ([source](setup/mail-postfix.sh))
|
||||||
|
|
||||||
|
### Domain Policy Records
|
||||||
|
|
||||||
|
Domain policy records allow recipient MTAs to detect when the _domain_ part of of the sender address in incoming mail has been spoofed. All outbound mail is signed with [DKIM](https://en.wikipedia.org/wiki/DomainKeys_Identified_Mail) and "quarantine" [DMARC](https://en.wikipedia.org/wiki/DMARC) records are automatically set in DNS. Receiving MTAs that implement DMARC will automatically quarantine mail that is "From:" a domain hosted by the box but which was not sent by the box. (Strong [SPF](https://en.wikipedia.org/wiki/Sender_Policy_Framework) records are also automatically set in DNS.) ([source](management/dns_update.py))
|
||||||
|
|
||||||
|
### User Policy
|
||||||
|
|
||||||
|
While domain policy records prevent other servers from sending mail with a "From:" header that matches a domain hosted on the box (see above), those policy records do not guarnatee that the user portion of the sender email address matches the actual sender. In enterprise environments where the box may host the mail of untrusted users, it is important to guard against users impersonating other users. The box restricts the envelope sender address that users may put into outbound mail to either a) their own email address (their SMTP login username) or b) any alias that they are listed as a direct recipient of. Note that the envelope sender address is not the same as the "From:" header.
|
||||||
|
|
||||||
|
Incoming Mail
|
||||||
|
-------------
|
||||||
|
|
||||||
|
### Encryption
|
||||||
|
|
||||||
|
As discussed above, there is no way to require on-the-wire encryption of mail. When the box receives an incoming email (SMTP on port 25), it offers encryption (STARTTLS) but cannot require that senders use it because some senders may not support STARTTLS at all and other senders may support STARTTLS but not with the latest protocols/ciphers. To give senders the best chance at making use of encryption, the box offers protocols back to SSLv3 and ciphers with key lengths as low as 112 bits. Modern clients (senders) will make use of the 256-bit ciphers and Diffie-Hellman ciphers with a 2048-bit key for forward secrecy, however. ([source](setup/mail-postfix.sh))
|
||||||
|
|
||||||
|
### DANE
|
||||||
|
|
||||||
|
When DNSSEC is enabled at the box's domain name's registrar, [DANE TLSA](https://en.wikipedia.org/wiki/DNS-based_Authentication_of_Named_Entities) records are automatically published in DNS. Senders supporting DANE will enforce encryption on-the-wire between them and the box --- see the section on DANE for outgoing mail above. ([source](management/dns_update.py))
|
||||||
|
|
||||||
|
### Filters
|
||||||
|
|
||||||
|
Incoming mail is run through several filters. Email is bounced if the sender's IP address is listed in the [Spamhaus Zen blacklist](http://www.spamhaus.org/zen/) or if the sender's domain is listed in the [Spamhaus Domain Block List](http://www.spamhaus.org/dbl/). Greylisting (with [postgrey](http://postgrey.schweikert.ch/)) is also used to cut down on spam. ([source](setup/mail-postfix.sh))
|
||||||
@@ -7,7 +7,7 @@
|
|||||||
#########################################################
|
#########################################################
|
||||||
|
|
||||||
if [ -z "$TAG" ]; then
|
if [ -z "$TAG" ]; then
|
||||||
TAG=v0.06
|
TAG=v0.11
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Are we running as root?
|
# Are we running as root?
|
||||||
@@ -18,9 +18,12 @@ fi
|
|||||||
|
|
||||||
# Clone the Mail-in-a-Box repository if it doesn't exist.
|
# Clone the Mail-in-a-Box repository if it doesn't exist.
|
||||||
if [ ! -d $HOME/mailinabox ]; then
|
if [ ! -d $HOME/mailinabox ]; then
|
||||||
echo Installing git . . .
|
if [ ! -f /usr/bin/git ]; then
|
||||||
DEBIAN_FRONTEND=noninteractive apt-get -q -q install -y git < /dev/null
|
echo Installing git . . .
|
||||||
echo
|
apt-get -q -q update
|
||||||
|
DEBIAN_FRONTEND=noninteractive apt-get -q -q install -y git < /dev/null
|
||||||
|
echo
|
||||||
|
fi
|
||||||
|
|
||||||
echo Downloading Mail-in-a-Box $TAG. . .
|
echo Downloading Mail-in-a-Box $TAG. . .
|
||||||
git clone \
|
git clone \
|
||||||
|
|||||||
41
setup/dkim.sh
Normal file → Executable file
41
setup/dkim.sh
Normal file → Executable file
@@ -10,7 +10,7 @@ source setup/functions.sh # load our functions
|
|||||||
source /etc/mailinabox.conf # load global vars
|
source /etc/mailinabox.conf # load global vars
|
||||||
|
|
||||||
# Install DKIM...
|
# Install DKIM...
|
||||||
apt_install opendkim opendkim-tools
|
apt_install opendkim opendkim-tools opendmarc
|
||||||
|
|
||||||
# Make sure configuration directories exist.
|
# Make sure configuration directories exist.
|
||||||
mkdir -p /etc/opendkim;
|
mkdir -p /etc/opendkim;
|
||||||
@@ -35,28 +35,47 @@ RequireSafeKeys false
|
|||||||
EOF
|
EOF
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create a new DKIM key. This creates
|
# Create a new DKIM key. This creates mail.private and mail.txt
|
||||||
# mail.private and mail.txt in $STORAGE_ROOT/mail/dkim. The former
|
# in $STORAGE_ROOT/mail/dkim. The former is the private key and
|
||||||
# is the actual private key and the latter is the suggested DNS TXT
|
# the latter is the suggested DNS TXT entry which we'll include
|
||||||
# entry which we'll want to include in our DNS setup.
|
# in our DNS setup. Note tha the files are named after the
|
||||||
|
# 'selector' of the key, which we can change later on to support
|
||||||
|
# key rotation.
|
||||||
|
#
|
||||||
|
# A 1024-bit key is seen as a minimum standard by several providers
|
||||||
|
# such as Google. But they and others use a 2048 bit key, so we'll
|
||||||
|
# do the same. Keys beyond 2048 bits may exceed DNS record limits.
|
||||||
if [ ! -f "$STORAGE_ROOT/mail/dkim/mail.private" ]; then
|
if [ ! -f "$STORAGE_ROOT/mail/dkim/mail.private" ]; then
|
||||||
# Should we specify -h rsa-sha256?
|
opendkim-genkey -b 2048 -r -s mail -D $STORAGE_ROOT/mail/dkim
|
||||||
opendkim-genkey -r -s mail -D $STORAGE_ROOT/mail/dkim
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Ensure files are owned by the opendkim user and are private otherwise.
|
# Ensure files are owned by the opendkim user and are private otherwise.
|
||||||
chown -R opendkim:opendkim $STORAGE_ROOT/mail/dkim
|
chown -R opendkim:opendkim $STORAGE_ROOT/mail/dkim
|
||||||
chmod go-rwx $STORAGE_ROOT/mail/dkim
|
chmod go-rwx $STORAGE_ROOT/mail/dkim
|
||||||
|
|
||||||
# Add OpenDKIM as a milter to postfix, which is how it intercepts outgoing
|
tools/editconf.py /etc/opendmarc.conf -s \
|
||||||
# mail to perform the signing (by adding a mail header).
|
"Syslog=true" \
|
||||||
# Be careful. If we add other milters later, it needs to be concatenated on the smtpd_milters line. #NODOC
|
"Socket=inet:8893@[127.0.0.1]"
|
||||||
|
|
||||||
|
# Add OpenDKIM and OpenDMARC as milters to postfix, which is how OpenDKIM
|
||||||
|
# intercepts outgoing mail to perform the signing (by adding a mail header)
|
||||||
|
# and how they both intercept incoming mail to add Authentication-Results
|
||||||
|
# headers. The order possibly/probably matters: OpenDMARC relies on the
|
||||||
|
# OpenDKIM Authentication-Results header already being present.
|
||||||
|
#
|
||||||
|
# Be careful. If we add other milters later, this needs to be concatenated
|
||||||
|
# on the smtpd_milters line.
|
||||||
|
#
|
||||||
|
# The OpenDMARC milter is skipped in the SMTP submission listener by
|
||||||
|
# configuring smtpd_milters there to only list the OpenDKIM milter
|
||||||
|
# (see mail-postfix.sh).
|
||||||
tools/editconf.py /etc/postfix/main.cf \
|
tools/editconf.py /etc/postfix/main.cf \
|
||||||
smtpd_milters=inet:127.0.0.1:8891 \
|
"smtpd_milters=inet:127.0.0.1:8891 inet:127.0.0.1:8893"\
|
||||||
non_smtpd_milters=\$smtpd_milters \
|
non_smtpd_milters=\$smtpd_milters \
|
||||||
milter_default_action=accept
|
milter_default_action=accept
|
||||||
|
|
||||||
# Restart services.
|
# Restart services.
|
||||||
restart_service opendkim
|
restart_service opendkim
|
||||||
|
restart_service opendmarc
|
||||||
restart_service postfix
|
restart_service postfix
|
||||||
|
|
||||||
|
|||||||
43
setup/dns.sh
43
setup/dns.sh
@@ -10,19 +10,7 @@
|
|||||||
source setup/functions.sh # load our functions
|
source setup/functions.sh # load our functions
|
||||||
source /etc/mailinabox.conf # load global vars
|
source /etc/mailinabox.conf # load global vars
|
||||||
|
|
||||||
# Install `nsd`, our DNS server software, and `ldnsutils` which helps
|
# Install the packages.
|
||||||
# us sign zones for DNSSEC.
|
|
||||||
|
|
||||||
# ...but first, we have to create the user because the
|
|
||||||
# current Ubuntu forgets to do so in the .deb
|
|
||||||
# (see issue #25 and https://bugs.launchpad.net/ubuntu/+source/nsd/+bug/1311886)
|
|
||||||
if id nsd > /dev/null 2>&1; then
|
|
||||||
true #echo "nsd user exists... good"; #NODOC
|
|
||||||
else
|
|
||||||
useradd nsd;
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Okay now install the packages.
|
|
||||||
#
|
#
|
||||||
# * nsd: The non-recursive nameserver that publishes our DNS records.
|
# * nsd: The non-recursive nameserver that publishes our DNS records.
|
||||||
# * ldnsutils: Helper utilities for signing DNSSEC zones.
|
# * ldnsutils: Helper utilities for signing DNSSEC zones.
|
||||||
@@ -34,6 +22,35 @@ apt_install nsd ldnsutils openssh-client
|
|||||||
|
|
||||||
mkdir -p /var/run/nsd
|
mkdir -p /var/run/nsd
|
||||||
|
|
||||||
|
cat > /etc/nsd/nsd.conf << EOF;
|
||||||
|
# No not edit. Overwritten by Mail-in-a-Box setup.
|
||||||
|
server:
|
||||||
|
hide-version: yes
|
||||||
|
|
||||||
|
# identify the server (CH TXT ID.SERVER entry).
|
||||||
|
identity: ""
|
||||||
|
|
||||||
|
# The directory for zonefile: files.
|
||||||
|
zonesdir: "/etc/nsd/zones"
|
||||||
|
|
||||||
|
# Allows NSD to bind to IP addresses that are not (yet) added to the
|
||||||
|
# network interface. This allows nsd to start even if the network stack
|
||||||
|
# isn't fully ready, which apparently happens in some cases.
|
||||||
|
# See https://www.nlnetlabs.nl/projects/nsd/nsd.conf.5.html.
|
||||||
|
ip-transparent: yes
|
||||||
|
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Since we have bind9 listening on localhost for locally-generated
|
||||||
|
# DNS queries that require a recursive nameserver, and the system
|
||||||
|
# might have other network interfaces for e.g. tunnelling, we have
|
||||||
|
# to be specific about the network interfaces that nsd binds to.
|
||||||
|
for ip in $PRIVATE_IP $PRIVATE_IPV6; do
|
||||||
|
echo " ip-address: $ip" >> /etc/nsd/nsd.conf;
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "include: /etc/nsd/zones.conf" >> /etc/nsd/nsd.conf;
|
||||||
|
|
||||||
# Create DNSSEC signing keys.
|
# Create DNSSEC signing keys.
|
||||||
|
|
||||||
mkdir -p "$STORAGE_ROOT/dns/dnssec";
|
mkdir -p "$STORAGE_ROOT/dns/dnssec";
|
||||||
|
|||||||
@@ -9,19 +9,35 @@ function hide_output {
|
|||||||
$@ &> $OUTPUT
|
$@ &> $OUTPUT
|
||||||
|
|
||||||
# If the command failed, show the output that was captured in the temporary file.
|
# If the command failed, show the output that was captured in the temporary file.
|
||||||
if [ $? != 0 ]; then
|
E=$?
|
||||||
|
if [ $E != 0 ]; then
|
||||||
# Something failed.
|
# Something failed.
|
||||||
echo
|
echo
|
||||||
echo FAILED: $@
|
echo FAILED: $@
|
||||||
echo -----------------------------------------
|
echo -----------------------------------------
|
||||||
cat $OUTPUT
|
cat $OUTPUT
|
||||||
echo -----------------------------------------
|
echo -----------------------------------------
|
||||||
|
exit $E
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Remove temporary file.
|
# Remove temporary file.
|
||||||
rm -f $OUTPUT
|
rm -f $OUTPUT
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function apt_get_quiet {
|
||||||
|
# Run apt-get in a totally non-interactive mode.
|
||||||
|
#
|
||||||
|
# Somehow all of these options are needed to get it to not ask the user
|
||||||
|
# questions about a) whether to proceed (-y), b) package options (noninteractive),
|
||||||
|
# and c) what to do about files changed locally (we don't cause that to happen but
|
||||||
|
# some VM providers muck with their images; -o).
|
||||||
|
#
|
||||||
|
# Although we could pass -qq to apt-get to make output quieter, many packages write to stdout
|
||||||
|
# and stderr things that aren't really important. Use our hide_output function to capture
|
||||||
|
# all of that and only show it if there is a problem (i.e. if apt_get returns a failure exit status).
|
||||||
|
DEBIAN_FRONTEND=noninteractive hide_output apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confnew" "$@"
|
||||||
|
}
|
||||||
|
|
||||||
function apt_install {
|
function apt_install {
|
||||||
# Report any packages already installed.
|
# Report any packages already installed.
|
||||||
PACKAGES=$@
|
PACKAGES=$@
|
||||||
@@ -46,18 +62,10 @@ function apt_install {
|
|||||||
echo installing $TO_INSTALL...
|
echo installing $TO_INSTALL...
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# 'DEBIAN_FRONTEND=noninteractive' is to prevent dbconfig-common from asking you questions.
|
# We still include the whole original package list in the apt-get command in
|
||||||
#
|
|
||||||
# Although we could pass -qq to apt-get to make output quieter, many packages write to stdout
|
|
||||||
# and stderr things that aren't really important. Use our hide_output function to capture
|
|
||||||
# all of that and only show it if there is a problem (i.e. if apt_get returns a failure exit status).
|
|
||||||
#
|
|
||||||
# Also note that we still include the whole original package list in the apt-get command in
|
|
||||||
# case it wants to upgrade anything, I guess? Maybe we can remove it. Doesn't normally make
|
# case it wants to upgrade anything, I guess? Maybe we can remove it. Doesn't normally make
|
||||||
# a difference.
|
# a difference.
|
||||||
DEBIAN_FRONTEND=noninteractive \
|
apt_get_quiet install $PACKAGES
|
||||||
hide_output \
|
|
||||||
apt-get -y install $PACKAGES
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function get_default_hostname {
|
function get_default_hostname {
|
||||||
@@ -173,3 +181,43 @@ function input_menu {
|
|||||||
result=$(dialog --stdout --title "$1" --menu "$2" 0 0 0 $3)
|
result=$(dialog --stdout --title "$1" --menu "$2" 0 0 0 $3)
|
||||||
result_code=$?
|
result_code=$?
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function wget_verify {
|
||||||
|
# Downloads a file from the web and checks that it matches
|
||||||
|
# a provided hash. If the comparison fails, exit immediately.
|
||||||
|
URL=$1
|
||||||
|
HASH=$2
|
||||||
|
DEST=$3
|
||||||
|
CHECKSUM="$HASH $DEST"
|
||||||
|
rm -f $DEST
|
||||||
|
wget -q -O $DEST $URL || exit 1
|
||||||
|
if ! echo "$CHECKSUM" | sha1sum --check --strict > /dev/null; then
|
||||||
|
echo "------------------------------------------------------------"
|
||||||
|
echo "Download of $URL did not match expected checksum."
|
||||||
|
echo "Found:"
|
||||||
|
sha1sum $DEST
|
||||||
|
echo
|
||||||
|
echo "Expected:"
|
||||||
|
echo "$CHECKSUM"
|
||||||
|
rm -f $DEST
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function git_clone {
|
||||||
|
# Clones a git repository, checks out a particular commit or tag,
|
||||||
|
# and moves the repository (or a subdirectory in it) to some path.
|
||||||
|
# We use separate clone and checkout because -b only supports tags
|
||||||
|
# and branches, but we sometimes want to reference a commit hash
|
||||||
|
# directly when the repo doesn't provide a tag.
|
||||||
|
REPO=$1
|
||||||
|
TREEISH=$2
|
||||||
|
SUBDIR=$3
|
||||||
|
TARGETPATH=$4
|
||||||
|
TMPPATH=/tmp/git-clone-$$
|
||||||
|
rm -rf $TMPPATH $TARGETPATH
|
||||||
|
git clone -q $REPO $TMPPATH || exit 1
|
||||||
|
(cd $TMPPATH; git checkout -q $TREEISH;) || exit 1
|
||||||
|
mv $TMPPATH/$SUBDIR $TARGETPATH
|
||||||
|
rm -rf $TMPPATH
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
#
|
#
|
||||||
# Dovecot (IMAP and LDA)
|
# Dovecot (IMAP/POP and LDA)
|
||||||
# ----------------------
|
# ----------------------
|
||||||
#
|
#
|
||||||
# Dovecot is *both* the IMAP server (the protocol that email applications
|
# Dovecot is *both* the IMAP/POP server (the protocol that email applications
|
||||||
# use to query a mailbox) as well as the local delivery agent (LDA),
|
# use to query a mailbox) as well as the local delivery agent (LDA),
|
||||||
# meaning it is responsible for writing emails to mailbox storage on disk.
|
# meaning it is responsible for writing emails to mailbox storage on disk.
|
||||||
# You could imagine why these things would be bundled together.
|
# You could imagine why these things would be bundled together.
|
||||||
@@ -18,13 +18,17 @@
|
|||||||
source setup/functions.sh # load our functions
|
source setup/functions.sh # load our functions
|
||||||
source /etc/mailinabox.conf # load global vars
|
source /etc/mailinabox.conf # load global vars
|
||||||
|
|
||||||
# Install packages...
|
|
||||||
|
# Install packages for dovecot. These are all core dovecot plugins,
|
||||||
|
# but dovecot-lucene is packaged by *us* in the Mail-in-a-Box PPA,
|
||||||
|
# not by Ubuntu.
|
||||||
|
|
||||||
apt_install \
|
apt_install \
|
||||||
dovecot-core dovecot-imapd dovecot-lmtpd dovecot-sqlite sqlite3 \
|
dovecot-core dovecot-imapd dovecot-pop3d dovecot-lmtpd dovecot-sqlite sqlite3 \
|
||||||
dovecot-sieve dovecot-managesieved
|
dovecot-sieve dovecot-managesieved dovecot-lucene
|
||||||
|
|
||||||
# The `dovecot-imapd` and `dovecot-lmtpd` packages automatically enable IMAP and LMTP protocols.
|
# The `dovecot-imapd`, `dovecot-pop3d`, and `dovecot-lmtpd` packages automatically
|
||||||
|
# enable IMAP, POP and LMTP protocols.
|
||||||
|
|
||||||
# Set basic daemon options.
|
# Set basic daemon options.
|
||||||
|
|
||||||
@@ -51,7 +55,7 @@ tools/editconf.py /etc/dovecot/conf.d/10-mail.conf \
|
|||||||
mail_privileged_group=mail \
|
mail_privileged_group=mail \
|
||||||
first_valid_uid=0
|
first_valid_uid=0
|
||||||
|
|
||||||
# ### IMAP
|
# ### IMAP/POP
|
||||||
|
|
||||||
# Require that passwords are sent over SSL only, and allow the usual IMAP authentication mechanisms.
|
# Require that passwords are sent over SSL only, and allow the usual IMAP authentication mechanisms.
|
||||||
# The LOGIN mechanism is supposedly for Microsoft products like Outlook to do SMTP login (I guess
|
# The LOGIN mechanism is supposedly for Microsoft products like Outlook to do SMTP login (I guess
|
||||||
@@ -69,9 +73,9 @@ tools/editconf.py /etc/dovecot/conf.d/10-ssl.conf \
|
|||||||
"ssl_protocols=!SSLv3 !SSLv2" \
|
"ssl_protocols=!SSLv3 !SSLv2" \
|
||||||
"ssl_cipher_list=TLSv1+HIGH !SSLv2 !RC4 !aNULL !eNULL !3DES @STRENGTH"
|
"ssl_cipher_list=TLSv1+HIGH !SSLv2 !RC4 !aNULL !eNULL !3DES @STRENGTH"
|
||||||
|
|
||||||
# Disable in-the-clear IMAP because there is no reason for a user to transmit
|
# Disable in-the-clear IMAP/POP because there is no reason for a user to transmit
|
||||||
# login credentials outside of an encrypted connection. Although we haven't
|
# login credentials outside of an encrypted connection. Only the over-TLS versions
|
||||||
# even installed the POP server, ensure it is disabled too.
|
# are made available (IMAPS on port 993; POP3S on port 995).
|
||||||
sed -i "s/#port = 143/port = 0/" /etc/dovecot/conf.d/10-master.conf
|
sed -i "s/#port = 143/port = 0/" /etc/dovecot/conf.d/10-master.conf
|
||||||
sed -i "s/#port = 110/port = 0/" /etc/dovecot/conf.d/10-master.conf
|
sed -i "s/#port = 110/port = 0/" /etc/dovecot/conf.d/10-master.conf
|
||||||
|
|
||||||
@@ -84,6 +88,24 @@ sed -i "s/#port = 110/port = 0/" /etc/dovecot/conf.d/10-master.conf
|
|||||||
tools/editconf.py /etc/dovecot/conf.d/20-imap.conf \
|
tools/editconf.py /etc/dovecot/conf.d/20-imap.conf \
|
||||||
imap_idle_notify_interval="4 mins"
|
imap_idle_notify_interval="4 mins"
|
||||||
|
|
||||||
|
# Set POP3 UIDL
|
||||||
|
# UIDLs are used by POP3 clients to keep track of what messages they've downloaded.
|
||||||
|
# For new POP3 servers, the easiest way to set up UIDLs is to use IMAP's UIDVALIDITY
|
||||||
|
# and UID values, the default in Dovecot.
|
||||||
|
tools/editconf.py /etc/dovecot/conf.d/20-pop3.conf \
|
||||||
|
pop3_uidl_format="%08Xu%08Xv"
|
||||||
|
|
||||||
|
# Full Text Search - Enable full text search of mail using dovecot's lucene plugin,
|
||||||
|
# which *we* package and distribute (dovecot-lucene package).
|
||||||
|
tools/editconf.py /etc/dovecot/conf.d/10-mail.conf \
|
||||||
|
mail_plugins="\$mail_plugins fts fts_lucene"
|
||||||
|
cat > /etc/dovecot/conf.d/90-plugin-fts.conf << EOF;
|
||||||
|
plugin {
|
||||||
|
fts = lucene
|
||||||
|
fts_lucene = whitespace_chars=@.
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
# ### LDA (LMTP)
|
# ### LDA (LMTP)
|
||||||
|
|
||||||
# Enable Dovecot's LDA service with the LMTP protocol. It will listen
|
# Enable Dovecot's LDA service with the LMTP protocol. It will listen
|
||||||
@@ -162,8 +184,9 @@ chown -R mail.mail $STORAGE_ROOT/mail/mailboxes
|
|||||||
mkdir -p $STORAGE_ROOT/mail/sieve
|
mkdir -p $STORAGE_ROOT/mail/sieve
|
||||||
chown -R mail.mail $STORAGE_ROOT/mail/sieve
|
chown -R mail.mail $STORAGE_ROOT/mail/sieve
|
||||||
|
|
||||||
# Allow the IMAP port in the firewall.
|
# Allow the IMAP/POP ports in the firewall.
|
||||||
ufw_allow imaps
|
ufw_allow imaps
|
||||||
|
ufw_allow pop3s
|
||||||
|
|
||||||
# Restart services.
|
# Restart services.
|
||||||
restart_service dovecot
|
restart_service dovecot
|
||||||
|
|||||||
@@ -41,6 +41,13 @@ source /etc/mailinabox.conf # load global vars
|
|||||||
# always will.
|
# always will.
|
||||||
# * `ca-certificates`: A trust store used to squelch postfix warnings about
|
# * `ca-certificates`: A trust store used to squelch postfix warnings about
|
||||||
# untrusted opportunistically-encrypted connections.
|
# untrusted opportunistically-encrypted connections.
|
||||||
|
#
|
||||||
|
# postgrey is going to come in via the Mail-in-a-Box PPA, which publishes
|
||||||
|
# a modified version of postgrey that lets senders whitelisted by dnswl.org
|
||||||
|
# pass through without being greylisted. So please note [dnswl's license terms](https://www.dnswl.org/?page_id=9):
|
||||||
|
# > Every user with more than 100’000 queries per day on the public nameserver
|
||||||
|
# > infrastructure and every commercial vendor of dnswl.org data (eg through
|
||||||
|
# > anti-spam solutions) must register with dnswl.org and purchase a subscription.
|
||||||
|
|
||||||
apt_install postfix postfix-pcre postgrey ca-certificates
|
apt_install postfix postfix-pcre postgrey ca-certificates
|
||||||
|
|
||||||
@@ -62,6 +69,11 @@ tools/editconf.py /etc/postfix/main.cf \
|
|||||||
|
|
||||||
# Enable the 'submission' port 587 smtpd server and tweak its settings.
|
# Enable the 'submission' port 587 smtpd server and tweak its settings.
|
||||||
#
|
#
|
||||||
|
# * Do not add the OpenDMAC Authentication-Results header. That should only be added
|
||||||
|
# on incoming mail. Omit the OpenDMARC milter by re-setting smtpd_milters to the
|
||||||
|
# OpenDKIM milter only. See dkim.sh.
|
||||||
|
# * Even though we dont allow auth over non-TLS connections (smtpd_tls_auth_only below, and without auth the client cant
|
||||||
|
# send outbound mail), don't allow non-TLS mail submission on this port anyway to prevent accidental misconfiguration.
|
||||||
# * Require the best ciphers for incoming connections per http://baldric.net/2013/12/07/tls-ciphers-in-postfix-and-dovecot/.
|
# * Require the best ciphers for incoming connections per http://baldric.net/2013/12/07/tls-ciphers-in-postfix-and-dovecot/.
|
||||||
# By putting this setting here we leave opportunistic TLS on incoming mail at default cipher settings (any cipher is better than none).
|
# By putting this setting here we leave opportunistic TLS on incoming mail at default cipher settings (any cipher is better than none).
|
||||||
# * Give it a different name in syslog to distinguish it from the port 25 smtpd server.
|
# * Give it a different name in syslog to distinguish it from the port 25 smtpd server.
|
||||||
@@ -71,7 +83,9 @@ tools/editconf.py /etc/postfix/main.cf \
|
|||||||
tools/editconf.py /etc/postfix/master.cf -s -w \
|
tools/editconf.py /etc/postfix/master.cf -s -w \
|
||||||
"submission=inet n - - - - smtpd
|
"submission=inet n - - - - smtpd
|
||||||
-o syslog_name=postfix/submission
|
-o syslog_name=postfix/submission
|
||||||
-o smtpd_tls_ciphers=high -o smtpd_tls_protocols=!SSLv2,!SSLv3
|
-o smtpd_milters=inet:127.0.0.1:8891
|
||||||
|
-o smtpd_tls_security_level=encrypt
|
||||||
|
-o smtpd_tls_ciphers=high -o smtpd_tls_exclude_ciphers=aNULL,DES,3DES,MD5,DES+MD5,RC4 -o smtpd_tls_mandatory_protocols=!SSLv2,!SSLv3
|
||||||
-o cleanup_service_name=authclean" \
|
-o cleanup_service_name=authclean" \
|
||||||
"authclean=unix n - - - 0 cleanup
|
"authclean=unix n - - - 0 cleanup
|
||||||
-o header_checks=pcre:/etc/postfix/outgoing_mail_header_filters"
|
-o header_checks=pcre:/etc/postfix/outgoing_mail_header_filters"
|
||||||
@@ -90,6 +104,8 @@ tools/editconf.py /etc/postfix/main.cf \
|
|||||||
smtpd_tls_cert_file=$STORAGE_ROOT/ssl/ssl_certificate.pem \
|
smtpd_tls_cert_file=$STORAGE_ROOT/ssl/ssl_certificate.pem \
|
||||||
smtpd_tls_key_file=$STORAGE_ROOT/ssl/ssl_private_key.pem \
|
smtpd_tls_key_file=$STORAGE_ROOT/ssl/ssl_private_key.pem \
|
||||||
smtpd_tls_dh1024_param_file=$STORAGE_ROOT/ssl/dh2048.pem \
|
smtpd_tls_dh1024_param_file=$STORAGE_ROOT/ssl/dh2048.pem \
|
||||||
|
smtpd_tls_ciphers=medium \
|
||||||
|
smtpd_tls_exclude_ciphers=aNULL \
|
||||||
smtpd_tls_received_header=yes
|
smtpd_tls_received_header=yes
|
||||||
|
|
||||||
# Prevent non-authenticated users from sending mail that requires being
|
# Prevent non-authenticated users from sending mail that requires being
|
||||||
@@ -144,6 +160,7 @@ tools/editconf.py /etc/postfix/main.cf virtual_transport=lmtp:[127.0.0.1]:10025
|
|||||||
#
|
#
|
||||||
# * `reject_non_fqdn_sender`: Reject not-nice-looking return paths.
|
# * `reject_non_fqdn_sender`: Reject not-nice-looking return paths.
|
||||||
# * `reject_unknown_sender_domain`: Reject return paths with invalid domains.
|
# * `reject_unknown_sender_domain`: Reject return paths with invalid domains.
|
||||||
|
# * `reject_authenticated_sender_login_mismatch`: Reject if mail FROM address does not match the client SASL login
|
||||||
# * `reject_rhsbl_sender`: Reject return paths that use blacklisted domains.
|
# * `reject_rhsbl_sender`: Reject return paths that use blacklisted domains.
|
||||||
# * `permit_sasl_authenticated`: Authenticated users (i.e. on port 587) can skip further checks.
|
# * `permit_sasl_authenticated`: Authenticated users (i.e. on port 587) can skip further checks.
|
||||||
# * `permit_mynetworks`: Mail that originates locally can skip further checks.
|
# * `permit_mynetworks`: Mail that originates locally can skip further checks.
|
||||||
@@ -157,13 +174,18 @@ tools/editconf.py /etc/postfix/main.cf virtual_transport=lmtp:[127.0.0.1]:10025
|
|||||||
# whitelisted) then postfix does a DEFER_IF_REJECT, which results in all "unknown user" sorts of messages turning into #NODOC
|
# whitelisted) then postfix does a DEFER_IF_REJECT, which results in all "unknown user" sorts of messages turning into #NODOC
|
||||||
# "450 4.7.1 Client host rejected: Service unavailable". This is a retry code, so the mail doesn't properly bounce. #NODOC
|
# "450 4.7.1 Client host rejected: Service unavailable". This is a retry code, so the mail doesn't properly bounce. #NODOC
|
||||||
tools/editconf.py /etc/postfix/main.cf \
|
tools/editconf.py /etc/postfix/main.cf \
|
||||||
smtpd_sender_restrictions="reject_non_fqdn_sender,reject_unknown_sender_domain,reject_rhsbl_sender dbl.spamhaus.org" \
|
smtpd_sender_restrictions="reject_non_fqdn_sender,reject_unknown_sender_domain,reject_authenticated_sender_login_mismatch,reject_rhsbl_sender dbl.spamhaus.org" \
|
||||||
smtpd_recipient_restrictions=permit_sasl_authenticated,permit_mynetworks,"reject_rbl_client zen.spamhaus.org",reject_unlisted_recipient,"check_policy_service inet:127.0.0.1:10023"
|
smtpd_recipient_restrictions=permit_sasl_authenticated,permit_mynetworks,"reject_rbl_client zen.spamhaus.org",reject_unlisted_recipient,"check_policy_service inet:127.0.0.1:10023"
|
||||||
|
|
||||||
# Postfix connects to Postgrey on the 127.0.0.1 interface specifically. Ensure that
|
# Postfix connects to Postgrey on the 127.0.0.1 interface specifically. Ensure that
|
||||||
# Postgrey listens on the same interface (and not IPv6, for instance).
|
# Postgrey listens on the same interface (and not IPv6, for instance).
|
||||||
|
# A lot of legit mail servers try to resend before 300 seconds.
|
||||||
|
# As a matter of fact RFC is not strict about retry timer so postfix and
|
||||||
|
# other MTA have their own intervals. To fix the problem of receiving
|
||||||
|
# e-mails really latter, delay of greylisting has been set to
|
||||||
|
# 180 seconds (default is 300 seconds).
|
||||||
tools/editconf.py /etc/default/postgrey \
|
tools/editconf.py /etc/default/postgrey \
|
||||||
POSTGREY_OPTS=\"--inet=127.0.0.1:10023\"
|
POSTGREY_OPTS=\"'--inet=127.0.0.1:10023 --delay=180'\"
|
||||||
|
|
||||||
# Increase the message size limit from 10MB to 128MB.
|
# Increase the message size limit from 10MB to 128MB.
|
||||||
# The same limit is specified in nginx.conf for mail submitted via webmail and Z-Push.
|
# The same limit is specified in nginx.conf for mail submitted via webmail and Z-Push.
|
||||||
@@ -178,3 +200,4 @@ ufw_allow submission
|
|||||||
# Restart services
|
# Restart services
|
||||||
|
|
||||||
restart_service postfix
|
restart_service postfix
|
||||||
|
restart_service postgrey
|
||||||
|
|||||||
@@ -69,6 +69,22 @@ tools/editconf.py /etc/postfix/main.cf \
|
|||||||
smtpd_sasl_path=private/auth \
|
smtpd_sasl_path=private/auth \
|
||||||
smtpd_sasl_auth_enable=yes
|
smtpd_sasl_auth_enable=yes
|
||||||
|
|
||||||
|
# ### Sender Validation
|
||||||
|
|
||||||
|
# Use a Sqlite3 database to set login maps. This is used with
|
||||||
|
# reject_authenticated_sender_login_mismatch to see if user is
|
||||||
|
# allowed to send mail using FROM field specified in the request.
|
||||||
|
tools/editconf.py /etc/postfix/main.cf \
|
||||||
|
smtpd_sender_login_maps=sqlite:/etc/postfix/sender-login-maps.cf
|
||||||
|
|
||||||
|
# SQL statement to set login map which includes the case when user is
|
||||||
|
# sending email using a valid alias.
|
||||||
|
# This is the same as virtual-alias-maps.cf, See below
|
||||||
|
cat > /etc/postfix/sender-login-maps.cf << EOF;
|
||||||
|
dbpath=$db_path
|
||||||
|
query = SELECT destination from (SELECT destination, 0 as priority FROM aliases WHERE source='%s' UNION SELECT email as destination, 1 as priority FROM users WHERE email='%s') ORDER BY priority LIMIT 1;
|
||||||
|
EOF
|
||||||
|
|
||||||
# ### Destination Validation
|
# ### Destination Validation
|
||||||
|
|
||||||
# Use a Sqlite3 database to check whether a destination email address exists,
|
# Use a Sqlite3 database to check whether a destination email address exists,
|
||||||
@@ -92,13 +108,25 @@ query = SELECT 1 FROM users WHERE email='%s'
|
|||||||
EOF
|
EOF
|
||||||
|
|
||||||
# SQL statement to rewrite an email address if an alias is present.
|
# SQL statement to rewrite an email address if an alias is present.
|
||||||
# Aliases have precedence over users, but that's counter-intuitive for
|
#
|
||||||
# catch-all aliases ("@domain.com") which should *not* catch mail users.
|
# Postfix makes multiple queries for each incoming mail. It first
|
||||||
# To fix this, not only query the aliases table but also the users
|
# queries the whole email address, then just the user part in certain
|
||||||
# table, i.e. turn users into aliases from themselves to themselves.
|
# locally-directed cases (but we don't use this), then just `@`+the
|
||||||
|
# domain part. The first query that returns something wins. See
|
||||||
|
# http://www.postfix.org/virtual.5.html.
|
||||||
|
#
|
||||||
|
# virtual-alias-maps has precedence over virtual-mailbox-maps, but
|
||||||
|
# we don't want catch-alls and domain aliases to catch mail for users
|
||||||
|
# that have been defined on those domains. To fix this, we not only
|
||||||
|
# query the aliases table but also the users table when resolving
|
||||||
|
# aliases, i.e. we turn users into aliases from themselves to
|
||||||
|
# themselves. That means users will match in postfix's first query
|
||||||
|
# before postfix gets to the third query for catch-alls/domain alises.
|
||||||
|
#
|
||||||
# If there is both an alias and a user for the same address either
|
# If there is both an alias and a user for the same address either
|
||||||
# might be returned by the UNION, so the whole query is wrapped in
|
# might be returned by the UNION, so the whole query is wrapped in
|
||||||
# another select that prioritizes the alias definition.
|
# another select that prioritizes the alias definition to preserve
|
||||||
|
# postfix's preference for aliases for whole email addresses.
|
||||||
cat > /etc/postfix/virtual-alias-maps.cf << EOF;
|
cat > /etc/postfix/virtual-alias-maps.cf << EOF;
|
||||||
dbpath=$db_path
|
dbpath=$db_path
|
||||||
query = SELECT destination from (SELECT destination, 0 as priority FROM aliases WHERE source='%s' UNION SELECT email as destination, 1 as priority FROM users WHERE email='%s') ORDER BY priority LIMIT 1;
|
query = SELECT destination from (SELECT destination, 0 as priority FROM aliases WHERE source='%s' UNION SELECT email as destination, 1 as priority FROM users WHERE email='%s') ORDER BY priority LIMIT 1;
|
||||||
|
|||||||
@@ -2,8 +2,11 @@
|
|||||||
|
|
||||||
source setup/functions.sh
|
source setup/functions.sh
|
||||||
|
|
||||||
apt_install python3-flask links duplicity libyaml-dev python3-dnspython python3-dateutil
|
# build-essential libssl-dev libffi-dev python3-dev: Required to pip install cryptography.
|
||||||
hide_output pip3 install rtyaml
|
apt_install python3-flask links duplicity libyaml-dev python3-dnspython python3-dateutil \
|
||||||
|
build-essential libssl-dev libffi-dev python3-dev
|
||||||
|
hide_output pip3 install rtyaml email_validator cryptography
|
||||||
|
# email_validator is repeated in setup/questions.sh
|
||||||
|
|
||||||
# Create a backup directory and a random key for encrypting backups.
|
# Create a backup directory and a random key for encrypting backups.
|
||||||
mkdir -p $STORAGE_ROOT/backup
|
mkdir -p $STORAGE_ROOT/backup
|
||||||
@@ -30,5 +33,16 @@ $(pwd)/management/backup.py
|
|||||||
EOF
|
EOF
|
||||||
chmod +x /etc/cron.daily/mailinabox-backup
|
chmod +x /etc/cron.daily/mailinabox-backup
|
||||||
|
|
||||||
|
# Perform daily status checks. Compare each day to the previous
|
||||||
|
# for changes and mail the changes to the administrator.
|
||||||
|
cat > /etc/cron.daily/mailinabox-statuschecks << EOF;
|
||||||
|
#!/bin/bash
|
||||||
|
# Mail-in-a-Box --- Do not edit / will be overwritten on update.
|
||||||
|
# Run status checks.
|
||||||
|
$(pwd)/management/status_checks.py --show-changes --smtp
|
||||||
|
EOF
|
||||||
|
chmod +x /etc/cron.daily/mailinabox-statuschecks
|
||||||
|
|
||||||
|
|
||||||
# Start it.
|
# Start it.
|
||||||
restart_service mailinabox
|
restart_service mailinabox
|
||||||
|
|||||||
@@ -67,6 +67,40 @@ def migration_6(env):
|
|||||||
basepath = os.path.join(env["STORAGE_ROOT"], 'dns/dnssec')
|
basepath = os.path.join(env["STORAGE_ROOT"], 'dns/dnssec')
|
||||||
shutil.move(os.path.join(basepath, 'keys.conf'), os.path.join(basepath, 'RSASHA1-NSEC3-SHA1.conf'))
|
shutil.move(os.path.join(basepath, 'keys.conf'), os.path.join(basepath, 'RSASHA1-NSEC3-SHA1.conf'))
|
||||||
|
|
||||||
|
def migration_7(env):
|
||||||
|
# I previously wanted domain names to be stored in Unicode in the database. Now I want them
|
||||||
|
# to be in IDNA. Affects aliases only.
|
||||||
|
import sqlite3
|
||||||
|
conn = sqlite3.connect(os.path.join(env["STORAGE_ROOT"], "mail/users.sqlite"))
|
||||||
|
|
||||||
|
# Get existing alias source addresses.
|
||||||
|
c = conn.cursor()
|
||||||
|
c.execute('SELECT source FROM aliases')
|
||||||
|
aliases = [ row[0] for row in c.fetchall() ]
|
||||||
|
|
||||||
|
# Update to IDNA-encoded domains.
|
||||||
|
for email in aliases:
|
||||||
|
try:
|
||||||
|
localpart, domainpart = email.split("@")
|
||||||
|
domainpart = domainpart.encode("idna").decode("ascii")
|
||||||
|
newemail = localpart + "@" + domainpart
|
||||||
|
if newemail != email:
|
||||||
|
c = conn.cursor()
|
||||||
|
c.execute("UPDATE aliases SET source=? WHERE source=?", (newemail, email))
|
||||||
|
if c.rowcount != 1: raise ValueError("Alias not found.")
|
||||||
|
print("Updated alias", email, "to", newemail)
|
||||||
|
except Exception as e:
|
||||||
|
print("Error updating IDNA alias", email, e)
|
||||||
|
|
||||||
|
# Save.
|
||||||
|
conn.commit()
|
||||||
|
|
||||||
|
def migration_8(env):
|
||||||
|
# Delete DKIM keys. We had generated 1024-bit DKIM keys.
|
||||||
|
# By deleting the key file we'll automatically generate
|
||||||
|
# a new key, which will be 2048 bits.
|
||||||
|
os.unlink(os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private'))
|
||||||
|
|
||||||
def get_current_migration():
|
def get_current_migration():
|
||||||
ver = 0
|
ver = 0
|
||||||
while True:
|
while True:
|
||||||
@@ -84,13 +118,22 @@ def run_migrations():
|
|||||||
env = load_environment()
|
env = load_environment()
|
||||||
|
|
||||||
migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')
|
migration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')
|
||||||
|
migration_id = None
|
||||||
if os.path.exists(migration_id_file):
|
if os.path.exists(migration_id_file):
|
||||||
with open(migration_id_file) as f:
|
with open(migration_id_file) as f:
|
||||||
ourver = int(f.read().strip())
|
migration_id = f.read().strip();
|
||||||
else:
|
|
||||||
|
if migration_id is None:
|
||||||
# Load the legacy location of the migration ID. We'll drop support
|
# Load the legacy location of the migration ID. We'll drop support
|
||||||
# for this eventually.
|
# for this eventually.
|
||||||
ourver = int(env.get("MIGRATIONID", "0"))
|
migration_id = env.get("MIGRATIONID")
|
||||||
|
|
||||||
|
if migration_id is None:
|
||||||
|
print()
|
||||||
|
print("%s file doesn't exists. Skipping migration..." % (migration_id_file,))
|
||||||
|
return
|
||||||
|
|
||||||
|
ourver = int(migration_id)
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
next_ver = (ourver + 1)
|
next_ver = (ourver + 1)
|
||||||
|
|||||||
32
setup/munin.sh
Executable file
32
setup/munin.sh
Executable file
@@ -0,0 +1,32 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Munin: resource monitoring tool
|
||||||
|
#################################################
|
||||||
|
|
||||||
|
source setup/functions.sh # load our functions
|
||||||
|
source /etc/mailinabox.conf # load global vars
|
||||||
|
|
||||||
|
# install Munin
|
||||||
|
apt_install munin munin-node
|
||||||
|
|
||||||
|
# edit config
|
||||||
|
cat > /etc/munin/munin.conf <<EOF;
|
||||||
|
dbdir /var/lib/munin
|
||||||
|
htmldir /var/cache/munin/www
|
||||||
|
logdir /var/log/munin
|
||||||
|
rundir /var/run/munin
|
||||||
|
tmpldir /etc/munin/templates
|
||||||
|
|
||||||
|
includedir /etc/munin/munin-conf.d
|
||||||
|
|
||||||
|
# a simple host tree
|
||||||
|
[$PRIMARY_HOSTNAME]
|
||||||
|
address 127.0.0.1
|
||||||
|
|
||||||
|
# send alerts to the following address
|
||||||
|
contacts admin
|
||||||
|
contact.admin.command mail -s "Munin notification ${var:host}" administrator@$PRIMARY_HOSTNAME
|
||||||
|
contact.admin.always_send warning critical
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# generate initial statistics so the directory isn't empty
|
||||||
|
sudo -u munin munin-cron
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
# Install the 'host', 'sed', and and 'nc' tools. This script is run before
|
# Install the 'host', 'sed', and and 'nc' tools. This script is run before
|
||||||
# the rest of the system setup so we may not yet have things installed.
|
# the rest of the system setup so we may not yet have things installed.
|
||||||
hide_output apt-get -y install bind9-host sed netcat-openbsd
|
apt_get_quiet install bind9-host sed netcat-openbsd
|
||||||
|
|
||||||
# Stop if the PRIMARY_HOSTNAME is listed in the Spamhaus Domain Block List.
|
# Stop if the PRIMARY_HOSTNAME is listed in the Spamhaus Domain Block List.
|
||||||
# The user might have chosen a name that was previously in use by a spammer
|
# The user might have chosen a name that was previously in use by a spammer
|
||||||
|
|||||||
@@ -15,18 +15,59 @@ apt_install \
|
|||||||
apt-get purge -qq -y owncloud*
|
apt-get purge -qq -y owncloud*
|
||||||
|
|
||||||
# Install ownCloud from source of this version:
|
# Install ownCloud from source of this version:
|
||||||
owncloud_ver=7.0.4
|
owncloud_ver=8.0.4
|
||||||
|
owncloud_hash=625b1c561ea51426047a3e79eda51ca05e9f978a
|
||||||
|
|
||||||
|
# Migrate <= v0.10 setups that stored the ownCloud config.php in /usr/local rather than
|
||||||
|
# in STORAGE_ROOT. Move the file to STORAGE_ROOT.
|
||||||
|
if [ ! -f $STORAGE_ROOT/owncloud/config.php ] \
|
||||||
|
&& [ -f /usr/local/lib/owncloud/config/config.php ]; then
|
||||||
|
|
||||||
|
# Move config.php and symlink back into previous location.
|
||||||
|
echo "Migrating owncloud/config.php to new location."
|
||||||
|
mv /usr/local/lib/owncloud/config/config.php $STORAGE_ROOT/owncloud/config.php \
|
||||||
|
&& \
|
||||||
|
ln -sf $STORAGE_ROOT/owncloud/config.php /usr/local/lib/owncloud/config/config.php
|
||||||
|
fi
|
||||||
|
|
||||||
# Check if ownCloud dir exist, and check if version matches owncloud_ver (if either doesn't - install/upgrade)
|
# Check if ownCloud dir exist, and check if version matches owncloud_ver (if either doesn't - install/upgrade)
|
||||||
if [ ! -d /usr/local/lib/owncloud/ ] \
|
if [ ! -d /usr/local/lib/owncloud/ ] \
|
||||||
|| ! grep -q $owncloud_ver /usr/local/lib/owncloud/version.php; then
|
|| ! grep -q $owncloud_ver /usr/local/lib/owncloud/version.php; then
|
||||||
|
|
||||||
echo installing ownCloud...
|
# Download and verify
|
||||||
rm -f /tmp/owncloud.zip
|
echo "installing ownCloud..."
|
||||||
wget -qO /tmp/owncloud.zip https://download.owncloud.org/community/owncloud-$owncloud_ver.zip
|
wget_verify https://download.owncloud.org/community/owncloud-$owncloud_ver.zip $owncloud_hash /tmp/owncloud.zip
|
||||||
|
|
||||||
|
# Clear out the existing ownCloud.
|
||||||
|
if [ -d /usr/local/lib/owncloud/ ]; then
|
||||||
|
echo "upgrading ownCloud to $owncloud_ver (backing up existing ownCloud directory to /tmp/owncloud-backup-$$)..."
|
||||||
|
mv /usr/local/lib/owncloud /tmp/owncloud-backup-$$
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Extract ownCloud
|
||||||
unzip -u -o -q /tmp/owncloud.zip -d /usr/local/lib #either extracts new or replaces current files
|
unzip -u -o -q /tmp/owncloud.zip -d /usr/local/lib #either extracts new or replaces current files
|
||||||
hide_output php /usr/local/lib/owncloud/occ upgrade #if OC is up-to-date it wont matter
|
|
||||||
rm -f /tmp/owncloud.zip
|
rm -f /tmp/owncloud.zip
|
||||||
|
|
||||||
|
# The two apps we actually want are not in ownCloud core. Clone them from
|
||||||
|
# their github repositories.
|
||||||
|
mkdir -p /usr/local/lib/owncloud/apps
|
||||||
|
git_clone https://github.com/owncloud/contacts v$owncloud_ver '' /usr/local/lib/owncloud/apps/contacts
|
||||||
|
git_clone https://github.com/owncloud/calendar v$owncloud_ver '' /usr/local/lib/owncloud/apps/calendar
|
||||||
|
|
||||||
|
# Fix weird permissions.
|
||||||
|
chmod 750 /usr/local/lib/owncloud/{apps,config}
|
||||||
|
|
||||||
|
# Create a symlink to the config.php in STORAGE_ROOT (for upgrades we're restoring the symlink we previously
|
||||||
|
# put in, and in new installs we're creating a symlink and will create the actual config later).
|
||||||
|
ln -sf $STORAGE_ROOT/owncloud/config.php /usr/local/lib/owncloud/config/config.php
|
||||||
|
|
||||||
|
# Make sure permissions are correct or the upgrade step won't run.
|
||||||
|
# $STORAGE_ROOT/owncloud may not yet exist, so use -f to suppress
|
||||||
|
# that error.
|
||||||
|
chown -f -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
|
||||||
|
|
||||||
|
# Run the upgrade script (if ownCloud is already up-to-date it wont matter).
|
||||||
|
hide_output sudo -u www-data php /usr/local/lib/owncloud/occ upgrade
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ### Configuring ownCloud
|
# ### Configuring ownCloud
|
||||||
@@ -34,17 +75,20 @@ fi
|
|||||||
# Setup ownCloud if the ownCloud database does not yet exist. Running setup when
|
# Setup ownCloud if the ownCloud database does not yet exist. Running setup when
|
||||||
# the database does exist wipes the database and user data.
|
# the database does exist wipes the database and user data.
|
||||||
if [ ! -f $STORAGE_ROOT/owncloud/owncloud.db ]; then
|
if [ ! -f $STORAGE_ROOT/owncloud/owncloud.db ]; then
|
||||||
|
# Create user data directory
|
||||||
|
mkdir -p $STORAGE_ROOT/owncloud
|
||||||
|
|
||||||
# Create a configuration file.
|
# Create a configuration file.
|
||||||
TIMEZONE=$(cat /etc/timezone)
|
TIMEZONE=$(cat /etc/timezone)
|
||||||
instanceid=oc$(echo $PRIMARY_HOSTNAME | sha1sum | fold -w 10 | head -n 1)
|
instanceid=oc$(echo $PRIMARY_HOSTNAME | sha1sum | fold -w 10 | head -n 1)
|
||||||
cat > /usr/local/lib/owncloud/config/config.php <<EOF;
|
cat > $STORAGE_ROOT/owncloud/config.php <<EOF;
|
||||||
<?php
|
<?php
|
||||||
\$CONFIG = array (
|
\$CONFIG = array (
|
||||||
'datadirectory' => '$STORAGE_ROOT/owncloud',
|
'datadirectory' => '$STORAGE_ROOT/owncloud',
|
||||||
|
|
||||||
'instanceid' => '$instanceid',
|
'instanceid' => '$instanceid',
|
||||||
|
|
||||||
'trusted_domains' =>
|
'trusted_domains' =>
|
||||||
array (
|
array (
|
||||||
0 => '$PRIMARY_HOSTNAME',
|
0 => '$PRIMARY_HOSTNAME',
|
||||||
),
|
),
|
||||||
@@ -94,20 +138,23 @@ EOF
|
|||||||
?>
|
?>
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# Create user data directory and set permissions
|
# Set permissions
|
||||||
mkdir -p $STORAGE_ROOT/owncloud
|
|
||||||
chown -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
|
chown -R www-data.www-data $STORAGE_ROOT/owncloud /usr/local/lib/owncloud
|
||||||
|
|
||||||
# Execute ownCloud's setup step, which creates the ownCloud sqlite database.
|
# Execute ownCloud's setup step, which creates the ownCloud sqlite database.
|
||||||
# It also wipes it if it exists. And it deletes the autoconfig.php file.
|
# It also wipes it if it exists. And it updates config.php with database
|
||||||
|
# settings and deletes the autoconfig.php file.
|
||||||
(cd /usr/local/lib/owncloud; sudo -u www-data php /usr/local/lib/owncloud/index.php;)
|
(cd /usr/local/lib/owncloud; sudo -u www-data php /usr/local/lib/owncloud/index.php;)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Enable/disable apps. Note that this must be done after the ownCloud setup.
|
# Enable/disable apps. Note that this must be done after the ownCloud setup.
|
||||||
# The firstrunwizard gave Josh all sorts of problems, so disabling that.
|
# The firstrunwizard gave Josh all sorts of problems, so disabling that.
|
||||||
# user_external is what allows ownCloud to use IMAP for login.
|
# user_external is what allows ownCloud to use IMAP for login. The contacts
|
||||||
hide_output php /usr/local/lib/owncloud/console.php app:disable firstrunwizard
|
# and calendar apps are the extensions we really care about here.
|
||||||
hide_output php /usr/local/lib/owncloud/console.php app:enable user_external
|
hide_output sudo -u www-data php /usr/local/lib/owncloud/console.php app:disable firstrunwizard
|
||||||
|
hide_output sudo -u www-data php /usr/local/lib/owncloud/console.php app:enable user_external
|
||||||
|
hide_output sudo -u www-data php /usr/local/lib/owncloud/console.php app:enable contacts
|
||||||
|
hide_output sudo -u www-data php /usr/local/lib/owncloud/console.php app:enable calendar
|
||||||
|
|
||||||
# Set PHP FPM values to support large file uploads
|
# Set PHP FPM values to support large file uploads
|
||||||
# (semicolon is the comment character in this file, hashes produce deprecation warnings)
|
# (semicolon is the comment character in this file, hashes produce deprecation warnings)
|
||||||
|
|||||||
@@ -4,7 +4,17 @@ if [ -z "$NONINTERACTIVE" ]; then
|
|||||||
# e.g. if we piped a bootstrapping install script to bash to get started. In that
|
# e.g. if we piped a bootstrapping install script to bash to get started. In that
|
||||||
# case, the nifty '[ -t 0 ]' test won't work. But with Vagrant we must suppress so we
|
# case, the nifty '[ -t 0 ]' test won't work. But with Vagrant we must suppress so we
|
||||||
# use a shell flag instead. Really supress any output from installing dialog.
|
# use a shell flag instead. Really supress any output from installing dialog.
|
||||||
hide_output apt-get -y install dialog
|
#
|
||||||
|
# Also install depencies needed to validate the email address.
|
||||||
|
if [ ! -f /usr/bin/dialog ] || [ ! -f /usr/bin/python3 ] || [ ! -f /usr/bin/pip3 ]; then
|
||||||
|
echo Installing packages needed for setup...
|
||||||
|
apt-get -q -q update
|
||||||
|
apt_get_quiet install dialog python3 python3-pip || exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# email_validator is repeated in setup/management.sh
|
||||||
|
hide_output pip3 install email_validator || exit 1
|
||||||
|
|
||||||
message_box "Mail-in-a-Box Installation" \
|
message_box "Mail-in-a-Box Installation" \
|
||||||
"Hello and thanks for deploying a Mail-in-a-Box!
|
"Hello and thanks for deploying a Mail-in-a-Box!
|
||||||
\n\nI'm going to ask you a few questions.
|
\n\nI'm going to ask you a few questions.
|
||||||
@@ -186,3 +196,48 @@ if [ -z "$CSR_COUNTRY" ]; then
|
|||||||
exit
|
exit
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Automatic configuration, e.g. as used in our Vagrant configuration.
|
||||||
|
if [ "$PUBLIC_IP" = "auto" ]; then
|
||||||
|
# Use a public API to get our public IP address, or fall back to local network configuration.
|
||||||
|
PUBLIC_IP=$(get_publicip_from_web_service 4 || get_default_privateip 4)
|
||||||
|
fi
|
||||||
|
if [ "$PUBLIC_IPV6" = "auto" ]; then
|
||||||
|
# Use a public API to get our public IPv6 address, or fall back to local network configuration.
|
||||||
|
PUBLIC_IPV6=$(get_publicip_from_web_service 6 || get_default_privateip 6)
|
||||||
|
fi
|
||||||
|
if [ "$PRIMARY_HOSTNAME" = "auto" ]; then
|
||||||
|
# Use reverse DNS to get this machine's hostname. Install bind9-host early.
|
||||||
|
hide_output apt-get -y install bind9-host
|
||||||
|
PRIMARY_HOSTNAME=$(get_default_hostname)
|
||||||
|
elif [ "$PRIMARY_HOSTNAME" = "auto-easy" ]; then
|
||||||
|
# Generate a probably-unique subdomain under our justtesting.email domain.
|
||||||
|
PRIMARY_HOSTNAME=`echo $PUBLIC_IP | sha1sum | cut -c1-5`.justtesting.email
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set STORAGE_USER and STORAGE_ROOT to default values (user-data and /home/user-data), unless
|
||||||
|
# we've already got those values from a previous run.
|
||||||
|
if [ -z "$STORAGE_USER" ]; then
|
||||||
|
STORAGE_USER=$([[ -z "$DEFAULT_STORAGE_USER" ]] && echo "user-data" || echo "$DEFAULT_STORAGE_USER")
|
||||||
|
fi
|
||||||
|
if [ -z "$STORAGE_ROOT" ]; then
|
||||||
|
STORAGE_ROOT=$([[ -z "$DEFAULT_STORAGE_ROOT" ]] && echo "/home/$STORAGE_USER" || echo "$DEFAULT_STORAGE_ROOT")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Show the configuration, since the user may have not entered it manually.
|
||||||
|
echo
|
||||||
|
echo "Primary Hostname: $PRIMARY_HOSTNAME"
|
||||||
|
echo "Public IP Address: $PUBLIC_IP"
|
||||||
|
if [ ! -z "$PUBLIC_IPV6" ]; then
|
||||||
|
echo "Public IPv6 Address: $PUBLIC_IPV6"
|
||||||
|
fi
|
||||||
|
if [ "$PRIVATE_IP" != "$PUBLIC_IP" ]; then
|
||||||
|
echo "Private IP Address: $PRIVATE_IP"
|
||||||
|
fi
|
||||||
|
if [ "$PRIVATE_IPV6" != "$PUBLIC_IPV6" ]; then
|
||||||
|
echo "Private IPv6 Address: $PRIVATE_IPV6"
|
||||||
|
fi
|
||||||
|
if [ -f /usr/bin/git ] && [ -d .git ]; then
|
||||||
|
echo "Mail-in-a-Box Version: " $(git describe)
|
||||||
|
fi
|
||||||
|
echo
|
||||||
|
|||||||
@@ -25,10 +25,14 @@ tools/editconf.py /etc/default/spamassassin \
|
|||||||
# Configure pyzor.
|
# Configure pyzor.
|
||||||
hide_output pyzor discover
|
hide_output pyzor discover
|
||||||
|
|
||||||
# Pass messages on to docevot on port 10026.
|
# Configure spampd:
|
||||||
# This is actually the default setting but we don't want to lose track of it.
|
# * Pass messages on to docevot on port 10026. This is actually the default setting but we don't
|
||||||
# We've already configured Dovecot to listen on this port.
|
# want to lose track of it. (We've configured Dovecot to listen on this port elsewhere.)
|
||||||
tools/editconf.py /etc/default/spampd DESTPORT=10026
|
# * Increase the maximum message size of scanned messages from the default of 64KB to 500KB, which
|
||||||
|
# is Spamassassin (spamc)'s own default. Specified in KBytes.
|
||||||
|
tools/editconf.py /etc/default/spampd \
|
||||||
|
DESTPORT=10026 \
|
||||||
|
ADDOPTS="\"--maxsize=500\""
|
||||||
|
|
||||||
# Spamassassin normally wraps spam as an attachment inside a fresh
|
# Spamassassin normally wraps spam as an attachment inside a fresh
|
||||||
# email with a report about the message. This also protects the user
|
# email with a report about the message. This also protects the user
|
||||||
@@ -71,6 +75,7 @@ chown -R spampd:spampd $STORAGE_ROOT/mail/spamassassin
|
|||||||
# Enable the Dovecot antispam plugin.
|
# Enable the Dovecot antispam plugin.
|
||||||
# (Be careful if we use multiple plugins later.) #NODOC
|
# (Be careful if we use multiple plugins later.) #NODOC
|
||||||
sed -i "s/#mail_plugins = .*/mail_plugins = \$mail_plugins antispam/" /etc/dovecot/conf.d/20-imap.conf
|
sed -i "s/#mail_plugins = .*/mail_plugins = \$mail_plugins antispam/" /etc/dovecot/conf.d/20-imap.conf
|
||||||
|
sed -i "s/#mail_plugins = .*/mail_plugins = \$mail_plugins antispam/" /etc/dovecot/conf.d/20-pop3.conf
|
||||||
|
|
||||||
# Configure the antispam plugin to call sa-learn-pipe.sh.
|
# Configure the antispam plugin to call sa-learn-pipe.sh.
|
||||||
cat > /etc/dovecot/conf.d/99-local-spampd.conf << EOF;
|
cat > /etc/dovecot/conf.d/99-local-spampd.conf << EOF;
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ export LC_TYPE=en_US.UTF-8
|
|||||||
if [ -f /etc/mailinabox.conf ]; then
|
if [ -f /etc/mailinabox.conf ]; then
|
||||||
# Run any system migrations before proceeding. Since this is a second run,
|
# Run any system migrations before proceeding. Since this is a second run,
|
||||||
# we assume we have Python already installed.
|
# we assume we have Python already installed.
|
||||||
setup/migrate.py --migrate
|
setup/migrate.py --migrate || exit 1
|
||||||
|
|
||||||
# Load the old .conf file to get existing configuration options loaded
|
# Load the old .conf file to get existing configuration options loaded
|
||||||
# into variables with a DEFAULT_ prefix.
|
# into variables with a DEFAULT_ prefix.
|
||||||
@@ -47,57 +47,32 @@ chmod +x /usr/local/bin/mailinabox
|
|||||||
|
|
||||||
# Ask the user for the PRIMARY_HOSTNAME, PUBLIC_IP, PUBLIC_IPV6, and CSR_COUNTRY
|
# Ask the user for the PRIMARY_HOSTNAME, PUBLIC_IP, PUBLIC_IPV6, and CSR_COUNTRY
|
||||||
# if values have not already been set in environment variables. When running
|
# if values have not already been set in environment variables. When running
|
||||||
# non-interactively, be sure to set values for all!
|
# non-interactively, be sure to set values for all! Also sets STORAGE_USER and
|
||||||
|
# STORAGE_ROOT.
|
||||||
source setup/questions.sh
|
source setup/questions.sh
|
||||||
|
|
||||||
# Automatic configuration, e.g. as used in our Vagrant configuration.
|
|
||||||
if [ "$PUBLIC_IP" = "auto" ]; then
|
|
||||||
# Use a public API to get our public IP address, or fall back to local network configuration.
|
|
||||||
PUBLIC_IP=$(get_publicip_from_web_service 4 || get_default_privateip 4)
|
|
||||||
fi
|
|
||||||
if [ "$PUBLIC_IPV6" = "auto" ]; then
|
|
||||||
# Use a public API to get our public IPv6 address, or fall back to local network configuration.
|
|
||||||
PUBLIC_IPV6=$(get_publicip_from_web_service 6 || get_default_privateip 6)
|
|
||||||
fi
|
|
||||||
if [ "$PRIMARY_HOSTNAME" = "auto-easy" ]; then
|
|
||||||
# Generate a probably-unique subdomain under our justtesting.email domain.
|
|
||||||
PRIMARY_HOSTNAME=`echo $PUBLIC_IP | sha1sum | cut -c1-5`.justtesting.email
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Show the configuration, since the user may have not entered it manually.
|
|
||||||
echo
|
|
||||||
echo "Primary Hostname: $PRIMARY_HOSTNAME"
|
|
||||||
echo "Public IP Address: $PUBLIC_IP"
|
|
||||||
if [ ! -z "$PUBLIC_IPV6" ]; then
|
|
||||||
echo "Public IPv6 Address: $PUBLIC_IPV6"
|
|
||||||
fi
|
|
||||||
if [ "$PRIVATE_IP" != "$PUBLIC_IP" ]; then
|
|
||||||
echo "Private IP Address: $PRIVATE_IP"
|
|
||||||
fi
|
|
||||||
if [ "$PRIVATE_IPV6" != "$PUBLIC_IPV6" ]; then
|
|
||||||
echo "Private IPv6 Address: $PRIVATE_IPV6"
|
|
||||||
fi
|
|
||||||
if [ -f /usr/bin/git ]; then
|
|
||||||
echo "Mail-in-a-Box Version: " $(git describe)
|
|
||||||
fi
|
|
||||||
echo
|
|
||||||
|
|
||||||
# Run some network checks to make sure setup on this machine makes sense.
|
# Run some network checks to make sure setup on this machine makes sense.
|
||||||
if [ -z "$SKIP_NETWORK_CHECKS" ]; then
|
if [ -z "$SKIP_NETWORK_CHECKS" ]; then
|
||||||
. setup/network-checks.sh
|
source setup/network-checks.sh
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create the user named "user-data" and store all persistent user
|
# Create the STORAGE_USER and STORAGE_ROOT directory if they don't already exist.
|
||||||
# data (mailboxes, etc.) in that user's home directory.
|
# If the STORAGE_ROOT is missing the mailinabox.version file that lists a
|
||||||
if [ -z "$STORAGE_ROOT" ]; then
|
# migration (schema) number for the files stored there, assume this is a fresh
|
||||||
STORAGE_USER=user-data
|
# installation to that directory and write the file to contain the current
|
||||||
if [ ! -d /home/$STORAGE_USER ]; then useradd -m $STORAGE_USER; fi
|
# migration number for this version of Mail-in-a-Box.
|
||||||
STORAGE_ROOT=/home/$STORAGE_USER
|
if ! id -u $STORAGE_USER >/dev/null 2>&1; then
|
||||||
|
useradd -m $STORAGE_USER
|
||||||
|
fi
|
||||||
|
if [ ! -d $STORAGE_ROOT ]; then
|
||||||
mkdir -p $STORAGE_ROOT
|
mkdir -p $STORAGE_ROOT
|
||||||
|
fi
|
||||||
|
if [ ! -f $STORAGE_ROOT/mailinabox.version ]; then
|
||||||
echo $(setup/migrate.py --current) > $STORAGE_ROOT/mailinabox.version
|
echo $(setup/migrate.py --current) > $STORAGE_ROOT/mailinabox.version
|
||||||
chown $STORAGE_USER.$STORAGE_USER $STORAGE_ROOT/mailinabox.version
|
chown $STORAGE_USER.$STORAGE_USER $STORAGE_ROOT/mailinabox.version
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
# Save the global options in /etc/mailinabox.conf so that standalone
|
# Save the global options in /etc/mailinabox.conf so that standalone
|
||||||
# tools know where to look for data.
|
# tools know where to look for data.
|
||||||
cat > /etc/mailinabox.conf << EOF;
|
cat > /etc/mailinabox.conf << EOF;
|
||||||
@@ -125,11 +100,16 @@ source setup/webmail.sh
|
|||||||
source setup/owncloud.sh
|
source setup/owncloud.sh
|
||||||
source setup/zpush.sh
|
source setup/zpush.sh
|
||||||
source setup/management.sh
|
source setup/management.sh
|
||||||
|
source setup/munin.sh
|
||||||
|
|
||||||
# Write the DNS and nginx configuration files.
|
# Ping the management daemon to write the DNS and nginx configuration files.
|
||||||
sleep 5 # wait for the daemon to start
|
until nc -z -w 4 localhost 10222
|
||||||
curl -s -d POSTDATA --user $(</var/lib/mailinabox/api.key): http://127.0.0.1:10222/dns/update
|
do
|
||||||
curl -s -d POSTDATA --user $(</var/lib/mailinabox/api.key): http://127.0.0.1:10222/web/update
|
echo Waiting for the Mail-in-a-Box management daemon to start...
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
tools/dns_update
|
||||||
|
tools/web_update
|
||||||
|
|
||||||
# If there aren't any mail users yet, create one.
|
# If there aren't any mail users yet, create one.
|
||||||
source setup/firstuser.sh
|
source setup/firstuser.sh
|
||||||
|
|||||||
@@ -3,13 +3,29 @@ source setup/functions.sh # load our functions
|
|||||||
# Basic System Configuration
|
# Basic System Configuration
|
||||||
# -------------------------
|
# -------------------------
|
||||||
|
|
||||||
# ### Install Packages
|
# ### Add Mail-in-a-Box's PPA.
|
||||||
|
|
||||||
|
# We've built several .deb packages on our own that we want to include.
|
||||||
|
# One is a replacement for Ubuntu's stock postgrey package that makes
|
||||||
|
# some enhancements. The other is dovecot-lucene, a Lucene-based full
|
||||||
|
# text search plugin for (and by) dovecot, which is not available in
|
||||||
|
# Ubuntu currently.
|
||||||
|
#
|
||||||
|
# Add that to the system's list of repositories:
|
||||||
|
|
||||||
|
hide_output add-apt-repository -y ppa:mail-in-a-box/ppa
|
||||||
|
|
||||||
|
# The apt-get update in the next step will pull in the PPA's index.
|
||||||
|
|
||||||
|
# ### Update Packages
|
||||||
|
|
||||||
# Update system packages to make sure we have the latest upstream versions of things from Ubuntu.
|
# Update system packages to make sure we have the latest upstream versions of things from Ubuntu.
|
||||||
|
|
||||||
echo Updating system packages...
|
echo Updating system packages...
|
||||||
hide_output apt-get update
|
hide_output apt-get update
|
||||||
hide_output apt-get -y upgrade
|
apt_get_quiet upgrade
|
||||||
|
|
||||||
|
# ### Install System Packages
|
||||||
|
|
||||||
# Install basic utilities.
|
# Install basic utilities.
|
||||||
#
|
#
|
||||||
@@ -20,12 +36,14 @@ hide_output apt-get -y upgrade
|
|||||||
# * cron: Runs background processes periodically.
|
# * cron: Runs background processes periodically.
|
||||||
# * ntp: keeps the system time correct
|
# * ntp: keeps the system time correct
|
||||||
# * fail2ban: scans log files for repeated failed login attempts and blocks the remote IP at the firewall
|
# * fail2ban: scans log files for repeated failed login attempts and blocks the remote IP at the firewall
|
||||||
|
# * netcat-openbsd: `nc` command line networking tool
|
||||||
|
# * git: we install some things directly from github
|
||||||
# * sudo: allows privileged users to execute commands as root without being root
|
# * sudo: allows privileged users to execute commands as root without being root
|
||||||
# * coreutils: includes `nproc` tool to report number of processors
|
# * coreutils: includes `nproc` tool to report number of processors
|
||||||
# * bc: allows us to do math to compute sane defaults
|
# * bc: allows us to do math to compute sane defaults
|
||||||
|
|
||||||
apt_install python3 python3-dev python3-pip \
|
apt_install python3 python3-dev python3-pip \
|
||||||
wget curl sudo coreutils bc \
|
netcat-openbsd wget curl git sudo coreutils bc \
|
||||||
haveged unattended-upgrades cron ntp fail2ban
|
haveged unattended-upgrades cron ntp fail2ban
|
||||||
|
|
||||||
# Allow apt to install system updates automatically every day.
|
# Allow apt to install system updates automatically every day.
|
||||||
@@ -106,3 +124,11 @@ fi
|
|||||||
|
|
||||||
restart_service bind9
|
restart_service bind9
|
||||||
restart_service resolvconf
|
restart_service resolvconf
|
||||||
|
|
||||||
|
# ### Fail2Ban Service
|
||||||
|
|
||||||
|
# Configure the Fail2Ban installation to prevent dumb bruce-force attacks against dovecot, postfix and ssh
|
||||||
|
cp conf/fail2ban/jail.local /etc/fail2ban/jail.local
|
||||||
|
cp conf/fail2ban/dovecotimap.conf /etc/fail2ban/filter.d/dovecotimap.conf
|
||||||
|
|
||||||
|
restart_service fail2ban
|
||||||
|
|||||||
14
setup/web.sh
14
setup/web.sh
@@ -32,6 +32,10 @@ sed "s#STORAGE_ROOT#$STORAGE_ROOT#" \
|
|||||||
tools/editconf.py /etc/nginx/nginx.conf -s \
|
tools/editconf.py /etc/nginx/nginx.conf -s \
|
||||||
server_names_hash_bucket_size="64;"
|
server_names_hash_bucket_size="64;"
|
||||||
|
|
||||||
|
# Tell PHP not to expose its version number in the X-Powered-By header.
|
||||||
|
tools/editconf.py /etc/php5/fpm/php.ini -c ';' \
|
||||||
|
expose_php=Off
|
||||||
|
|
||||||
# Bump up PHP's max_children to support more concurrent connections
|
# Bump up PHP's max_children to support more concurrent connections
|
||||||
tools/editconf.py /etc/php5/fpm/pool.d/www.conf -c ';' \
|
tools/editconf.py /etc/php5/fpm/pool.d/www.conf -c ';' \
|
||||||
pm.max_children=8
|
pm.max_children=8
|
||||||
@@ -53,6 +57,16 @@ cat conf/ios-profile.xml \
|
|||||||
> /var/lib/mailinabox/mobileconfig.xml
|
> /var/lib/mailinabox/mobileconfig.xml
|
||||||
chmod a+r /var/lib/mailinabox/mobileconfig.xml
|
chmod a+r /var/lib/mailinabox/mobileconfig.xml
|
||||||
|
|
||||||
|
# Create the Mozilla Auto-configuration file which is exposed via the
|
||||||
|
# nginx configuration at /.well-known/autoconfig/mail/config-v1.1.xml.
|
||||||
|
# The format of the file is documented at:
|
||||||
|
# https://wiki.mozilla.org/Thunderbird:Autoconfiguration:ConfigFileFormat
|
||||||
|
# and https://developer.mozilla.org/en-US/docs/Mozilla/Thunderbird/Autoconfiguration/FileFormat/HowTo.
|
||||||
|
cat conf/mozilla-autoconfig.xml \
|
||||||
|
| sed "s/PRIMARY_HOSTNAME/$PRIMARY_HOSTNAME/" \
|
||||||
|
> /var/lib/mailinabox/mozilla-autoconfig.xml
|
||||||
|
chmod a+r /var/lib/mailinabox/mozilla-autoconfig.xml
|
||||||
|
|
||||||
# make a default homepage
|
# make a default homepage
|
||||||
if [ -d $STORAGE_ROOT/www/static ]; then mv $STORAGE_ROOT/www/static $STORAGE_ROOT/www/default; fi # migration #NODOC
|
if [ -d $STORAGE_ROOT/www/static ]; then mv $STORAGE_ROOT/www/static $STORAGE_ROOT/www/default; fi # migration #NODOC
|
||||||
mkdir -p $STORAGE_ROOT/www/default
|
mkdir -p $STORAGE_ROOT/www/default
|
||||||
|
|||||||
@@ -30,24 +30,41 @@ apt_install \
|
|||||||
apt-get purge -qq -y roundcube* #NODOC
|
apt-get purge -qq -y roundcube* #NODOC
|
||||||
|
|
||||||
# Install Roundcube from source if it is not already present or if it is out of date.
|
# Install Roundcube from source if it is not already present or if it is out of date.
|
||||||
VERSION=1.0.3
|
# Combine the Roundcube version number with the commit hash of vacation_sieve to track
|
||||||
|
# whether we have the latest version.
|
||||||
|
VERSION=1.1.2
|
||||||
|
HASH=df88deae691da3ecf3e9f0aee674c1f3042ea1eb
|
||||||
|
VACATION_SIEVE_VERSION=91ea6f52216390073d1f5b70b5f6bea0bfaee7e5
|
||||||
|
PERSISTENT_LOGIN_VERSION=117fbd8f93b56b2bf72ad055193464803ef3bc36
|
||||||
|
UPDATE_KEY=$VERSION:$VACATION_SIEVE_VERSION:$PERSISTENT_LOGIN_VERSION
|
||||||
needs_update=0 #NODOC
|
needs_update=0 #NODOC
|
||||||
if [ ! -f /usr/local/lib/roundcubemail/version ]; then
|
if [ ! -f /usr/local/lib/roundcubemail/version ]; then
|
||||||
# not installed yet #NODOC
|
# not installed yet #NODOC
|
||||||
needs_update=1 #NODOC
|
needs_update=1 #NODOC
|
||||||
elif [[ $VERSION != `cat /usr/local/lib/roundcubemail/version` ]]; then
|
elif [[ "$UPDATE_KEY" != `cat /usr/local/lib/roundcubemail/version` ]]; then
|
||||||
# checks if the version is what we want
|
# checks if the version is what we want
|
||||||
needs_update=1 #NODOC
|
needs_update=1 #NODOC
|
||||||
fi
|
fi
|
||||||
if [ $needs_update == 1 ]; then
|
if [ $needs_update == 1 ]; then
|
||||||
echo installing roudcube webmail $VERSION...
|
# install roundcube
|
||||||
rm -f /tmp/roundcube.tgz
|
echo installing Roundcube webmail $VERSION...
|
||||||
wget -qO /tmp/roundcube.tgz http://downloads.sourceforge.net/project/roundcubemail/roundcubemail/$VERSION/roundcubemail-$VERSION.tar.gz
|
wget_verify \
|
||||||
|
http://downloads.sourceforge.net/project/roundcubemail/roundcubemail/$VERSION/roundcubemail-$VERSION.tar.gz \
|
||||||
|
$HASH \
|
||||||
|
/tmp/roundcube.tgz
|
||||||
tar -C /usr/local/lib -zxf /tmp/roundcube.tgz
|
tar -C /usr/local/lib -zxf /tmp/roundcube.tgz
|
||||||
rm -rf /usr/local/lib/roundcubemail
|
rm -rf /usr/local/lib/roundcubemail
|
||||||
mv /usr/local/lib/roundcubemail-$VERSION/ /usr/local/lib/roundcubemail
|
mv /usr/local/lib/roundcubemail-$VERSION/ /usr/local/lib/roundcubemail
|
||||||
rm -f /tmp/roundcube.tgz
|
rm -f /tmp/roundcube.tgz
|
||||||
echo $VERSION > /usr/local/lib/roundcubemail/version
|
|
||||||
|
# install roundcube autoreply/vacation plugin
|
||||||
|
git_clone https://github.com/arodier/Roundcube-Plugins.git $VACATION_SIEVE_VERSION plugins/vacation_sieve /usr/local/lib/roundcubemail/plugins/vacation_sieve
|
||||||
|
|
||||||
|
# install roundcube persistent_login plugin
|
||||||
|
git_clone https://github.com/mfreiholz/Roundcube-Persistent-Login-Plugin.git $PERSISTENT_LOGIN_VERSION '' /usr/local/lib/roundcubemail/plugins/persistent_login
|
||||||
|
|
||||||
|
# record the version we've installed
|
||||||
|
echo $UPDATE_KEY > /usr/local/lib/roundcubemail/version
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ### Configuring Roundcube
|
# ### Configuring Roundcube
|
||||||
@@ -79,7 +96,7 @@ cat > /usr/local/lib/roundcubemail/config/config.inc.php <<EOF;
|
|||||||
\$config['support_url'] = 'https://mailinabox.email/';
|
\$config['support_url'] = 'https://mailinabox.email/';
|
||||||
\$config['product_name'] = 'Mail-in-a-Box/Roundcube Webmail';
|
\$config['product_name'] = 'Mail-in-a-Box/Roundcube Webmail';
|
||||||
\$config['des_key'] = '$SECRET_KEY';
|
\$config['des_key'] = '$SECRET_KEY';
|
||||||
\$config['plugins'] = array('archive', 'zipdownload', 'password', 'managesieve');
|
\$config['plugins'] = array('archive', 'zipdownload', 'password', 'managesieve', 'jqueryui', 'vacation_sieve', 'persistent_login');
|
||||||
\$config['skin'] = 'classic';
|
\$config['skin'] = 'classic';
|
||||||
\$config['login_autocomplete'] = 2;
|
\$config['login_autocomplete'] = 2;
|
||||||
\$config['password_charset'] = 'UTF-8';
|
\$config['password_charset'] = 'UTF-8';
|
||||||
@@ -87,6 +104,26 @@ cat > /usr/local/lib/roundcubemail/config/config.inc.php <<EOF;
|
|||||||
?>
|
?>
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
|
# Configure vaction_sieve.
|
||||||
|
cat > /usr/local/lib/roundcubemail/plugins/vacation_sieve/config.inc.php <<EOF;
|
||||||
|
<?php
|
||||||
|
/* Do not edit. Written by Mail-in-a-Box. Regenerated on updates. */
|
||||||
|
\$rcmail_config['vacation_sieve'] = array(
|
||||||
|
'date_format' => 'd/m/Y',
|
||||||
|
'working_hours' => array(8,18),
|
||||||
|
'msg_format' => 'text',
|
||||||
|
'logon_transform' => array('#([a-z])[a-z]+(\.|\s)([a-z])#i', '\$1\$3'),
|
||||||
|
'transfer' => array(
|
||||||
|
'mode' => 'managesieve',
|
||||||
|
'ms_activate_script' => true,
|
||||||
|
'host' => 'localhost',
|
||||||
|
'port' => '4190',
|
||||||
|
'usetls' => false,
|
||||||
|
'path' => 'vacation',
|
||||||
|
)
|
||||||
|
);
|
||||||
|
EOF
|
||||||
|
|
||||||
# Create writable directories.
|
# Create writable directories.
|
||||||
mkdir -p /var/log/roundcubemail /tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
|
mkdir -p /var/log/roundcubemail /tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
|
||||||
chown -R www-data.www-data /var/log/roundcubemail /tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
|
chown -R www-data.www-data /var/log/roundcubemail /tmp/roundcubemail $STORAGE_ROOT/mail/roundcube
|
||||||
|
|||||||
@@ -30,17 +30,11 @@ elif [[ $TARGETHASH != `cat /usr/local/lib/z-push/version` ]]; then
|
|||||||
needs_update=1 #NODOC
|
needs_update=1 #NODOC
|
||||||
fi
|
fi
|
||||||
if [ $needs_update == 1 ]; then
|
if [ $needs_update == 1 ]; then
|
||||||
rm -rf /usr/local/lib/z-push
|
|
||||||
rm -f /tmp/zpush-repo
|
|
||||||
echo installing z-push \(fmbiete fork\)...
|
echo installing z-push \(fmbiete fork\)...
|
||||||
git clone -q https://github.com/fmbiete/Z-Push-contrib /tmp/zpush-repo
|
git_clone https://github.com/fmbiete/Z-Push-contrib $TARGETHASH '' /usr/local/lib/z-push
|
||||||
(cd /tmp/zpush-repo/; git checkout -q $TARGETHASH;)
|
|
||||||
rm -rf /tmp/zpush-repo/.git
|
|
||||||
mv /tmp/zpush-repo /usr/local/lib/z-push
|
|
||||||
rm -f /usr/sbin/z-push-{admin,top}
|
rm -f /usr/sbin/z-push-{admin,top}
|
||||||
ln -s /usr/local/lib/z-push/z-push-admin.php /usr/sbin/z-push-admin
|
ln -s /usr/local/lib/z-push/z-push-admin.php /usr/sbin/z-push-admin
|
||||||
ln -s /usr/local/lib/z-push/z-push-top.php /usr/sbin/z-push-top
|
ln -s /usr/local/lib/z-push/z-push-top.php /usr/sbin/z-push-top
|
||||||
rm -f /tmp/zpush-repo
|
|
||||||
echo $TARGETHASH > /usr/local/lib/z-push/version
|
echo $TARGETHASH > /usr/local/lib/z-push/version
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
164
tests/tls.py
Normal file
164
tests/tls.py
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Runs SSLyze on the TLS endpoints of a box and outputs
|
||||||
|
# the results so we can inspect the settings and compare
|
||||||
|
# against a known good version in tls_results.txt.
|
||||||
|
#
|
||||||
|
# Make sure you have SSLyze available:
|
||||||
|
# wget https://github.com/nabla-c0d3/sslyze/releases/download/release-0.11/sslyze-0_11-linux64.zip
|
||||||
|
# unzip sslyze-0_11-linux64.zip
|
||||||
|
#
|
||||||
|
# Then run:
|
||||||
|
#
|
||||||
|
# python3 tls.py yourservername
|
||||||
|
#
|
||||||
|
# If you are on a residential network that blocks outbound
|
||||||
|
# port 25 connections, then you can proxy the connections
|
||||||
|
# through some other host you can ssh into (maybe the box
|
||||||
|
# itself?):
|
||||||
|
#
|
||||||
|
# python3 --proxy user@ssh_host yourservername
|
||||||
|
#
|
||||||
|
# (This will launch "ssh -N -L10023:yourservername:testport user@ssh_host"
|
||||||
|
# to create a tunnel.)
|
||||||
|
|
||||||
|
import sys, subprocess, re, time, json, csv, io, urllib.request
|
||||||
|
|
||||||
|
######################################################################
|
||||||
|
|
||||||
|
# PARSE COMMAND LINE
|
||||||
|
|
||||||
|
proxy = None
|
||||||
|
args = list(sys.argv[1:])
|
||||||
|
while len(args) > 0:
|
||||||
|
if args[0] == "--proxy":
|
||||||
|
args.pop(0)
|
||||||
|
proxy = args.pop(0)
|
||||||
|
break
|
||||||
|
|
||||||
|
if len(args) == 0:
|
||||||
|
print("Usage: python3 tls.py [--proxy ssh_host] hostname")
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
host = args[0]
|
||||||
|
|
||||||
|
######################################################################
|
||||||
|
|
||||||
|
SSLYZE = "sslyze-0_11-linux64/sslyze/sslyze.py"
|
||||||
|
|
||||||
|
common_opts = ["--sslv2", "--sslv3", "--tlsv1", "--tlsv1_1", "--tlsv1_2", "--reneg", "--resum",
|
||||||
|
"--hide_rejected_ciphers", "--compression", "--heartbleed"]
|
||||||
|
|
||||||
|
# Recommendations from Mozilla as of May 20, 2015 at
|
||||||
|
# https://wiki.mozilla.org/Security/Server_Side_TLS.
|
||||||
|
#
|
||||||
|
# The 'modern' ciphers support Firefox 27, Chrome 22, IE 11,
|
||||||
|
# Opera 14, Safari 7, Android 4.4, Java 8. Assumes TLSv1.1,
|
||||||
|
# TLSv1.2 only, though we may also be allowing TLSv3.
|
||||||
|
#
|
||||||
|
# The 'intermediate' ciphers support Firefox 1, Chrome 1, IE 7,
|
||||||
|
# Opera 5, Safari 1, Windows XP IE8, Android 2.3, Java 7.
|
||||||
|
# Assumes TLSv1, TLSv1.1, TLSv1.2.
|
||||||
|
#
|
||||||
|
# The 'old' ciphers bring compatibility back to Win XP IE 6.
|
||||||
|
MOZILLA_CIPHERS_MODERN = "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK"
|
||||||
|
MOZILLA_CIPHERS_INTERMEDIATE = "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
|
||||||
|
MOZILLA_CIPHERS_OLD = "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:DES-CBC3-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
|
||||||
|
|
||||||
|
######################################################################
|
||||||
|
|
||||||
|
def sslyze(opts, port, ok_ciphers):
	"""Scan `host` on `port` with SSLyze and print a cipher-hygiene report.

	opts:       additional SSLyze command-line flags (e.g. ["--starttls=smtp"]).
	port:       TCP port to connect to on `host`.
	ok_ciphers: an OpenSSL cipher list string naming what the server *should*
	            offer (one of the MOZILLA_CIPHERS_* constants); the ciphers
	            SSLyze finds accepted are compared against its expansion.

	Relies on module globals defined elsewhere in this file: host, proxy,
	SSLYZE, common_opts, and cipher_clients. All output goes to stdout.
	"""

	# Print header.
	header = ("PORT %d" % port)
	print(header)
	print("-" * (len(header)))

	# What ciphers should we expect? Expand the OpenSSL cipher list string
	# into the concrete cipher names it matches on this machine's OpenSSL.
	ok_ciphers = subprocess.check_output(["openssl", "ciphers", ok_ciphers]).decode("utf8").strip().split(":")

	# Form the SSLyze connection string.
	connection_string = host + ":" + str(port)

	# Proxy via SSH: forward localhost:10023 to host:port and scan the tunnel
	# instead of connecting directly.
	proxy_proc = None
	if proxy:
		connection_string = "localhost:10023"
		proxy_proc = subprocess.Popen(["ssh", "-N", "-L10023:%s:%d" % (host, port), proxy])
		time.sleep(3) # give ssh a moment to establish the tunnel

	try:
		# Execute SSLyze.
		out = subprocess.check_output([SSLYZE] + common_opts + opts + [connection_string])
		out = out.decode("utf8")

		# Trim output to make better for storing in git.
		if "SCAN RESULTS FOR" not in out:
			# Failed. Just output the error.
			out = re.sub(r"[\w\W]*CHECKING HOST\(S\) AVAILABILITY\n\s*-+\n", "", out) # chop off header that shows the host we queried
		out = re.sub(r"[\w\W]*SCAN RESULTS FOR.*\n\s*-+\n", "", out) # chop off header that shows the host we queried
		out = re.sub(r"SCAN COMPLETED IN .*", "", out)
		out = out.rstrip(" \n-") + "\n"

		# Print.
		print(out)

		# Pull out the accepted ciphers list for each SSL/TLS protocol
		# version outputted.
		accepted_ciphers = set()
		for ciphers in re.findall(r" Accepted:([\w\W]*?)\n *\n", out):
			accepted_ciphers |= set(re.findall(r"\n\s*(\S*)", ciphers))

		# Compare to what Mozilla recommends, for a given modernness-level.
		print(" Should Not Offer: " + (", ".join(sorted(accepted_ciphers-set(ok_ciphers))) or "(none -- good)"))
		print(" Could Also Offer: " + (", ".join(sorted(set(ok_ciphers)-accepted_ciphers)) or "(none -- good)"))

		# What clients does that mean we support on this protocol?
		# Rank clients by how many of the accepted ciphers they can use.
		supported_clients = { }
		for cipher in accepted_ciphers:
			if cipher in cipher_clients:
				for client in cipher_clients[cipher]:
					supported_clients[client] = supported_clients.get(client, 0) + 1
		print(" Supported Clients: " + (", ".join(sorted(supported_clients.keys(), key = lambda client : -supported_clients[client]))))

		# Blank line.
		print()

	finally:
		if proxy_proc:
			proxy_proc.terminate()
			try:
				proxy_proc.wait(5)
			# BUGFIX: the bare name `TimeoutExpired` was never imported, so a
			# slow ssh shutdown raised NameError; qualify it with `subprocess`.
			except subprocess.TimeoutExpired:
				proxy_proc.kill()
|
||||||
|
|
||||||
|
# Build a mapping from IANA cipher-suite names to OpenSSL cipher names,
# using the mail-in-a-box user-agent-tls-capabilities dataset.
cipher_names = { }
cipher_csv = urllib.request.urlopen("https://raw.githubusercontent.com/mail-in-a-box/user-agent-tls-capabilities/master/cipher_names.csv").read().decode("utf8")
for row in csv.DictReader(io.StringIO(cipher_csv)):
	# not sure why there are some multi-line values, use first line:
	cipher_names[row["IANA"]] = row["OpenSSL"].split("\n")[0]

# Build cipher_clients: OpenSSL cipher name -> set of "name/version/platform"
# labels for the clients that can use that cipher (TLS-capable clients only).
client_compatibility = json.loads(urllib.request.urlopen("https://raw.githubusercontent.com/mail-in-a-box/user-agent-tls-capabilities/master/clients.json").read().decode("utf8"))
cipher_clients = { }
for client in client_compatibility:
	if not (set(client['protocols']) & {"TLS 1.0", "TLS 1.1", "TLS 1.2"}):
		continue # does not support TLS
	label = "/".join(part for part in (client['client']['name'], client['client']['version'], client['client']['platform']) if part)
	for suite in client['ciphers']:
		cipher_clients.setdefault(cipher_names.get(suite), set()).add(label)

# Run SSLyze on various ports, grading each service against the
# appropriate Mozilla compatibility level.
sslyze(["--starttls=smtp"], 25, MOZILLA_CIPHERS_OLD)                                  # SMTP
sslyze(["--starttls=smtp"], 587, MOZILLA_CIPHERS_MODERN)                              # SMTP Submission
sslyze(["--http_get", "--chrome_sha1", "--hsts"], 443, MOZILLA_CIPHERS_INTERMEDIATE)  # HTTPS
sslyze([], 993, MOZILLA_CIPHERS_MODERN)                                               # IMAP
sslyze([], 995, MOZILLA_CIPHERS_MODERN)                                               # POP3
|
||||||
431
tests/tls_results.txt
Normal file
431
tests/tls_results.txt
Normal file
@@ -0,0 +1,431 @@
|
|||||||
|
PORT 25
|
||||||
|
-------
|
||||||
|
|
||||||
|
* Deflate Compression:
|
||||||
|
OK - Compression disabled
|
||||||
|
|
||||||
|
* Session Renegotiation:
|
||||||
|
Client-initiated Renegotiations: VULNERABLE - Server honors client-initiated renegotiations
|
||||||
|
Secure Renegotiation: OK - Supported
|
||||||
|
|
||||||
|
* OpenSSL Heartbleed:
|
||||||
|
OK - Not vulnerable to Heartbleed
|
||||||
|
|
||||||
|
* Session Resumption:
|
||||||
|
With Session IDs: OK - Supported (5 successful, 0 failed, 0 errors, 5 total attempts).
|
||||||
|
With TLS Session Tickets: NOT SUPPORTED - TLS ticket not assigned.
|
||||||
|
|
||||||
|
* SSLV2 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* TLSV1_2 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-GCM-SHA384 ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA384 ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES256-GCM-SHA384 ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA256 DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-GCM-SHA384 DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA256 - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-GCM-SHA384 - 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-RC4-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA256 ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-GCM-SHA256 ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-SEED-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA256 DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-GCM-SHA256 DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
SEED-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
RC4-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
RC4-MD5 - 128 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA256 - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-GCM-SHA256 - 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-DES-CBC3-SHA ECDH-256 bits 112 bits 250 2.0.0 Ok
|
||||||
|
EDH-RSA-DES-CBC3-SHA DH-2048 bits 112 bits 250 2.0.0 Ok
|
||||||
|
DES-CBC3-SHA - 112 bits 250 2.0.0 Ok
|
||||||
|
|
||||||
|
* TLSV1_1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-RC4-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-SEED-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
SEED-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
RC4-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
RC4-MD5 - 128 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-DES-CBC3-SHA ECDH-256 bits 112 bits 250 2.0.0 Ok
|
||||||
|
EDH-RSA-DES-CBC3-SHA DH-2048 bits 112 bits 250 2.0.0 Ok
|
||||||
|
DES-CBC3-SHA - 112 bits 250 2.0.0 Ok
|
||||||
|
|
||||||
|
* SSLV3 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-RC4-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-SEED-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
SEED-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
RC4-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
RC4-MD5 - 128 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-DES-CBC3-SHA ECDH-256 bits 112 bits 250 2.0.0 Ok
|
||||||
|
EDH-RSA-DES-CBC3-SHA DH-2048 bits 112 bits 250 2.0.0 Ok
|
||||||
|
DES-CBC3-SHA - 112 bits 250 2.0.0 Ok
|
||||||
|
|
||||||
|
* TLSV1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-RC4-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-SEED-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
SEED-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
RC4-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
RC4-MD5 - 128 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-DES-CBC3-SHA ECDH-256 bits 112 bits 250 2.0.0 Ok
|
||||||
|
EDH-RSA-DES-CBC3-SHA DH-2048 bits 112 bits 250 2.0.0 Ok
|
||||||
|
DES-CBC3-SHA - 112 bits 250 2.0.0 Ok
|
||||||
|
|
||||||
|
Should Not Offer: DHE-RSA-SEED-SHA, ECDHE-RSA-RC4-SHA, EDH-RSA-DES-CBC3-SHA, RC4-MD5, RC4-SHA, SEED-SHA
|
||||||
|
Could Also Offer: DHE-DSS-AES128-GCM-SHA256, DHE-DSS-AES128-SHA, DHE-DSS-AES128-SHA256, DHE-DSS-AES256-GCM-SHA384, DHE-DSS-AES256-SHA, DHE-DSS-AES256-SHA256, DHE-DSS-CAMELLIA128-SHA, DHE-DSS-CAMELLIA256-SHA, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-SHA, ECDHE-ECDSA-AES128-SHA256, ECDHE-ECDSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-SHA, ECDHE-ECDSA-AES256-SHA384, ECDHE-ECDSA-DES-CBC3-SHA, SRP-3DES-EDE-CBC-SHA, SRP-AES-128-CBC-SHA, SRP-AES-256-CBC-SHA, SRP-DSS-3DES-EDE-CBC-SHA, SRP-DSS-AES-128-CBC-SHA, SRP-DSS-AES-256-CBC-SHA, SRP-RSA-3DES-EDE-CBC-SHA, SRP-RSA-AES-128-CBC-SHA, SRP-RSA-AES-256-CBC-SHA
|
||||||
|
Supported Clients: OpenSSL/1.0.2, Yahoo Slurp/Jan 2015, BingPreview/Jan 2015, OpenSSL/1.0.1l, YandexBot/Jan 2015, Android/4.4.2, Safari/8/iOS 8.1.2, Safari/7/OS X 10.9, Safari/8/OS X 10.10, Safari/7/iOS 7.1, Safari/6/iOS 6.0.1, Baidu/Jan 2015, Firefox/31.3.0 ESR/Win 7, Android/5.0.0, IE/11/Win 7, Java/8u31, Googlebot/Feb 2015, Chrome/42/OS X, IE Mobile/11/Win Phone 8.1, IE/11/Win 8.1, Android/4.0.4, Android/4.1.1, Safari/6.0.4/OS X 10.8.4, Android/4.3, Android/4.2.2, Safari/5.1.9/OS X 10.6.8, Java/7u25, OpenSSL/0.9.8y, Firefox/37/OS X, IE/7/Vista, IE/8-10/Win 7, IE Mobile/10/Win Phone 8.0, Java/6u45, Android/2.3.7, IE/8/XP
|
||||||
|
|
||||||
|
PORT 587
|
||||||
|
--------
|
||||||
|
|
||||||
|
* Deflate Compression:
|
||||||
|
OK - Compression disabled
|
||||||
|
|
||||||
|
* Session Renegotiation:
|
||||||
|
Client-initiated Renegotiations: VULNERABLE - Server honors client-initiated renegotiations
|
||||||
|
Secure Renegotiation: OK - Supported
|
||||||
|
|
||||||
|
* OpenSSL Heartbleed:
|
||||||
|
OK - Not vulnerable to Heartbleed
|
||||||
|
|
||||||
|
* Session Resumption:
|
||||||
|
With Session IDs: OK - Supported (5 successful, 0 failed, 0 errors, 5 total attempts).
|
||||||
|
With TLS Session Tickets: NOT SUPPORTED - TLS ticket not assigned.
|
||||||
|
|
||||||
|
* SSLV2 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* TLSV1_2 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-GCM-SHA384 ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA384 ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES256-GCM-SHA384 ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA256 DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-GCM-SHA384 DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA256 - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-GCM-SHA384 - 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA256 ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-GCM-SHA256 ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-SEED-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA256 DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-GCM-SHA256 DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
SEED-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA256 - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-GCM-SHA256 - 128 bits 250 2.0.0 Ok
|
||||||
|
|
||||||
|
* TLSV1_1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-SEED-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
SEED-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
|
||||||
|
* SSLV3 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* TLSV1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
AES256-SHA - 256 bits 250 2.0.0 Ok
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-SEED-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits 250 2.0.0 Ok
|
||||||
|
SEED-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
CAMELLIA128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
AES128-SHA - 128 bits 250 2.0.0 Ok
|
||||||
|
|
||||||
|
Should Not Offer: AES128-GCM-SHA256, AES128-SHA, AES128-SHA256, AES256-GCM-SHA384, AES256-SHA, AES256-SHA256, CAMELLIA128-SHA, CAMELLIA256-SHA, DHE-RSA-CAMELLIA128-SHA, DHE-RSA-CAMELLIA256-SHA, DHE-RSA-SEED-SHA, SEED-SHA
|
||||||
|
Could Also Offer: DHE-DSS-AES128-GCM-SHA256, DHE-DSS-AES128-SHA256, DHE-DSS-AES256-GCM-SHA384, DHE-DSS-AES256-SHA, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-SHA, ECDHE-ECDSA-AES128-SHA256, ECDHE-ECDSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-SHA, ECDHE-ECDSA-AES256-SHA384
|
||||||
|
Supported Clients: OpenSSL/1.0.2, Yahoo Slurp/Jan 2015, BingPreview/Jan 2015, OpenSSL/1.0.1l, YandexBot/Jan 2015, Android/4.4.2, Safari/8/iOS 8.1.2, Safari/7/OS X 10.9, Safari/8/OS X 10.10, Safari/7/iOS 7.1, IE Mobile/11/Win Phone 8.1, IE/11/Win 8.1, IE/11/Win 7, Safari/6/iOS 6.0.1, Firefox/31.3.0 ESR/Win 7, Baidu/Jan 2015, Android/5.0.0, Chrome/42/OS X, Java/8u31, Googlebot/Feb 2015, Firefox/37/OS X, Android/4.0.4, Android/4.1.1, Safari/6.0.4/OS X 10.8.4, Android/4.3, Android/4.2.2, Safari/5.1.9/OS X 10.6.8, OpenSSL/0.9.8y, IE/7/Vista, IE/8-10/Win 7, IE Mobile/10/Win Phone 8.0, Java/7u25, Java/6u45, Android/2.3.7
|
||||||
|
|
||||||
|
PORT 443
|
||||||
|
--------
|
||||||
|
|
||||||
|
* Deflate Compression:
|
||||||
|
OK - Compression disabled
|
||||||
|
|
||||||
|
* Session Renegotiation:
|
||||||
|
Client-initiated Renegotiations: OK - Rejected
|
||||||
|
Secure Renegotiation: OK - Supported
|
||||||
|
|
||||||
|
* HTTP Strict Transport Security:
|
||||||
|
OK - HSTS header received: max-age=31536000
|
||||||
|
|
||||||
|
* Session Resumption:
|
||||||
|
With Session IDs: OK - Supported (5 successful, 0 failed, 0 errors, 5 total attempts).
|
||||||
|
With TLS Session Tickets: OK - Supported
|
||||||
|
|
||||||
|
* OpenSSL Heartbleed:
|
||||||
|
OK - Not vulnerable to Heartbleed
|
||||||
|
|
||||||
|
* SSLV2 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* Google Chrome SHA-1 Deprecation Status:
|
||||||
|
OK - Leaf certificate expires before 2016.
|
||||||
|
|
||||||
|
* TLSV1_2 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES128-GCM-SHA256 ECDH-256 bits 128 bits HTTP 200 OK
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA384 ECDH-256 bits 256 bits HTTP 200 OK
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits HTTP 200 OK
|
||||||
|
ECDHE-RSA-AES256-GCM-SHA384 ECDH-256 bits 256 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES256-SHA256 DH-2048 bits 256 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES256-GCM-SHA384 DH-2048 bits 256 bits HTTP 200 OK
|
||||||
|
ECDHE-RSA-AES128-SHA256 ECDH-256 bits 128 bits HTTP 200 OK
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits HTTP 200 OK
|
||||||
|
ECDHE-RSA-AES128-GCM-SHA256 ECDH-256 bits 128 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES128-SHA256 DH-2048 bits 128 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES128-GCM-SHA256 DH-2048 bits 128 bits HTTP 200 OK
|
||||||
|
DES-CBC3-SHA - 112 bits HTTP 200 OK
|
||||||
|
|
||||||
|
* TLSV1_1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits HTTP 200 OK
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits HTTP 200 OK
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits HTTP 200 OK
|
||||||
|
DES-CBC3-SHA - 112 bits HTTP 200 OK
|
||||||
|
|
||||||
|
* SSLV3 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* TLSV1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits HTTP 200 OK
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-256 bits 256 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES256-SHA DH-2048 bits 256 bits HTTP 200 OK
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-256 bits 128 bits HTTP 200 OK
|
||||||
|
DHE-RSA-AES128-SHA DH-2048 bits 128 bits HTTP 200 OK
|
||||||
|
DES-CBC3-SHA - 112 bits HTTP 200 OK
|
||||||
|
|
||||||
|
Should Not Offer: (none -- good)
|
||||||
|
Could Also Offer: AES128-GCM-SHA256, AES128-SHA, AES128-SHA256, AES256-GCM-SHA384, AES256-SHA, AES256-SHA256, CAMELLIA128-SHA, CAMELLIA256-SHA, DHE-DSS-AES128-GCM-SHA256, DHE-DSS-AES128-SHA, DHE-DSS-AES128-SHA256, DHE-DSS-AES256-GCM-SHA384, DHE-DSS-AES256-SHA, DHE-DSS-AES256-SHA256, DHE-DSS-CAMELLIA128-SHA, DHE-DSS-CAMELLIA256-SHA, DHE-RSA-CAMELLIA128-SHA, DHE-RSA-CAMELLIA256-SHA, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-SHA, ECDHE-ECDSA-AES128-SHA256, ECDHE-ECDSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-SHA, ECDHE-ECDSA-AES256-SHA384, SRP-AES-128-CBC-SHA, SRP-AES-256-CBC-SHA, SRP-DSS-AES-128-CBC-SHA, SRP-DSS-AES-256-CBC-SHA, SRP-RSA-AES-128-CBC-SHA, SRP-RSA-AES-256-CBC-SHA
|
||||||
|
Supported Clients: YandexBot/Jan 2015, OpenSSL/1.0.2, Yahoo Slurp/Jan 2015, BingPreview/Jan 2015, OpenSSL/1.0.1l, Android/4.4.2, Safari/8/iOS 8.1.2, Safari/8/OS X 10.10, Safari/7/OS X 10.9, Safari/7/iOS 7.1, Safari/6/iOS 6.0.1, Android/5.0.0, Chrome/42/OS X, IE/11/Win 8.1, IE/11/Win 7, Java/8u31, IE Mobile/11/Win Phone 8.1, Googlebot/Feb 2015, Firefox/37/OS X, Firefox/31.3.0 ESR/Win 7, Android/4.2.2, Android/4.0.4, Baidu/Jan 2015, Safari/5.1.9/OS X 10.6.8, Android/4.1.1, Safari/6.0.4/OS X 10.8.4, Android/4.3, OpenSSL/0.9.8y, IE/7/Vista, IE/8-10/Win 7, IE Mobile/10/Win Phone 8.0, Java/7u25, Java/6u45, Android/2.3.7, IE/8/XP
|
||||||
|
|
||||||
|
PORT 993
|
||||||
|
--------
|
||||||
|
|
||||||
|
* Deflate Compression:
|
||||||
|
OK - Compression disabled
|
||||||
|
|
||||||
|
Unhandled exception when processing --reneg:
|
||||||
|
_nassl.OpenSSLError - error:140940F5:SSL routines:ssl3_read_bytes:unexpected record
|
||||||
|
|
||||||
|
* OpenSSL Heartbleed:
|
||||||
|
OK - Not vulnerable to Heartbleed
|
||||||
|
|
||||||
|
* SSLV2 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* Session Resumption:
|
||||||
|
With Session IDs: NOT SUPPORTED (0 successful, 5 failed, 0 errors, 5 total attempts).
|
||||||
|
With TLS Session Tickets: NOT SUPPORTED - TLS ticket assigned but not accepted.
|
||||||
|
|
||||||
|
* TLSV1_2 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-1024 bits 256 bits
|
||||||
|
DHE-RSA-AES256-SHA DH-1024 bits 256 bits
|
||||||
|
CAMELLIA256-SHA - 256 bits
|
||||||
|
AES256-SHA - 256 bits
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-384 bits 128 bits
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-1024 bits 128 bits
|
||||||
|
DHE-RSA-AES128-SHA DH-1024 bits 128 bits
|
||||||
|
CAMELLIA128-SHA - 128 bits
|
||||||
|
AES128-SHA - 128 bits
|
||||||
|
|
||||||
|
* TLSV1_1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-1024 bits 256 bits
|
||||||
|
DHE-RSA-AES256-SHA DH-1024 bits 256 bits
|
||||||
|
CAMELLIA256-SHA - 256 bits
|
||||||
|
AES256-SHA - 256 bits
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-384 bits 128 bits
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-1024 bits 128 bits
|
||||||
|
DHE-RSA-AES128-SHA DH-1024 bits 128 bits
|
||||||
|
CAMELLIA128-SHA - 128 bits
|
||||||
|
AES128-SHA - 128 bits
|
||||||
|
|
||||||
|
* SSLV3 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* TLSV1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-1024 bits 256 bits
|
||||||
|
DHE-RSA-AES256-SHA DH-1024 bits 256 bits
|
||||||
|
CAMELLIA256-SHA - 256 bits
|
||||||
|
AES256-SHA - 256 bits
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-384 bits 128 bits
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-1024 bits 128 bits
|
||||||
|
DHE-RSA-AES128-SHA DH-1024 bits 128 bits
|
||||||
|
CAMELLIA128-SHA - 128 bits
|
||||||
|
AES128-SHA - 128 bits
|
||||||
|
|
||||||
|
Should Not Offer: AES128-SHA, AES256-SHA, CAMELLIA128-SHA, CAMELLIA256-SHA, DHE-RSA-CAMELLIA128-SHA, DHE-RSA-CAMELLIA256-SHA
|
||||||
|
Could Also Offer: DHE-DSS-AES128-GCM-SHA256, DHE-DSS-AES128-SHA256, DHE-DSS-AES256-GCM-SHA384, DHE-DSS-AES256-SHA, DHE-RSA-AES128-GCM-SHA256, DHE-RSA-AES128-SHA256, DHE-RSA-AES256-GCM-SHA384, DHE-RSA-AES256-SHA256, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-SHA, ECDHE-ECDSA-AES128-SHA256, ECDHE-ECDSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-SHA, ECDHE-ECDSA-AES256-SHA384, ECDHE-RSA-AES128-GCM-SHA256, ECDHE-RSA-AES128-SHA256, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-RSA-AES256-SHA384
|
||||||
|
Supported Clients: OpenSSL/1.0.2, Baidu/Jan 2015, Yahoo Slurp/Jan 2015, BingPreview/Jan 2015, OpenSSL/1.0.1l, Firefox/31.3.0 ESR/Win 7, Googlebot/Feb 2015, Android/4.2.2, Android/5.0.0, Android/4.0.4, Safari/8/iOS 8.1.2, Safari/7/OS X 10.9, YandexBot/Jan 2015, Safari/8/OS X 10.10, Safari/7/iOS 7.1, Chrome/42/OS X, Safari/5.1.9/OS X 10.6.8, Android/4.1.1, Firefox/37/OS X, Safari/6.0.4/OS X 10.8.4, Android/4.3, Safari/6/iOS 6.0.1, Android/4.4.2, OpenSSL/0.9.8y, IE Mobile/11/Win Phone 8.1, IE/7/Vista, IE/11/Win 8.1, IE/11/Win 7, IE/8-10/Win 7, IE Mobile/10/Win Phone 8.0, Java/8u31, Java/7u25, Java/6u45, Android/2.3.7
|
||||||
|
|
||||||
|
PORT 995
|
||||||
|
--------
|
||||||
|
|
||||||
|
* Deflate Compression:
|
||||||
|
OK - Compression disabled
|
||||||
|
|
||||||
|
Unhandled exception when processing --reneg:
|
||||||
|
_nassl.OpenSSLError - error:140940F5:SSL routines:ssl3_read_bytes:unexpected record
|
||||||
|
|
||||||
|
* OpenSSL Heartbleed:
|
||||||
|
OK - Not vulnerable to Heartbleed
|
||||||
|
|
||||||
|
* SSLV2 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* Session Resumption:
|
||||||
|
With Session IDs: NOT SUPPORTED (0 successful, 5 failed, 0 errors, 5 total attempts).
|
||||||
|
With TLS Session Tickets: NOT SUPPORTED - TLS ticket assigned but not accepted.
|
||||||
|
|
||||||
|
* TLSV1_2 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-1024 bits 256 bits
|
||||||
|
DHE-RSA-AES256-SHA DH-1024 bits 256 bits
|
||||||
|
CAMELLIA256-SHA - 256 bits
|
||||||
|
AES256-SHA - 256 bits
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-384 bits 128 bits
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-1024 bits 128 bits
|
||||||
|
DHE-RSA-AES128-SHA DH-1024 bits 128 bits
|
||||||
|
CAMELLIA128-SHA - 128 bits
|
||||||
|
AES128-SHA - 128 bits
|
||||||
|
|
||||||
|
* TLSV1_1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-1024 bits 256 bits
|
||||||
|
DHE-RSA-AES256-SHA DH-1024 bits 256 bits
|
||||||
|
CAMELLIA256-SHA - 256 bits
|
||||||
|
AES256-SHA - 256 bits
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-384 bits 128 bits
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-1024 bits 128 bits
|
||||||
|
DHE-RSA-AES128-SHA DH-1024 bits 128 bits
|
||||||
|
CAMELLIA128-SHA - 128 bits
|
||||||
|
AES128-SHA - 128 bits
|
||||||
|
|
||||||
|
* SSLV3 Cipher Suites:
|
||||||
|
Server rejected all cipher suites.
|
||||||
|
|
||||||
|
* TLSV1 Cipher Suites:
|
||||||
|
Preferred:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
Accepted:
|
||||||
|
ECDHE-RSA-AES256-SHA ECDH-384 bits 256 bits
|
||||||
|
DHE-RSA-CAMELLIA256-SHA DH-1024 bits 256 bits
|
||||||
|
DHE-RSA-AES256-SHA DH-1024 bits 256 bits
|
||||||
|
CAMELLIA256-SHA - 256 bits
|
||||||
|
AES256-SHA - 256 bits
|
||||||
|
ECDHE-RSA-AES128-SHA ECDH-384 bits 128 bits
|
||||||
|
DHE-RSA-CAMELLIA128-SHA DH-1024 bits 128 bits
|
||||||
|
DHE-RSA-AES128-SHA DH-1024 bits 128 bits
|
||||||
|
CAMELLIA128-SHA - 128 bits
|
||||||
|
AES128-SHA - 128 bits
|
||||||
|
|
||||||
|
Should Not Offer: AES128-SHA, AES256-SHA, CAMELLIA128-SHA, CAMELLIA256-SHA, DHE-RSA-CAMELLIA128-SHA, DHE-RSA-CAMELLIA256-SHA
|
||||||
|
Could Also Offer: DHE-DSS-AES128-GCM-SHA256, DHE-DSS-AES128-SHA256, DHE-DSS-AES256-GCM-SHA384, DHE-DSS-AES256-SHA, DHE-RSA-AES128-GCM-SHA256, DHE-RSA-AES128-SHA256, DHE-RSA-AES256-GCM-SHA384, DHE-RSA-AES256-SHA256, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-SHA, ECDHE-ECDSA-AES128-SHA256, ECDHE-ECDSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-SHA, ECDHE-ECDSA-AES256-SHA384, ECDHE-RSA-AES128-GCM-SHA256, ECDHE-RSA-AES128-SHA256, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-RSA-AES256-SHA384
|
||||||
|
Supported Clients: OpenSSL/1.0.2, Baidu/Jan 2015, Yahoo Slurp/Jan 2015, BingPreview/Jan 2015, OpenSSL/1.0.1l, Firefox/31.3.0 ESR/Win 7, Googlebot/Feb 2015, Android/4.2.2, Android/5.0.0, Android/4.0.4, Safari/8/iOS 8.1.2, Safari/7/OS X 10.9, YandexBot/Jan 2015, Safari/8/OS X 10.10, Safari/7/iOS 7.1, Chrome/42/OS X, Safari/5.1.9/OS X 10.6.8, Android/4.1.1, Firefox/37/OS X, Safari/6.0.4/OS X 10.8.4, Android/4.3, Safari/6/iOS 6.0.1, Android/4.4.2, OpenSSL/0.9.8y, IE Mobile/11/Win Phone 8.1, IE/7/Vista, IE/11/Win 8.1, IE/11/Win 7, IE/8-10/Win 7, IE Mobile/10/Win Phone 8.0, Java/8u31, Java/7u25, Java/6u45, Android/2.3.7
|
||||||
|
|
||||||
@@ -54,6 +54,14 @@ while settings[0][0] == "-" and settings[0] != "--":
|
|||||||
print("Invalid option.")
|
print("Invalid option.")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
# sanity check command line
|
||||||
|
for setting in settings:
|
||||||
|
try:
|
||||||
|
name, value = setting.split("=", 1)
|
||||||
|
except:
|
||||||
|
import subprocess
|
||||||
|
print("Invalid command line: ", subprocess.list2cmdline(sys.argv))
|
||||||
|
|
||||||
# create the new config file in memory
|
# create the new config file in memory
|
||||||
|
|
||||||
found = set()
|
found = set()
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
#!/usr/bin/python3
|
#!/usr/bin/python3
|
||||||
|
|
||||||
import sys, getpass, urllib.request, urllib.error, json
|
import sys, getpass, urllib.request, urllib.error, json, re
|
||||||
|
|
||||||
def mgmt(cmd, data=None, is_json=False):
|
def mgmt(cmd, data=None, is_json=False):
|
||||||
# The base URL for the management daemon. (Listens on IPv4 only.)
|
# The base URL for the management daemon. (Listens on IPv4 only.)
|
||||||
@@ -28,13 +28,20 @@ def mgmt(cmd, data=None, is_json=False):
|
|||||||
return resp
|
return resp
|
||||||
|
|
||||||
def read_password():
|
def read_password():
|
||||||
first = getpass.getpass('password: ')
|
while True:
|
||||||
second = getpass.getpass(' (again): ')
|
first = getpass.getpass('password: ')
|
||||||
while first != second:
|
if len(first) < 4:
|
||||||
print('Passwords not the same. Try again.')
|
print("Passwords must be at least four characters.")
|
||||||
first = getpass.getpass('password: ')
|
continue
|
||||||
second = getpass.getpass(' (again): ')
|
if re.search(r'[\s]', first):
|
||||||
return first
|
print("Passwords cannot contain spaces.")
|
||||||
|
continue
|
||||||
|
second = getpass.getpass(' (again): ')
|
||||||
|
if first != second:
|
||||||
|
print("Passwords not the same. Try again.")
|
||||||
|
continue
|
||||||
|
break
|
||||||
|
return first
|
||||||
|
|
||||||
def setup_key_auth(mgmt_uri):
|
def setup_key_auth(mgmt_uri):
|
||||||
key = open('/var/lib/mailinabox/api.key').read().strip()
|
key = open('/var/lib/mailinabox/api.key').read().strip()
|
||||||
|
|||||||
@@ -25,6 +25,7 @@ for fn in glob.glob("/var/log/nginx/access.log*"):
|
|||||||
with f:
|
with f:
|
||||||
for line in f:
|
for line in f:
|
||||||
# Find lines that are GETs on /bootstrap.sh by either curl or wget.
|
# Find lines that are GETs on /bootstrap.sh by either curl or wget.
|
||||||
|
# (Note that we purposely skip ...?ping=1 requests which is the admin panel querying us for updates.)
|
||||||
m = re.match(rb"(?P<ip>\S+) - - \[(?P<date>.*?)\] \"GET /bootstrap.sh HTTP/.*\" 200 \d+ .* \"(?:curl|wget)", line, re.I)
|
m = re.match(rb"(?P<ip>\S+) - - \[(?P<date>.*?)\] \"GET /bootstrap.sh HTTP/.*\" 200 \d+ .* \"(?:curl|wget)", line, re.I)
|
||||||
if m:
|
if m:
|
||||||
date, time = m.group("date").decode("ascii").split(":", 1)
|
date, time = m.group("date").decode("ascii").split(":", 1)
|
||||||
|
|||||||
Reference in New Issue
Block a user