
Initial commit of a log capture and reporting feature

This adds a new section to the admin panel called "Activity", which
supplies charts, graphs and details about messages entering and leaving
the host.

A new daemon captures details of system mail activity by monitoring
the /var/log/mail.log file, summarizing it into a SQLite database
that's kept in user-data.
downtownallday
2021-01-11 18:02:07 -05:00
parent 73a2b72243
commit 2a0e50c8d4
108 changed files with 9027 additions and 6 deletions
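
The capture daemon itself is not part of this excerpt; the files below supply the
database layer it writes into. For orientation, here is a minimal sketch of how the
pieces might be wired together, assuming a hypothetical parse_line() helper, module
paths, and database location (none of these are confirmed by the diff):

# Hedged sketch only: parse_line() and the paths are placeholders, not part of this
# commit. It shows the intended flow: read /var/log/mail.log (the real daemon follows
# it as it grows), turn each line into a record, and hand it to the event store.
from db.SqliteConnFactory import SqliteConnFactory   # illustrative module path
from db.SqliteEventStore import SqliteEventStore

db_factory = SqliteConnFactory('/home/user-data/reporting/capture.sqlite')  # hypothetical location
store = SqliteEventStore(db_factory)

with open('/var/log/mail.log') as mail_log:
    for line in mail_log:
        rec = parse_line(line)                # hypothetical parser producing an 'inbound_mail' dict
        if rec:
            store.store('inbound_mail', rec)  # queued; written by the EventStore thread

store.stop()                                  # drain the queue and join the writer thread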

View File

@@ -0,0 +1,10 @@
class DatabaseConnectionFactory(object):
    def connect(self):
        raise NotImplementedError()

    def close(self, conn):
        raise NotImplementedError()

View File

@@ -0,0 +1,122 @@
# -*- indent-tabs-mode: t; tab-width: 4; python-indent-offset: 4; -*-
import threading
import queue
import logging
from .Prunable import Prunable
log = logging.getLogger(__name__)
'''Subclass this and override:

    write_rec()
    read_rec()

to provide storage for event "records". EventStore is thread-safe and
uses a single thread to write all records.

'''
class EventStore(Prunable):
    def __init__(self, db_conn_factory):
        self.db_conn_factory = db_conn_factory
        # we'll have a single thread do all the writing to the database
        #self.queue = queue.SimpleQueue() # available in Python 3.7+
        self.queue = queue.Queue()
        self.interrupt = threading.Event()
        self.rec_added = threading.Event()
        self.have_event = threading.Event()
        self.t = threading.Thread(
            target=self._bg_writer,
            name="EventStore",
            daemon=True
        )
        self.max_queue_size = 100000
        self.t.start()

    def connect(self):
        return self.db_conn_factory.connect()

    def close(self, conn):
        self.db_conn_factory.close(conn)

    def write_rec(self, conn, type, rec):
        '''write a "rec" of the given "type" to the database. The subclass
        must know how to do that. "type" is a string identifier of the
        subclass's choosing. Users of this class should call store()
        and not this function, which will queue the request, and a
        thread managed by this class will call this function.

        '''
        raise NotImplementedError()

    def read_rec(self, conn, type, args):
        '''read from the database'''
        raise NotImplementedError()

    def prune(self, conn, policy):
        raise NotImplementedError()

    def store(self, type, rec):
        self.queue.put({
            'type': type,
            'rec': rec
        })
        self.rec_added.set()
        self.have_event.set()

    def stop(self):
        self.interrupt.set()
        self.have_event.set()
        self.t.join()

    def __del__(self):
        log.debug('EventStore __del__')
        self.interrupt.set()
        self.have_event.set()

    def _pop(self):
        try:
            return self.queue.get(block=False)
        except queue.Empty:
            return None

    def _bg_writer(self):
        log.debug('start EventStore thread')
        conn = self.connect()
        try:
            while not self.interrupt.is_set() or not self.queue.empty():
                item = self._pop()
                if item:
                    try:
                        self.write_rec(conn, item['type'], item['rec'])
                    except Exception as e:
                        log.exception(e)
                        retry_count = item.get('retry_count', 0)
                        if self.interrupt.is_set():
                            log.warning('interrupted, dropping record: %s', item)
                        elif retry_count > 2:
                            log.warning('giving up after %s attempts, dropping record: %s', retry_count, item)
                        elif self.queue.qsize() >= self.max_queue_size:
                            log.warning('queue full, dropping record: %s', item)
                        else:
                            item['retry_count'] = retry_count + 1
                            self.queue.put(item)
                            # wait for another record to prevent immediate retry
                            if not self.interrupt.is_set():
                                self.have_event.wait()
                                self.rec_added.clear()
                                self.have_event.clear()
                    self.queue.task_done()  # remove for SimpleQueue
                else:
                    self.have_event.wait()
                    self.rec_added.clear()
                    self.have_event.clear()
        finally:
            self.close(conn)
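
The module docstring above describes the subclass contract. As a hedged illustration
(not part of this commit), a toy subclass whose "connection" is a plain list shows how
write_rec() and read_rec() plug in, while callers only ever use store():

# Illustrative only; SqliteEventStore further below is the real subclass in this commit.
class ListConnFactory(DatabaseConnectionFactory):
    def __init__(self):
        self.records = []             # the shared "database" is just a list
    def connect(self):
        return self.records
    def close(self, conn):
        pass

class ListEventStore(EventStore):
    def write_rec(self, conn, type, rec):
        conn.append((type, rec))      # runs on the EventStore-managed writer thread
    def read_rec(self, conn, type, args):
        return [r for t, r in conn if t == type]
    def prune(self, conn, policy):
        conn.clear()

store = ListEventStore(ListConnFactory())
store.store('example', {'remote_ip': '203.0.113.5'})   # queued, written asynchronously
store.stop()                                           # drains the queue, then joins the thread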

View File

@@ -0,0 +1,7 @@
class Prunable(object):
    def prune(self, conn, policy):
        raise NotImplementedError()

View File

@@ -0,0 +1,79 @@
import threading
import logging
log = logging.getLogger(__name__)
class Pruner(object):
    '''periodically calls the prune() method of registered Prunable
    objects

    '''
    def __init__(self, db_conn_factory, policy={
            'older_than_days': 7,
            'frequency_min': 60
    }):
        self.db_conn_factory = db_conn_factory
        self.policy = policy
        self.prunables = []
        self.interrupt = threading.Event()
        self._new_thread()
        self.t.start()

    def _new_thread(self):
        self.interrupt.clear()
        self.t = threading.Thread(
            target=self._bg_pruner,
            name="Pruner",
            daemon=True
        )

    def add_prunable(self, inst):
        self.prunables.append(inst)

    def set_policy(self, policy):
        self.stop()
        self.policy = policy
        # a new thread object must be created or Python(<3.8?) throws
        # RuntimeError("threads can only be started once")
        self._new_thread()
        self.t.start()

    def stop(self, do_join=True):
        self.interrupt.set()
        if do_join:
            self.t.join()

    def connect(self):
        return self.db_conn_factory.connect()

    def close(self, conn):
        self.db_conn_factory.close(conn)

    def __del__(self):
        self.stop(do_join=False)

    def _bg_pruner(self):
        conn = self.connect()

        def do_prune():
            for prunable in self.prunables:
                if not self.interrupt.is_set():
                    try:
                        prunable.prune(conn, self.policy)
                    except Exception as e:
                        log.exception(e)

        try:
            # prune right-off
            do_prune()
            while not self.interrupt.is_set():
                # wait until interrupted or it's time to prune
                if self.interrupt.wait(self.policy['frequency_min'] * 60) is not True:
                    do_prune()
        finally:
            self.close(conn)
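
Usage of Pruner is not shown in this excerpt. A minimal hedged sketch, assuming an event
store already built on the SqliteConnFactory that follows (the path and policy values
are illustrative; the class defaults are 7 days, checked every 60 minutes):

db_factory = SqliteConnFactory('/home/user-data/reporting/capture.sqlite')  # hypothetical path
store = SqliteEventStore(db_factory)

pruner = Pruner(db_factory)       # the pruning thread starts immediately with the default policy
pruner.add_prunable(store)        # store.prune(conn, policy) is now called on each cycle
pruner.set_policy({'older_than_days': 30, 'frequency_min': 120})  # stops and restarts the thread
pruner.stop()                     # interrupt and join when shutting down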

View File

@@ -0,0 +1,52 @@
import os, stat
import sqlite3
import logging
import threading
from .DatabaseConnectionFactory import DatabaseConnectionFactory
log = logging.getLogger(__name__)
class SqliteConnFactory(DatabaseConnectionFactory):
    def __init__(self, db_path):
        super(SqliteConnFactory, self).__init__()
        log.debug('factory for %s', db_path)
        self.db_path = db_path
        self.db_basename = os.path.basename(db_path)
        self.ensure_exists()

    def ensure_exists(self):
        # create the parent directory and set its permissions
        parent = os.path.dirname(self.db_path)
        if parent != '' and not os.path.exists(parent):
            os.makedirs(parent)
            os.chmod(parent,
                     stat.S_IRWXU |
                     stat.S_IRGRP |
                     stat.S_IXGRP |
                     stat.S_IROTH |
                     stat.S_IXOTH
            )
        # if the database is new, create an empty file and set file
        # permissions
        if not os.path.exists(self.db_path):
            log.debug('creating empty database: %s', self.db_basename)
            with open(self.db_path, 'w') as fp:
                pass
            os.chmod(self.db_path,
                     stat.S_IRUSR |
                     stat.S_IWUSR
            )

    def connect(self):
        log.debug('opening database %s', self.db_basename)
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        return conn

    def close(self, conn):
        log.debug('closing database %s', self.db_basename)
        conn.close()
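
Since connect() installs sqlite3.Row as the row factory, callers can address columns by
name. A small hedged example (the path is hypothetical; the table comes from the schema
in the next file):

factory = SqliteConnFactory('/home/user-data/reporting/capture.sqlite')  # hypothetical path
conn = factory.connect()
try:
    for row in conn.execute("SELECT rcpt_to, status FROM mta_delivery LIMIT 5"):
        print(row['rcpt_to'], row['status'])    # sqlite3.Row allows access by column name
finally:
    factory.close(conn)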

View File

@@ -0,0 +1,368 @@
# -*- indent-tabs-mode: t; tab-width: 4; python-indent-offset: 4; -*-
import sqlite3
import os, stat
import logging
import json
import datetime
from .EventStore import EventStore
log = logging.getLogger(__name__)
#
# schema
#
mta_conn_fields = [
'service',
'service_tid',
'connect_time',
'disconnect_time',
'remote_host',
'remote_ip',
'sasl_method',
'sasl_username',
'remote_auth_success',
'remote_auth_attempts',
'remote_used_starttls',
'disposition',
]
mta_accept_fields = [
'mta_conn_id',
'queue_time',
'queue_remove_time',
'subsystems',
# 'spf_tid',
'spf_result',
'spf_reason',
'postfix_msg_id',
'message_id',
'dkim_result',
'dkim_reason',
'dmarc_result',
'dmarc_reason',
'envelope_from',
'message_size',
'message_nrcpt',
'accept_status',
'failure_info',
'failure_category',
]
mta_delivery_fields = [
'mta_accept_id',
'service',
# 'service_tid',
'rcpt_to',
# 'postgrey_tid',
'postgrey_result',
'postgrey_reason',
'postgrey_delay',
# 'spam_tid',
'spam_result',
'spam_score',
'relay',
'status',
'delay',
'delivery_connection',
'delivery_connection_info',
'delivery_info',
'failure_category',
]
db_info_create_table_stmt = "CREATE TABLE IF NOT EXISTS db_info(id INTEGER PRIMARY KEY AUTOINCREMENT, key TEXT NOT NULL, value TEXT NOT NULL)"
schema_updates = [
# update 0
[
# three "mta" tables having a one-to-many-to-many relationship:
# mta_connection(1) -> mta_accept(0:N) -> mta_delivery(0:N)
#
"CREATE TABLE mta_connection(\
mta_conn_id INTEGER PRIMARY KEY AUTOINCREMENT,\
service TEXT NOT NULL, /* 'smtpd', 'submission' or 'pickup' */\
service_tid TEXT NOT NULL,\
connect_time TEXT NOT NULL,\
disconnect_time TEXT,\
remote_host TEXT COLLATE NOCASE,\
remote_ip TEXT COLLATE NOCASE,\
sasl_method TEXT, /* sasl: submission service only */\
sasl_username TEXT COLLATE NOCASE,\
remote_auth_success INTEGER, /* count of successes */\
remote_auth_attempts INTEGER, /* count of attempts */\
remote_used_starttls INTEGER, /* 1 if STARTTLS used */\
disposition TEXT /* 'normal','scanner','login_attempt',etc */\
)",
"CREATE INDEX idx_mta_connection_connect_time ON mta_connection(connect_time, sasl_username COLLATE NOCASE)",
"CREATE TABLE mta_accept(\
mta_accept_id INTEGER PRIMARY KEY AUTOINCREMENT,\
mta_conn_id INTEGER,\
queue_time TEXT,\
queue_remove_time TEXT,\
subsystems TEXT,\
/*spf_tid TEXT,*/\
spf_result TEXT,\
spf_reason TEXT,\
postfix_msg_id TEXT,\
message_id TEXT,\
dkim_result TEXT,\
dkim_reason TEXT,\
dmarc_result TEXT,\
dmarc_reason TEXT,\
envelope_from TEXT COLLATE NOCASE,\
message_size INTEGER,\
message_nrcpt INTEGER,\
accept_status TEXT, /* 'accept','greylist','spf-reject',others... */\
failure_info TEXT, /* details from mta or subsystems */\
failure_category TEXT,\
FOREIGN KEY(mta_conn_id) REFERENCES mta_connection(mta_conn_id) ON DELETE RESTRICT\
)",
"CREATE TABLE mta_delivery(\
mta_delivery_id INTEGER PRIMARY KEY AUTOINCREMENT,\
mta_accept_id INTEGER,\
service TEXT, /* 'lmtp' or 'smtp' */\
/*service_tid TEXT,*/\
rcpt_to TEXT COLLATE NOCASE, /* email addr */\
/*postgrey_tid TEXT,*/\
postgrey_result TEXT,\
postgrey_reason TEXT,\
postgrey_delay NUMBER,\
/*spam_tid TEXT,*/ /* spam: lmtp only */\
spam_result TEXT, /* 'clean' or 'spam' */\
spam_score NUMBER, /* eg: 2.10 */\
relay TEXT, /* hostname[IP]:port */\
status TEXT, /* 'sent', 'bounce', 'reject', etc */\
delay NUMBER, /* fractional seconds, 'sent' status only */\
delivery_connection TEXT, /* 'trusted' or 'untrusted' */\
delivery_connection_info TEXT, /* details on TLS connection */\
delivery_info TEXT, /* details from the remote mta */\
failure_category TEXT,\
FOREIGN KEY(mta_accept_id) REFERENCES mta_accept(mta_accept_id) ON DELETE RESTRICT\
)",
"CREATE INDEX idx_mta_delivery_rcpt_to ON mta_delivery(rcpt_to COLLATE NOCASE)",
"CREATE TABLE state_cache(\
state_cache_id INTEGER PRIMARY KEY AUTOINCREMENT,\
owner_id INTEGER NOT NULL,\
state TEXT\
)",
"INSERT INTO db_info (key,value) VALUES ('schema_version', '0')"
]
]
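
update_schema() in the class below replays every schema_updates entry past the stored
schema_version, so future migrations are added as new list entries. A hedged illustration
of the expected shape of such an entry (no update 1 exists in this commit, and the added
column is invented):

# update 1 (hypothetical example)
[
    "ALTER TABLE mta_delivery ADD COLUMN orig_to TEXT COLLATE NOCASE",  # illustrative change
    "UPDATE db_info SET value='1' WHERE key='schema_version'"           # each update records its own version
]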
class SqliteEventStore(EventStore):
    def __init__(self, db_conn_factory):
        super(SqliteEventStore, self).__init__(db_conn_factory)
        self.update_schema()

    def update_schema(self):
        ''' update the schema to the latest version
        '''
        c = None
        conn = None
        try:
            conn = self.connect()
            c = conn.cursor()
            c.execute(db_info_create_table_stmt)
            conn.commit()
            c.execute("SELECT value from db_info WHERE key='schema_version'")
            v = c.fetchone()
            if v is None:
                v = -1
            else:
                v = int(v[0])
            for idx in range(v+1, len(schema_updates)):
                log.info('updating database to v%s', idx)
                for stmt in schema_updates[idx]:
                    try:
                        c.execute(stmt)
                    except Exception as e:
                        log.error('problem with sql statement at version=%s error="%s" stmt="%s"' % (idx, e, stmt))
                        raise e
                conn.commit()
        finally:
            if c: c.close(); c=None
            if conn: self.close(conn); conn=None

    def write_rec(self, conn, type, rec):
        if type=='inbound_mail':
            #log.debug('wrote inbound_mail record')
            self.write_inbound_mail(conn, rec)
        elif type=='state':
            ''' rec: {
                owner_id: int,
                state: list
            }
            '''
            self.write_state(conn, rec)
        else:
            raise ValueError('type "%s" not implemented' % type)

    def _insert(self, table, fields):
        insert = 'INSERT INTO ' + table + ' (' + \
            ",".join(fields) + \
            ') VALUES (' + \
            "?,"*(len(fields)-1) + \
            '?)'
        return insert

    def _values(self, fields, data_dict):
        values = []
        for field in fields:
            if field in data_dict:
                values.append(data_dict[field])
                data_dict.pop(field)
            else:
                values.append(None)
        for field in data_dict:
            if type(data_dict[field]) != list and not field.startswith('_') and not field.endswith('_tid'):
                log.warning('unused field: %s', field)
        return values

    def write_inbound_mail(self, conn, rec):
        c = None
        try:
            c = conn.cursor()
            # mta_connection
            insert = self._insert('mta_connection', mta_conn_fields)
            values = self._values(mta_conn_fields, rec)
            #log.debug('INSERT: %s VALUES: %s REC=%s', insert, values, rec)
            c.execute(insert, values)
            conn_id = c.lastrowid
            accept_insert = self._insert('mta_accept', mta_accept_fields)
            delivery_insert = self._insert('mta_delivery', mta_delivery_fields)
            for accept in rec.get('mta_accept', []):
                accept['mta_conn_id'] = conn_id
                values = self._values(mta_accept_fields, accept)
                c.execute(accept_insert, values)
                accept_id = c.lastrowid
                for delivery in accept.get('mta_delivery', []):
                    delivery['mta_accept_id'] = accept_id
                    values = self._values(mta_delivery_fields, delivery)
                    c.execute(delivery_insert, values)
            conn.commit()
        except sqlite3.Error as e:
            conn.rollback()
            raise e
        finally:
            if c: c.close(); c=None

    def write_state(self, conn, rec):
        c = None
        try:
            c = conn.cursor()
            owner_id = rec['owner_id']
            insert = 'INSERT INTO state_cache (owner_id, state) VALUES (?, ?)'
            for item in rec['state']:
                item_json = json.dumps(item)
                c.execute(insert, (owner_id, item_json))
            conn.commit()
        except sqlite3.Error as e:
            conn.rollback()
            raise e
        finally:
            if c: c.close(); c=None

    def read_rec(self, conn, type, args):
        if type=='state':
            return self.read_state(
                conn,
                args['owner_id'],
                args.get('clear',False)
            )
        else:
            raise ValueError('type "%s" not implemented' % type)

    def read_state(self, conn, owner_id, clear):
        c = None
        state = []
        try:
            c = conn.cursor()
            select = 'SELECT state FROM state_cache WHERE owner_id=? ORDER BY state_cache_id'
            for row in c.execute(select, (owner_id,)):
                state.append(json.loads(row[0]))
            if clear:
                delete = 'DELETE FROM state_cache WHERE owner_id=?'
                c.execute(delete, (owner_id,))
                conn.commit()
        finally:
            if c: c.close(); c=None
        return state

    def prune(self, conn, policy):
        older_than_days = datetime.timedelta(days=policy['older_than_days'])
        if older_than_days.days <= 0:
            return
        now = datetime.datetime.now(datetime.timezone.utc)
        d = (now - older_than_days)
        dstr = d.isoformat(sep=' ', timespec='seconds')
        c = None
        try:
            c = conn.cursor()
            deletes = [
                'DELETE FROM mta_delivery WHERE mta_accept_id IN (\
                SELECT mta_accept.mta_accept_id FROM mta_accept\
                JOIN mta_connection ON mta_connection.mta_conn_id = mta_accept.mta_conn_id\
                WHERE connect_time < ?)',
                'DELETE FROM mta_accept WHERE mta_accept_id IN (\
                SELECT mta_accept.mta_accept_id FROM mta_accept\
                JOIN mta_connection ON mta_connection.mta_conn_id = mta_accept.mta_conn_id\
                WHERE connect_time < ?)',
                'DELETE FROM mta_connection WHERE connect_time < ?'
            ]
            counts = []
            for delete in deletes:
                c.execute(delete, (dstr,))
                counts.append(str(c.rowcount))
            conn.commit()
            counts.reverse()
            log.info("pruned %s rows", "/".join(counts))
        except sqlite3.Error as e:
            conn.rollback()
            raise e
        finally:
            if c: c.close()
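
For reference, a hedged sketch of the nested record shape that write_inbound_mail()
expects when the capture daemon calls store('inbound_mail', rec). Every value below is
invented; the list-valued keys 'mta_accept' and 'mta_delivery' carry the child rows:

# Illustrative record only; real values come from parsing /var/log/mail.log.
rec = {
    'service': 'smtpd',
    'service_tid': '12345',
    'connect_time': '2021-01-11 18:02:07',
    'remote_host': 'mail.example.com',
    'remote_ip': '203.0.113.5',
    'disposition': 'normal',
    'mta_accept': [{                          # one mta_accept row per accepted message
        'envelope_from': 'sender@example.com',
        'spf_result': 'Pass',
        'accept_status': 'accept',
        'mta_delivery': [{                    # one mta_delivery row per recipient
            'service': 'lmtp',
            'rcpt_to': 'user@example.com',
            'spam_result': 'clean',
            'status': 'sent',
        }]
    }]
}
store.store('inbound_mail', rec)              # queued; written on the EventStore thread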