mirror of
https://github.com/mail-in-a-box/mailinabox.git
synced 2026-03-05 15:57:23 +01:00
Initial commit of a log capture and reporting feature
This adds a new section to the admin panel called "Activity", that supplies charts, graphs and details about messages entering and leaving the host. A new daemon captures details of system mail activity by monitoring the /var/log/mail.log file, summarizing it into a SQLite database that's kept in user-data.
This commit is contained in:
33
management/reporting/capture/logs/DateParser.py
Normal file
33
management/reporting/capture/logs/DateParser.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import datetime
import pytz

# Captures the leading 15-character timestamp of an rsyslog
# "traditional" format log line (eg. "Dec  6 06:25:04")
rsyslog_traditional_regexp = '^(.{15})'

# the server's local timezone id (eg. "America/New_York") - log
# timestamps are written in this zone. Read once at import time;
# raises if /etc/timezone does not exist (Debian/Ubuntu convention).
with open('/etc/timezone') as fp:
    timezone_id = fp.read().strip()
||||
def rsyslog_traditional(str):
    # Handles the default timestamp in rsyslog
    # (RSYSLOG_TraditionalFileFormat)
    #
    # eg: "Dec  6 06:25:04" (always 15 characters)
    #
    # the date string is in local time and carries no year
    #
    # returns a datetime converted to UTC (naive when the host
    # timezone is already Etc/UTC, otherwise timezone-aware)

    # since the log date has no year, use the current year - or the
    # prior year when the log says December but today is January
    # (reading across a year rollover)
    today = datetime.date.today()
    year = today.year
    if str.startswith('Dec') and today.month == 1:
        year -= 1

    # parse with the year included: strptime's default year is 1900,
    # which is not a leap year, so parsing "Feb 29" without an
    # explicit year would raise ValueError
    d = datetime.datetime.strptime('%d %s' % (year, str), '%Y %b %d %H:%M:%S')

    # convert to UTC
    if timezone_id == 'Etc/UTC':
        return d
    local_tz = pytz.timezone(timezone_id)
    # is_dst=None: raise on ambiguous/non-existent local times rather
    # than guess
    return local_tz.localize(d, is_dst=None).astimezone(pytz.utc)
||||
15
management/reporting/capture/logs/ReadLineHandler.py
Normal file
15
management/reporting/capture/logs/ReadLineHandler.py
Normal file
@@ -0,0 +1,15 @@
|
||||
|
||||
'''subclass this and override methods to handle log output'''
class ReadLineHandler(object):
    def handle(self, line):
        '''process a single line of output - subclasses must
        implement this

        '''
        raise NotImplementedError()

    def end_of_callbacks(self, thread):
        '''invoked once no further output will be passed to handle().
        subclasses may override to save state or perform cleanup at
        that point

        '''
        pass
||||
26
management/reporting/capture/logs/ReadPositionStore.py
Normal file
26
management/reporting/capture/logs/ReadPositionStore.py
Normal file
@@ -0,0 +1,26 @@
|
||||
'''subclass this and override all methods to persist the position of
the log file that has been processed so far.

this enables the log monitor to pick up where it left off

a single FilePositionStore can safely be used with multiple
LogMonitor instances

'''
class ReadPositionStore(object):
    def get(self, log_file, inode):
        '''return the byte offset (from the start of the file) last
        saved for log_file having the given inode, or zero when no
        position has been saved

        '''
        raise NotImplementedError()

    def save(self, log_file, inode, offset):
        '''record `offset` as the current read position'''
        raise NotImplementedError()

    def clear(self, log_file):
        '''forget every saved position for `log_file`'''
        raise NotImplementedError()
||||
84
management/reporting/capture/logs/ReadPositionStoreInFile.py
Normal file
84
management/reporting/capture/logs/ReadPositionStoreInFile.py
Normal file
@@ -0,0 +1,84 @@
|
||||
from .ReadPositionStore import ReadPositionStore

import threading
import json
import os
import logging

# module-level logger, named after this module
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ReadPositionStoreInFile(ReadPositionStore):
    '''a ReadPositionStore that keeps positions in memory and
    periodically persists them to a json file via a background thread
    (at most once every 60 seconds)

    '''
    def __init__(self, output_file):
        # output_file: path of the json file holding saved positions
        self.output_file = output_file
        # True when self.db has changes not yet written to disk
        self.changed = False
        # guards self.db and self.changed
        self.lock = threading.Lock()
        # set to stop the background persist thread
        self.interrupt = threading.Event()

        # load previously-saved positions, if any
        if os.path.exists(output_file):
            with open(output_file, "r", encoding="utf-8") as fp:
                self.db = json.loads(fp.read())
        else:
            # map: { log_file: { str(inode): offset } }
            self.db = {}

        # daemon thread that persists the db once a minute
        self.t = threading.Thread(
            target=self._persist_bg,
            name="ReadPositionStoreInFile",
            daemon=True
        )
        self.t.start()

    def __del__(self):
        log.debug('ReadPositionStoreInFile __del__')
        self.interrupt.set()

    def stop(self):
        '''stop the background thread; it persists one final time
        before exiting'''
        self.interrupt.set()
        self.t.join()

    def get(self, file, inode):
        '''return the saved offset for file/inode, or 0 if none'''
        with self.lock:
            if file in self.db and str(inode) in self.db[file]:
                return self.db[file][str(inode)]
            return 0

    def save(self, file, inode, pos):
        '''save the current read position'''
        with self.lock:
            if file not in self.db:
                self.db[file] = { str(inode): pos }
            else:
                self.db[file][str(inode)] = pos
            self.changed = True

    def clear(self, file):
        '''remove all saved positions for `file`'''
        with self.lock:
            self.db[file] = {}
            self.changed = True

    def persist(self):
        '''write the db to the output file, if it has changed since
        the last successful write'''
        if not self.changed:
            return
        # serialize under the lock BEFORE opening the output file for
        # writing: open(..., "w") truncates, so serializing first
        # ensures a json failure can't leave the file empty
        with self.lock:
            json_str = json.dumps(self.db)
            self.changed = False
        try:
            with open(self.output_file, "w", encoding="utf-8") as fp:
                fp.write(json_str)
        except Exception as e:
            # write failed - mark dirty so a later cycle retries
            with self.lock:
                self.changed = True
            log.error(e)

    def _persist_bg(self):
        # background thread body
        while not self.interrupt.is_set():
            # wait 60 seconds before persisting
            self.interrupt.wait(60)
            # even if interrupted, persist one final time
            self.persist()
||||
160
management/reporting/capture/logs/TailFile.py
Normal file
160
management/reporting/capture/logs/TailFile.py
Normal file
@@ -0,0 +1,160 @@
|
||||
import threading
import os
import logging
import stat

from .ReadLineHandler import ReadLineHandler

# module-level logger, named after this module
log = logging.getLogger(__name__)

'''Spawn a thread to "tail" a log file. For each line read, provided
callbacks do something with the output. Callbacks must be a subclass
of ReadLineHandler.

'''
||||
class TailFile(threading.Thread):
    '''Thread that follows (tails) a log file, invoking callbacks for
    each line read. Detects log rotation by inode change, and - when
    given a ReadPositionStore - resumes from the last processed
    position across restarts.

    '''
    def __init__(self, log_file, store=None):
        ''' log_file - the log file to monitor
            store - a ReadPositionStore instance (optional; without
                    one, tailing starts at the end of the file)
        '''
        self.log_file = log_file
        self.store = store

        self.fp = None               # open file object while tailing
        self.inode = None            # inode of the currently-open file
        self.callbacks = []          # ReadLineHandler instances or callables
        self.interrupt = threading.Event()

        name = f'{__name__}-{os.path.basename(log_file)}'
        log.debug('init thread: %s', name)
        super().__init__(name=name, daemon=True)

    def stop(self, do_join=True):
        '''signal the thread to exit and optionally wait for it'''
        log.debug('TailFile stopping')
        self.interrupt.set()
        # close must be called to unblock the thread fp.readline() call
        self._close()
        if do_join:
            self.join()

    def __del__(self):
        self.stop(do_join=False)

    def add_handler(self, fn):
        '''add a callback - a ReadLineHandler or a plain callable
        taking one line. only valid before the thread is started.'''
        assert not self.is_alive()
        self.callbacks.append(fn)

    def clear_callbacks(self):
        '''remove all callbacks. only valid before the thread is
        started.'''
        assert not self.is_alive()
        self.callbacks = []

    def _open(self):
        # (re)open the log file, remembering its inode so rotation
        # can be detected later
        self._close()
        self.inode = os.stat(self.log_file)[stat.ST_INO]
        self.fp = open(
            self.log_file,
            "r",
            encoding="utf-8",
            errors="backslashreplace"
        )

    def _close(self):
        if self.fp is not None:
            self.fp.close()
            self.fp = None

    def _is_rotated(self):
        # the file was rotated if the path now names a different
        # inode than the one currently open
        try:
            return os.stat(self.log_file)[stat.ST_INO] != self.inode
        except FileNotFoundError:
            return False

    def _issue_callbacks(self, line):
        # hand one line to every callback
        for cb in self.callbacks:
            if isinstance(cb, ReadLineHandler):
                cb.handle(line)
            else:
                cb(line)

    def _notify_end_of_callbacks(self):
        # tell ReadLineHandler callbacks that no more lines will come
        for cb in self.callbacks:
            if isinstance(cb, ReadLineHandler):
                cb.end_of_callbacks(self)

    def _restore_read_position(self):
        # seek to where we left off: the stored position when a store
        # is available, otherwise the end of the file
        if self.fp is None:
            return

        if self.store is None:
            self.fp.seek(0, os.SEEK_END)
        else:
            pos = self.store.get(self.log_file, self.inode)
            size = os.stat(self.log_file)[stat.ST_SIZE]
            if size < pos:
                # the file shrank: it was truncated - start over
                log.debug("truncated: %s", self.log_file)
                self.fp.seek(0, os.SEEK_SET)
            else:
                # if pos>size here, the seek call succeeds and returns
                # 'pos', but future reads will fail
                self.fp.seek(pos, os.SEEK_SET)

    def run(self):
        self.interrupt.clear()

        # initial open - wait until file exists
        while not self.interrupt.is_set() and self.fp is None:
            try:
                self._open()
            except FileNotFoundError:
                log.debug('log file "%s" not found, waiting...', self.log_file)
                self.interrupt.wait(2)
                continue

        # restore reading position
        self._restore_read_position()

        while not self.interrupt.is_set():
            try:
                line = self.fp.readline()  # blocking
                if line == '':
                    log.debug('got EOF')
                    # EOF - check if file was rotated
                    if self._is_rotated():
                        log.debug('rotated')
                        self._open()
                        # positions for the old inode no longer apply
                        if self.store is not None:
                            self.store.clear(self.log_file)
                    # if not rotated, sleep
                    else:
                        self.interrupt.wait(1)
                else:
                    # save position and call all callbacks
                    if self.store is not None:
                        self.store.save(
                            self.log_file,
                            self.inode,
                            self.fp.tell()
                        )
                    self._issue_callbacks(line)

            except Exception as e:
                log.exception(e)
                # back off briefly; if not interrupted, recover from a
                # possible rotation before retrying
                if self.interrupt.wait(1) is not True:
                    if self._is_rotated():
                        self._open()

        self._close()

        try:
            self._notify_end_of_callbacks()
        except Exception as e:
            log.exception(e)
||||
0
management/reporting/capture/logs/__init__.py
Normal file
0
management/reporting/capture/logs/__init__.py
Normal file
Reference in New Issue
Block a user