instructions
This commit is contained in:
88
fail2ban-master/fail2ban/__init__.py
Normal file
88
fail2ban-master/fail2ban/__init__.py
Normal file
@@ -0,0 +1,88 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import logging.handlers

# Custom log levels used throughout fail2ban.  All three sit below INFO
# so ordinary INFO output is unaffected by enabling them.
logging.MSG = logging.INFO - 2
logging.TRACEDEBUG = 7
logging.HEAVYDEBUG = 5
logging.addLevelName(logging.MSG, 'MSG')
logging.addLevelName(logging.TRACEDEBUG, 'TRACE')
logging.addLevelName(logging.HEAVYDEBUG, 'HEAVY')

# NOTICE sits between INFO and WARNING, mirroring the syslog severity of
# the same name.  The notice() helpers below are derived from:
# https://mail.python.org/pipermail/tutor/2007-August/056243.html
logging.NOTICE = logging.INFO + 5
logging.addLevelName(logging.NOTICE, 'NOTICE')
|
||||
|
||||
|
||||
# Define Logger.notice(), shaped exactly like the stock info()/debug()/...
# helpers, so code can log at the custom NOTICE level.
def _Logger_notice(self, msg, *args, **kwargs):
    """Log 'msg % args' with severity 'NOTICE'.

    To pass exception information, use the keyword argument exc_info with
    a true value, e.g.

    logger.notice("Houston, we have a %s", "major disaster", exc_info=1)
    """
    if not self.isEnabledFor(logging.NOTICE):
        return
    self._log(logging.NOTICE, msg, args, **kwargs)

logging.Logger.notice = _Logger_notice
|
||||
|
||||
|
||||
# Root-level notice(), matching the stock logging.info()/logging.debug()
# module-level helpers.
def _root_notice(msg, *args, **kwargs):
    """Log a message with severity 'NOTICE' on the root logger."""
    # lazily install a default handler, exactly as logging.info() does
    if not logging.root.handlers:
        logging.basicConfig()
    logging.root.notice(msg, *args, **kwargs)

# make the notice root level function known
logging.notice = _root_notice
|
||||
|
||||
# Teach SysLogHandler how to map the custom NOTICE level name onto the
# syslog 'notice' priority (unmapped names fall back to 'warning').
logging.handlers.SysLogHandler.priority_map['NOTICE'] = 'notice'

from time import strptime
# strptime thread safety hack-around - http://bugs.python.org/issue7980
# (first use of strptime lazily imports _strptime, which is not
# thread-safe; trigger that import once here, at module import time)
strptime("2012", "%Y")
|
||||
|
||||
# short names for pure numeric log-level ("Level 25" could be truncated by
# short log formats, the compact "#25-Lev." alias is not):
def _init():
    for lvl in range(50):
        name = logging.getLevelName(lvl)
        # only rename levels that still carry the default "Level NN" name
        if name.startswith('Level'):
            logging.addLevelName(lvl, '#%02d-Lev.' % lvl)
_init()
|
||||
25
fail2ban-master/fail2ban/client/__init__.py
Normal file
25
fail2ban-master/fail2ban/client/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
104
fail2ban-master/fail2ban/client/actionreader.py
Normal file
104
fail2ban-master/fail2ban/client/actionreader.py
Normal file
@@ -0,0 +1,104 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
|
||||
from .configreader import DefinitionInitConfigReader
|
||||
from ..helpers import getLogger
|
||||
from ..server.action import CommandAction
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class ActionReader(DefinitionInitConfigReader):
    """Reader of action configuration files (under action.d/).

    Extends DefinitionInitConfigReader with an action name that is kept
    unique per jail and converts the combined options into a fail2ban
    server command stream ("set <jail> addaction ...").
    """

    # recognized action options and their types (None = no default value)
    _configOpts = {
        "actionstart": ["string", None],
        "actionstart_on_demand": ["bool", None],
        "actionstop": ["string", None],
        "actionflush": ["string", None],
        "actionreload": ["string", None],
        "actioncheck": ["string", None],
        "actionrepair": ["string", None],
        "actionrepair_on_unban": ["bool", None],
        "actionban": ["string", None],
        "actionprolong": ["string", None],
        "actionreban": ["string", None],
        "actionunban": ["string", None],
        "norestored": ["bool", None],
    }

    def __init__(self, file_, jailName, initOpts, **kwargs):
        # always supply jail name as name parameter if not specified in options:
        name = initOpts.get("name")
        if name is None:
            initOpts["name"] = name = jailName
        actname = initOpts.get("actname")
        if actname is None:
            actname = file_
            # ensure we've unique action name per jail:
            if name != jailName:
                actname += name[len(jailName):] if name.startswith(jailName) else '-' + name
            initOpts["actname"] = actname
        self._name = actname
        DefinitionInitConfigReader.__init__(
            self, file_, jailName, initOpts, **kwargs)

    def setFile(self, fileName):
        # remember the bare file name; the reader itself works under action.d/
        self.__file = fileName
        DefinitionInitConfigReader.setFile(self, os.path.join("action.d", fileName))

    def getFile(self):
        return self.__file

    def setName(self, name):
        self._name = name

    def getName(self):
        return self._name

    def convert(self):
        """Convert the combined configuration into a server command stream."""
        opts = self.getCombined(
            ignore=CommandAction._escapedTags | set(('timeout', 'bantime')))
        # stream-convert:
        head = ["set", self._jailName]
        stream = [head + ["addaction", self._name]]
        # recognized options first (in combined order), then extra init options:
        pairs = [[opt, val] for opt, val in opts.items()
            if opt in self._configOpts and not opt.startswith('known/')]
        if self._initOpts:
            pairs.extend([opt, val] for opt, val in self._initOpts.items()
                if opt not in self._configOpts and not opt.startswith('known/'))
        # single option -> plain "set", several -> one "multi-set" command:
        if len(pairs) > 1:
            stream.append(["multi-set", self._jailName, "action", self._name, pairs])
        elif pairs:
            stream.append(["set", self._jailName, "action", self._name] + pairs[0])

        return stream
|
||||
272
fail2ban-master/fail2ban/client/beautifier.py
Normal file
272
fail2ban-master/fail2ban/client/beautifier.py
Normal file
@@ -0,0 +1,272 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2013- Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import sys
|
||||
|
||||
from ..exceptions import UnknownJailException, DuplicateJailException
|
||||
from ..helpers import getLogger, logging, PREFER_ENC
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
# Beautify the output of the client.
#
# Fail2ban server only return unformatted return codes which need to be
# converted into user readable messages.

class Beautifier:

    # stdout encoding decides between ascii and utf-8 table borders
    stdoutEnc = PREFER_ENC
    if sys.stdout and sys.stdout.encoding is not None:
        stdoutEnc = sys.stdout.encoding
    encUtf = 1 if stdoutEnc.lower() == 'utf-8' else 0

    def __init__(self, cmd = None):
        # the client command the next response belongs to
        self.__inputCmd = cmd

    def setInputCmd(self, cmd):
        self.__inputCmd = cmd

    def getInputCmd(self):
        return self.__inputCmd

    def beautify(self, response):
        """Format a raw server response according to the stored input command.

        Returns the human readable message; on any formatting error the
        raw response is wrapped in repr() instead (never raises).
        """
        logSys.log(5,
            "Beautify " + repr(response) + " with " + repr(self.__inputCmd))
        inC = self.__inputCmd
        msg = response
        try:
            if inC[0] == "ping":
                msg = "Server replied: " + response
            elif inC[0] == "version":
                msg = response
            elif inC[0] == "start":
                msg = "Jail started"
            elif inC[0] == "stop":
                if len(inC) == 1:
                    if response is None:
                        msg = "Shutdown successful"
                else:
                    if response is None:
                        msg = "Jail stopped"
            elif inC[0] == "add":
                msg = "Added jail " + response
            elif inC[0] == "flushlogs":
                msg = "logs: " + response
            elif inC[0] == "echo":
                msg = ' '.join(msg)
            elif inC[0:1] == ['status']:
                def jail_stat(response, pref=""):
                    # Display jail information as a two-level tree
                    for n, res1 in enumerate(response):
                        prefix1 = pref + ("`-" if n == len(response) - 1 else "|-")
                        msg.append("%s %s" % (prefix1, res1[0]))
                        prefix1 = pref + ("   " if n == len(response) - 1 else "|  ")
                        for m, res2 in enumerate(res1[1]):
                            prefix2 = prefix1 + ("`-" if m == len(res1[1]) - 1 else "|-")
                            val = " ".join(map(str, res2[1])) if isinstance(res2[1], list) else res2[1]
                            msg.append("%s %s:\t%s" % (prefix2, res2[0], val))
                if len(inC) > 1 and inC[1] != "--all":
                    msg = ["Status for the jail: %s" % inC[1]]
                    jail_stat(response)
                else:
                    jstat = None
                    if len(inC) > 1: # --all
                        jstat = response[-1]
                        response = response[:-1]
                    msg = ["Status"]
                    for n, res1 in enumerate(response):
                        prefix1 = "`-" if not jstat and n == len(response) - 1 else "|-"
                        val = " ".join(map(str, res1[1])) if isinstance(res1[1], list) else res1[1]
                        msg.append("%s %s:\t%s" % (prefix1, res1[0], val))
                    if jstat:
                        msg.append("`- Status for the jails:")
                        i = 0
                        for n, j in jstat.items():
                            i += 1
                            prefix1 = "`-" if i == len(jstat) else "|-"
                            msg.append("   %s Jail: %s" % (prefix1, n))
                            jail_stat(j, "      " if i == len(jstat) else "   |  ")
                msg = "\n".join(msg)
            elif inC[0:1] == ['stats'] or inC[0:1] == ['statistics']:
                # border characters, picked by stdout encoding:
                chrTable = [
                    ['|', '-', '|', 'x', 'x', '-', '|', '-'], ## ascii
                    ["\u2551", "\u2550", "\u255F", "\u256B", "\u256C", "\u2569", "\u2502", "\u2500"] ## utf-8
                ]
                def _statstable(response, ct):
                    # Render per-jail statistics as an aligned table
                    tophead = ["Jail", "Backend", "Filter", "Actions"]
                    headers = ["", "", "cur", "tot", "cur", "tot"]
                    minlens = [8, 8, 3, 3, 3, 3]
                    ralign = [0, 0, 1, 1, 1, 1]
                    rows = [[n, r[0], *r[1], *r[2]] for n, r in response.items()]
                    lens = []
                    for i in range(len(rows[0])):
                        col = (len(str(s[i])) for s in rows)
                        lens.append(max(minlens[i], max(col)))
                    rfmt = []
                    hfmt = []
                    for i in range(len(rows[0])):
                        f = "%%%ds" if ralign[i] else "%%-%ds"
                        rfmt.append(f % lens[i])
                        hfmt.append(f % lens[i])
                    rfmt = [rfmt[0], rfmt[1], "%s %s %s" % (rfmt[2], ct[6], rfmt[3]), "%s %s %s" % (rfmt[4], ct[6], rfmt[5])]
                    hfmt = [hfmt[0], hfmt[1], "%s %s %s" % (hfmt[2], ct[6], hfmt[3]), "%s %s %s" % (hfmt[4], ct[6], hfmt[5])]
                    tlens = [lens[0], lens[1], 3 + lens[2] + lens[3], 3 + lens[4] + lens[5]]
                    tfmt = [hfmt[0], hfmt[1], "%%-%ds" % (tlens[2],), "%%-%ds" % (tlens[3],)]
                    tsep = tfmt[0:2]
                    rfmt = (" "+ct[0]+" ").join(rfmt)
                    hfmt = (" "+ct[0]+" ").join(hfmt)
                    tfmt = (" "+ct[0]+" ").join(tfmt)
                    tsep = (" "+ct[0]+" ").join(tsep)
                    separator = ((tsep % tuple(tophead[0:2])) + " "+ct[2]+ct[7] +
                        ((ct[7]+ct[3]+ct[7]).join([ct[7] * n for n in tlens[2:]])) + ct[7])
                    ret = []
                    ret.append(" "+tfmt % tuple(["", ""]+tophead[2:]))
                    ret.append(" "+separator)
                    ret.append(" "+hfmt % tuple(headers))
                    separator = (ct[1]+ct[4]+ct[1]).join([ct[1] * n for n in tlens]) + ct[1]
                    ret.append(ct[1]+separator)
                    for row in rows:
                        ret.append(" "+rfmt % tuple(row))
                    separator = (ct[1]+ct[5]+ct[1]).join([ct[1] * n for n in tlens]) + ct[1]
                    ret.append(ct[1]+separator)
                    return ret
                if not response:
                    return "No jails found."
                msg = "\n".join(_statstable(response, chrTable[self.encUtf]))
            elif len(inC) < 2:
                pass # too few cmd args for below
            elif inC[1] == "syslogsocket":
                msg = "Current syslog socket is:\n"
                msg += "`- " + response
            elif inC[1] == "logtarget":
                msg = "Current logging target is:\n"
                msg += "`- " + response
            elif inC[1:2] == ['loglevel']:
                msg = "Current logging level is "
                msg += repr(logging.getLevelName(response) if isinstance(response, int) else response)
            elif inC[1] == "dbfile":
                if response is None:
                    msg = "Database currently disabled"
                else:
                    msg = "Current database file is:\n"
                    msg += "`- " + response
            elif inC[1] == "dbpurgeage":
                if response is None:
                    msg = "Database currently disabled"
                else:
                    msg = "Current database purge age is:\n"
                    # fix: space between value and unit was missing
                    msg += "`- %i seconds" % response
            elif len(inC) < 3:
                pass # too few cmd args for below
            elif inC[2] in ("logpath", "addlogpath", "dellogpath"):
                if len(response) == 0:
                    msg = "No file is currently monitored"
                else:
                    msg = "Current monitored log file(s):\n"
                    for path in response[:-1]:
                        msg += "|- " + path + "\n"
                    msg += "`- " + response[-1]
            elif inC[2] == "logencoding":
                msg = "Current log encoding is set to:\n"
                msg += response
            elif inC[2] in ("journalmatch", "addjournalmatch", "deljournalmatch"):
                if len(response) == 0:
                    msg = "No journal match filter set"
                else:
                    msg = "Current match filter:\n"
                    msg += ' + '.join(" ".join(res) for res in response)
            elif inC[2] == "datepattern":
                msg = "Current date pattern set to: "
                if response is None:
                    msg += "Not set/required"
                elif response[0] is None:
                    msg += "%s" % response[1]
                else:
                    msg += "%s (%s)" % response
            elif inC[2] in ("ignoreip", "addignoreip", "delignoreip"):
                if len(response) == 0:
                    msg = "No IP address/network is ignored"
                else:
                    msg = "These IP addresses/networks are ignored:\n"
                    for ip in response[:-1]:
                        msg += "|- " + str(ip) + "\n"
                    msg += "`- " + str(response[-1])
            elif inC[2] in ("failregex", "addfailregex", "delfailregex",
                    "ignoreregex", "addignoreregex", "delignoreregex"):
                if len(response) == 0:
                    msg = "No regular expression is defined"
                else:
                    msg = "The following regular expression are defined:\n"
                    c = 0
                    for l in response[:-1]:
                        msg += "|- [" + str(c) + "]: " + l + "\n"
                        c += 1
                    msg += "`- [" + str(c) + "]: " + response[-1]
            elif inC[2] == "actions":
                if len(response) == 0:
                    msg = "No actions for jail %s" % inC[1]
                else:
                    msg = "The jail %s has the following actions:\n" % inC[1]
                    msg += ", ".join(response)
            elif inC[2] == "actionproperties":
                if len(response) == 0:
                    msg = "No properties for jail %s action %s" % (
                        inC[1], inC[3])
                else:
                    msg = "The jail %s action %s has the following " \
                          "properties:\n" % (inC[1], inC[3])
                    msg += ", ".join(response)
            elif inC[2] == "actionmethods":
                if len(response) == 0:
                    msg = "No methods for jail %s action %s" % (
                        inC[1], inC[3])
                else:
                    msg = "The jail %s action %s has the following " \
                          "methods:\n" % (inC[1], inC[3])
                    msg += ", ".join(response)
            elif inC[2] == "banip" and inC[0] == "get":
                if isinstance(response, list):
                    # either a separator argument or "--with-time" (one per line):
                    sep = " " if len(inC) <= 3 else inC[3]
                    if sep == "--with-time":
                        sep = "\n"
                    msg = sep.join(response)
        except Exception:
            logSys.warning("Beautifier error. Please report the error")
            logSys.error("Beautify %r with %r failed", response, self.__inputCmd,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
            msg = repr(msg) + repr(response)
        return msg

    def beautifyError(self, response):
        """Format a server-side exception into a user readable message."""
        logSys.debug("Beautify (error) %r with %r", response, self.__inputCmd)
        msg = response
        if isinstance(response, UnknownJailException):
            msg = "Sorry but the jail '" + response.args[0] + "' does not exist"
        elif isinstance(response, IndexError):
            msg = "Sorry but the command is invalid"
        elif isinstance(response, DuplicateJailException):
            msg = "The jail '" + response.args[0] + "' already exists"
        return msg
|
||||
385
fail2ban-master/fail2ban/client/configparserinc.py
Normal file
385
fail2ban-master/fail2ban/client/configparserinc.py
Normal file
@@ -0,0 +1,385 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Yaroslav Halchenko
|
||||
# Modified: Cyril Jaquier
|
||||
|
||||
__author__ = 'Yaroslav Halchenko, Serg G. Brester (aka sebres)'
|
||||
__copyright__ = 'Copyright (c) 2007 Yaroslav Halchenko, 2015 Serg G. Brester (aka sebres)'
|
||||
__license__ = 'GPL'
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from ..helpers import getLogger
|
||||
|
||||
# SafeConfigParser deprecated from Python 3.2 (renamed to ConfigParser)
|
||||
from configparser import ConfigParser as SafeConfigParser, BasicInterpolation, \
|
||||
InterpolationMissingOptionError, NoOptionError, NoSectionError
|
||||
|
||||
# And interpolation of __name__ was simply removed, thus we need to
# decorate default interpolator to handle it
class BasicInterpolationWithName(BasicInterpolation):
    """Decorator to bring __name__ interpolation back.

    Original handling of __name__ was removed because of
    functional deficiencies: http://bugs.python.org/issue10489

    commit v3.2a4-105-g61f2761
    Author: Lukasz Langa <lukasz@langa.pl>
    Date: Sun Nov 21 13:41:35 2010 +0000

    Issue #10489: removed broken `__name__` support from configparser

    But should be fine to reincarnate for our use case
    """
    def _interpolate_some(self, parser, option, accum, rest, section, map,
            *args, **kwargs):
        # NOTE(review): the guard tests the module-level `__name__` string
        # (never a map key), so it is effectively just `if section:`;
        # presumably `'__name__' in map` was intended — confirm upstream
        # before changing, since "fixing" it would alter interpolation.
        if section and not (__name__ in map):
            map = map.copy()  # copy so the caller's mapping stays untouched
            map['__name__'] = section
            # try to wrap section options like %(section/option)s:
            parser._map_section_options(section, option, rest, map)
        return super()._interpolate_some(
            parser, option, accum, rest, section, map, *args, **kwargs)
|
||||
|
||||
|
||||
def _expandConfFilesWithLocal(filenames):
    """Expand a list of config files with their '.local' companions.

    For every name in `filenames` the matching '<base>.local' file is
    appended directly after it, provided it exists on disk and is not
    already listed.  The input list is not modified.
    """
    expanded = []
    for fn in filenames:
        expanded.append(fn)
        localname = os.path.splitext(fn)[0] + '.local'
        if localname not in filenames and os.path.isfile(localname):
            expanded.append(localname)
    return expanded
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
logLevel = 7
|
||||
|
||||
|
||||
__all__ = ['SafeConfigParserWithIncludes']
|
||||
|
||||
|
||||
class SafeConfigParserWithIncludes(SafeConfigParser):
    """ConfigParser extended with [INCLUDES] handling.

    Adds functionality to SafeConfigParser to handle included other
    configuration files (or may be urls, whatever in the future).

    File should have section [includes] and only 2 options implemented
    are 'files_before' and 'files_after' where files are listed 1 per
    line.

    Example:

    [INCLUDES]
    before = 1.conf
             3.conf

    after = 1.conf

    It is a simple implementation, so just basic care is taken about
    recursion.  Includes preserve right order, ie new files are inserted
    to the list of read configs before original, and their includes
    correspondingly so the list should follow the leaves of the tree.

    I wasn't sure what would be the right way to implement generic (aka
    c++ template) so we could base at any *configparser class... so I
    will leave it for the future.
    """

    SECTION_NAME = "INCLUDES"

    # "section/option" cross-section option reference
    SECTION_OPTNAME_CRE = re.compile(r'^([\w\-]+)/([^\s>]+)$')

    # "%(section/option)s" substitution inside a value
    SECTION_OPTSUBST_CRE = re.compile(r'%\(([\w\-]+/([^\)]+))\)s')

    # "name?condition" conditional section header
    CONDITIONAL_RE = re.compile(r"^(\w+)(\?.+)$")
|
||||
|
||||
# overload constructor only for fancy new Python3's
def __init__(self, share_config=None, *args, **kwargs):
    """Create the parser with __name__-aware interpolation and ';' inline comments.

    share_config: optional dict used as a cross-parser cache of already
    read include files (see _getSharedSCPWI).
    """
    kwargs = dict(kwargs,
        interpolation=BasicInterpolationWithName(),
        inline_comment_prefixes=";")
    super(SafeConfigParserWithIncludes, self).__init__(
        *args, **kwargs)
    self._cfg_share = share_config
|
||||
|
||||
def get_ex(self, section, option, raw=False, vars={}):
    """Get an option value for a given section.

    In opposite to `get`, it differentiate section-related option names
    like `sec/opt` (also `default/opt` and `known/opt`).
    """
    sopt = None
    # option name may embed a section reference:
    if '/' in option:
        sopt = SafeConfigParserWithIncludes.SECTION_OPTNAME_CRE.search(option)
    if sopt:
        sec = sopt.group(1)
        opt = sopt.group(2)
        seclwr = sec.lower()
        if seclwr == 'known':
            # firstly from saved known options, hereafter from current section:
            lookup = ('KNOWN/'+section, section)
        else:
            lookup = (sec,) if seclwr != 'default' else ("DEFAULT",)
        for sec in lookup:
            try:
                return self.get(sec, opt, raw=raw)
            except (NoSectionError, NoOptionError):
                pass
    # fallback: plain lookup using given section and vars:
    return self.get(section, option, raw=raw, vars=vars)
|
||||
|
||||
def _map_section_options(self, section, option, rest, defaults):
    """
    Interpolates values of the section options (name syntax `%(section/option)s`).

    Fallback: try to wrap missing default options as "default/options" resp. "known/options"
    """
    # cheap pre-checks before running the regex:
    if '/' not in rest or '%(' not in rest: # pragma: no cover
        return 0
    rplcmnt = 0
    soptrep = SafeConfigParserWithIncludes.SECTION_OPTSUBST_CRE.findall(rest)
    if not soptrep: # pragma: no cover
        return 0
    for sopt, opt in soptrep:
        if sopt in defaults:
            continue  # already resolvable
        # strip the "/opt" suffix to get the section part:
        sec = sopt[:~len(opt)]
        seclwr = sec.lower()
        if seclwr != 'default':
            usedef = 0
            if seclwr == 'known':
                # try get raw value from known options:
                try:
                    v = self._sections['KNOWN/'+section][opt]
                except KeyError:
                    # fallback to default:
                    usedef = 1
            else:
                # get raw value of opt in the referenced section:
                try:
                    # if section not found - ignore:
                    try:
                        sec = self._sections[sec]
                    except KeyError: # pragma: no cover
                        continue
                    v = sec[opt]
                except KeyError: # pragma: no cover
                    # fallback to default:
                    usedef = 1
        else:
            usedef = 1
        if usedef:
            try:
                v = self._defaults[opt]
            except KeyError: # pragma: no cover
                continue
        # replacement found:
        rplcmnt = 1
        try: # set it in map-vars (consider different python versions):
            defaults[sopt] = v
        except:
            # try to set in first default map (corresponding vars):
            try:
                defaults._maps[0][sopt] = v
            except: # pragma: no cover
                # no way to update vars chain map - overwrite defaults:
                self._defaults[sopt] = v
    return rplcmnt
|
||||
|
||||
@property
def share_config(self):
    # the (optional) dict shared between parsers, caching reads and includes
    return self._cfg_share
|
||||
|
||||
def _getSharedSCPWI(self, filename):
    """Read `filename` into a parser instance, via the shared cache if any.

    Returns a tuple (cfg, i): the parser and the list of files actually
    read (empty/None if the file could not be read).
    """
    SCPWI = SafeConfigParserWithIncludes
    if self._cfg_share:
        # cache/share each file as include (ex: filter.d/common could be included in each filter config):
        hashv = 'inc:'+(filename if not isinstance(filename, list) else '\x01'.join(filename))
        cfg, i = self._cfg_share.get(hashv, (None, None))
        if cfg is None:
            cfg = SCPWI(share_config=self._cfg_share)
            i = cfg.read(filename, get_includes=False)
            self._cfg_share[hashv] = (cfg, i)
        elif logSys.getEffectiveLevel() <= logLevel:
            logSys.log(logLevel, " Shared file: %s", filename)
        return (cfg, i)
    # don't have sharing:
    cfg = SCPWI()
    i = cfg.read(filename, get_includes=False)
    return (cfg, i)
|
||||
|
||||
def _getIncludes(self, filenames, seen=[]):
    """Resolve the full, ordered include list for `filenames` (cached if sharing).

    NOTE: the mutable default `seen` is only read here, never mutated.
    """
    if not isinstance(filenames, list):
        filenames = [ filenames ]
    filenames = _expandConfFilesWithLocal(filenames)
    # retrieve cached include paths when a share dict is available:
    hashv = None
    if self._cfg_share:
        hashv = 'inc-path:'+('\x01'.join(filenames))
        cached = self._cfg_share.get(hashv)
        if cached is not None:
            return cached
    fileNamesFull = []
    for filename in filenames:
        fileNamesFull += self.__getIncludesUncached(filename, seen)
    if hashv is not None:
        self._cfg_share[hashv] = fileNamesFull
    return fileNamesFull
|
||||
|
||||
def __getIncludesUncached(self, resource, seen=[]):
    """
    Given 1 config resource returns list of included files
    (recursively) with the original one as well
    Simple loops are taken care about
    """
    SCPWI = SafeConfigParserWithIncludes
    try:
        parser, i = self._getSharedSCPWI(resource)
        if not i:
            return []
    except UnicodeDecodeError as e:
        logSys.error("Error decoding config file '%s': %s" % (resource, e))
        return []

    resourceDir = os.path.dirname(resource)

    # collected includes, read before resp. after the resource itself:
    newFiles = [ ('before', []), ('after', []) ]
    if SCPWI.SECTION_NAME in parser.sections():
        for optname, optlist in newFiles:
            if optname in parser.options(SCPWI.SECTION_NAME):
                for entry in parser.get(SCPWI.SECTION_NAME, optname).split('\n'):
                    # relative paths are resolved against the including file:
                    r = entry if os.path.isabs(entry) else os.path.join(resourceDir, entry)
                    if r in seen:
                        continue  # simple loop protection
                    optlist += self._getIncludes(r, seen + [resource])
    # combine lists
    return newFiles[0][1] + [resource] + newFiles[1][1]
|
||||
|
||||
def get_defaults(self):
    # raw access to the parser's [DEFAULT] mapping
    return self._defaults

def get_sections(self):
    # raw access to the parser's section dict
    return self._sections
|
||||
|
||||
def options(self, section, withDefault=True):
    """Return a list of option names for the given section name.

    Parameter `withDefault` controls the include of names from section `[DEFAULT]`
    """
    try:
        own = self._sections[section]
    except KeyError: # pragma: no cover
        raise NoSectionError(section)
    if withDefault:
        # union with the [DEFAULT] names (returns a set):
        return set(own.keys()) | set(self._defaults)
    # only own option names:
    return list(own.keys())
|
||||
|
||||
def read(self, filenames, get_includes=True):
	"""Read config file(s), optionally expanding their includes first.

	With `get_includes` (or for multiple files) every file is parsed by
	its own shared parser and its defaults/sections are merged into this
	instance; conditional sections ("[name?cond]") are folded into the
	base section with the condition appended to each option key.
	Returns the list of files actually read.
	"""
	if not isinstance(filenames, list):
		filenames = [ filenames ]
	# retrieve (and cache) includes:
	fileNamesFull = []
	if get_includes:
		fileNamesFull += self._getIncludes(filenames)
	else:
		fileNamesFull = filenames

	if not fileNamesFull:
		return []

	logSys.info(" Loading files: %s", fileNamesFull)

	if get_includes or len(fileNamesFull) > 1:
		# read multiple configs:
		ret = []
		alld = self.get_defaults()
		alls = self.get_sections()
		for filename in fileNamesFull:
			# read single one, add to return list, use sharing if possible:
			cfg, i = self._getSharedSCPWI(filename)
			if i:
				ret += i
				# merge defaults and all sections to self:
				alld.update(cfg.get_defaults())
				for n, s in cfg.get_sections().items():
					# conditional sections ("[name?cond]"): fold into base
					# section, tagging each option key with the condition
					cond = SafeConfigParserWithIncludes.CONDITIONAL_RE.match(n)
					if cond:
						n, cond = cond.groups()
						s = s.copy()
						try:
							del(s['__name__'])
						except KeyError:
							pass
						for k in list(s.keys()):
							v = s.pop(k)
							s[k + cond] = v
					s2 = alls.get(n)
					if isinstance(s2, dict):
						# save previous known values, for possible using in local interpolations later:
						self.merge_section('KNOWN/'+n,
							dict([i for i in iter(s2.items()) if i[0] in s]), '')
						# merge section
						s2.update(s)
					else:
						# first occurrence of this section - store a copy
						alls[n] = s.copy()

		return ret

	# read one config :
	if logSys.getEffectiveLevel() <= logLevel:
		logSys.log(logLevel, " Reading file: %s", fileNamesFull[0])
	# read file(s) :
	return SafeConfigParser.read(self, fileNamesFull, encoding='utf-8')
|
||||
|
||||
def merge_section(self, section, options, pref=None):
	"""Merge `options` into `section` of this parser.

	A missing section is created on the fly.  When `pref` is set
	(non-empty), every option name gets `pref` prepended; names already
	carrying the prefix and the special '__name__' entry are skipped.
	"""
	sections = self.get_sections()
	# create the target section if it does not exist yet:
	sec = sections.setdefault(section, dict())
	if not pref:
		# no prefix - plain merge:
		sec.update(options)
		return
	# prefix every not-yet-prefixed option name, dropping '__name__':
	sec.update(dict(
		(pref + k, v) for k, v in options.items()
		if k != '__name__' and not k.startswith(pref)
	))
|
||||
|
||||
425
fail2ban-master/fail2ban/client/configreader.py
Normal file
425
fail2ban-master/fail2ban/client/configreader.py
Normal file
@@ -0,0 +1,425 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
# Modified by: Yaroslav Halchenko (SafeConfigParserWithIncludes)
|
||||
|
||||
__author__ = "Cyril Jaquier, Yaroslav Halchenko, Serg G. Brester (aka sebres)"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2007 Yaroslav Halchenko, 2015 Serg G. Brester (aka sebres)"
|
||||
__license__ = "GPL"
|
||||
|
||||
import glob
|
||||
import os
|
||||
from configparser import NoOptionError, NoSectionError
|
||||
|
||||
from .configparserinc import sys, SafeConfigParserWithIncludes, logLevel
|
||||
from ..helpers import getLogger, _as_bool, _merge_dicts, substituteRecursiveTags
|
||||
|
||||
# Gets the instance of the logger.
logSys = getLogger(__name__)

# Map of option type names (as used in the option templates consumed by
# _OptionsTemplateGen / getOptions) to converter callables applied to the
# raw string values read from the config.
CONVERTER = {
	"bool": _as_bool,
	"int": int,
}
|
||||
def _OptionsTemplateGen(options):
|
||||
"""Iterator over the options template with default options.
|
||||
|
||||
Each options entry is composed of an array or tuple with:
|
||||
[[type, name, ?default?], ...]
|
||||
Or it is a dict:
|
||||
{name: [type, default], ...}
|
||||
"""
|
||||
if isinstance(options, (list,tuple)):
|
||||
for optname in options:
|
||||
if len(optname) > 2:
|
||||
opttype, optname, optvalue = optname
|
||||
else:
|
||||
(opttype, optname), optvalue = optname, None
|
||||
yield opttype, optname, optvalue
|
||||
else:
|
||||
for optname in options:
|
||||
opttype, optvalue = options[optname]
|
||||
yield opttype, optname, optvalue
|
||||
|
||||
|
||||
class ConfigReader():
	"""Generic config reader class.

	A caching adapter which automatically reuses already shared configuration.
	The real parsing is delegated to a (possibly shared) ConfigReaderUnshared
	instance, allocated lazily on first read (see _create_unshared).
	"""

	def __init__(self, use_config=None, share_config=None, **kwargs):
		# use given shared config if possible (see read):
		self._cfg_share = None
		self._cfg = None
		if use_config is not None:
			self._cfg = use_config
		# share config if possible:
		if share_config is not None:
			# deferred allocation - remember shared storage and the init
			# arguments so _create_unshared can build the reader later:
			self._cfg_share = share_config
			self._cfg_share_kwargs = kwargs
			self._cfg_share_basedir = None
		elif self._cfg is None:
			self._cfg = ConfigReaderUnshared(**kwargs)

	def setBaseDir(self, basedir):
		# no parser allocated yet - remember basedir for later allocation:
		if self._cfg:
			self._cfg.setBaseDir(basedir)
		else:
			self._cfg_share_basedir = basedir

	def getBaseDir(self):
		if self._cfg:
			return self._cfg.getBaseDir()
		else:
			return self._cfg_share_basedir

	@property
	def share_config(self):
		# shared storage (mapping name -> ConfigReaderUnshared); may be None
		return self._cfg_share

	def read(self, name, once=True):
		""" Overloads a default (not shared) read of config reader.

		To prevent multiple reads of config files with it includes, reads into
		the config reader, if it was not yet cached/shared by 'name'.
		"""
		# already shared ?
		if not self._cfg:
			self._create_unshared(name)
		# performance feature - read once if using shared config reader:
		if once and self._cfg.read_cfg_files is not None:
			return self._cfg.read_cfg_files

		# load:
		logSys.info("Loading configs for %s under %s ", name, self._cfg.getBaseDir())
		ret = self._cfg.read(name)

		# save already read and return:
		self._cfg.read_cfg_files = ret
		return ret

	def _create_unshared(self, name=''):
		""" Allocates and share a config file by it name.

		Automatically allocates unshared or reuses shared handle by given 'name' and
		init arguments inside a given shared storage.
		"""
		if not self._cfg and self._cfg_share is not None:
			self._cfg = self._cfg_share.get(name)
			if not self._cfg:
				# not yet in the shared storage - create and register it:
				self._cfg = ConfigReaderUnshared(share_config=self._cfg_share, **self._cfg_share_kwargs)
				if self._cfg_share_basedir is not None:
					self._cfg.setBaseDir(self._cfg_share_basedir)
				self._cfg_share[name] = self._cfg
		else:
			self._cfg = ConfigReaderUnshared(**self._cfg_share_kwargs)

	def sections(self):
		# all section names except the internal 'KNOWN/...' bookkeeping ones
		try:
			return (n for n in self._cfg.sections() if not n.startswith('KNOWN/'))
		except AttributeError:
			return []

	def has_section(self, sec):
		try:
			return self._cfg.has_section(sec)
		except AttributeError:
			return False

	def has_option(self, sec, opt, withDefault=True):
		# without default: consult the section's own options only
		return self._cfg.has_option(sec, opt) if withDefault \
			else opt in self._cfg._sections.get(sec, {})

	def merge_defaults(self, d):
		self._cfg.get_defaults().update(d)

	def merge_section(self, section, *args, **kwargs):
		try:
			return self._cfg.merge_section(section, *args, **kwargs)
		except AttributeError:
			# no underlying parser allocated (nothing read yet)
			raise NoSectionError(section)

	def options(self, section, withDefault=False):
		"""Return a list of option names for the given section name.

		Parameter `withDefault` controls the include of names from section `[DEFAULT]`
		"""
		try:
			return self._cfg.options(section, withDefault)
		except AttributeError:
			raise NoSectionError(section)

	def get(self, sec, opt, raw=False, vars={}):
		# NOTE(review): mutable default `vars={}` - harmless as long as the
		# underlying parser never mutates it; kept for interface compatibility.
		try:
			return self._cfg.get(sec, opt, raw=raw, vars=vars)
		except AttributeError:
			raise NoSectionError(sec)

	def getOptions(self, section, *args, **kwargs):
		try:
			return self._cfg.getOptions(section, *args, **kwargs)
		except AttributeError:
			raise NoSectionError(section)
|
||||
|
||||
|
||||
class ConfigReaderUnshared(SafeConfigParserWithIncludes):
	"""Unshared config reader (previously ConfigReader).

	Do not use this class (internal not shared/cached representation).
	Use ConfigReader instead.
	"""

	# stock system location of the fail2ban configuration:
	DEFAULT_BASEDIR = '/etc/fail2ban'

	def __init__(self, basedir=None, *args, **kwargs):
		SafeConfigParserWithIncludes.__init__(self, *args, **kwargs)
		# cache of the last read() result (None = not read yet); consulted
		# by ConfigReader.read for its read-once optimization:
		self.read_cfg_files = None
		self.setBaseDir(basedir)

	def setBaseDir(self, basedir):
		if basedir is None:
			basedir = ConfigReaderUnshared.DEFAULT_BASEDIR # stock system location
		self._basedir = basedir.rstrip('/')

	def getBaseDir(self):
		return self._basedir

	def read(self, filename):
		"""Read <basedir>/<filename>.conf together with its overrides.

		Load order (later files win): filename.conf, filename.d/*.conf,
		filename.local, filename.d/*.local.  Returns True if at least one
		file was read, False otherwise.
		"""
		if not os.path.exists(self._basedir):
			raise ValueError("Base configuration directory %s does not exist "
							  % self._basedir)
		if filename.startswith("./"): # pragma: no cover
			filename = os.path.abspath(filename)
		basename = os.path.join(self._basedir, filename)
		logSys.debug("Reading configs for %s under %s " , filename, self._basedir)
		config_files = [ basename + ".conf" ]

		# possible further customizations under a .conf.d directory
		config_dir = basename + '.d'
		config_files += sorted(glob.glob('%s/*.conf' % config_dir))

		config_files.append(basename + ".local")

		config_files += sorted(glob.glob('%s/*.local' % config_dir))

		# choose only existing ones
		config_files = list(filter(os.path.exists, config_files))

		if len(config_files):
			# at least one config exists and accessible
			logSys.debug("Reading config files: %s", ', '.join(config_files))
			config_files_read = SafeConfigParserWithIncludes.read(self, config_files)
			missed = [ cf for cf in config_files if cf not in config_files_read ]
			if missed:
				logSys.error("Could not read config files: %s", ', '.join(missed))
				return False
			if config_files_read:
				return True
			logSys.error("Found no accessible config files for %r under %s",
						 filename, self.getBaseDir())
			return False
		else:
			# NOTE(review): config_files is empty in this branch, so the
			# [..][bool(len(config_files))] selector below always picks the
			# "under %s" alternative - confirm whether that is intended.
			logSys.error("Found no accessible config files for %r " % filename
						 + (["under %s" % self.getBaseDir(),
							 "among existing ones: " + ', '.join(config_files)][bool(len(config_files))]))

			return False

	##
	# Read the options.
	#
	# Read the given option in the configuration file. Default values
	# are used...
	# Each options entry is composed of an array with:
	#  [[type, name, default], ...]
	# Or it is a dict:
	#  {name: [type, default], ...}

	def getOptions(self, sec, options, pOptions=None, shouldExist=False, convert=True):
		"""Read the templated options from section `sec` into a dict.

		Options already present in `pOptions` are skipped.  With `convert`,
		each value is run through the CONVERTER for its declared type.
		Missing or invalid values fall back to the template default.
		"""
		values = dict()
		if pOptions is None:
			pOptions = {}
		# Get only specified options:
		for opttype, optname, optvalue in _OptionsTemplateGen(options):
			if optname in pOptions:
				continue
			try:
				v = self.get(sec, optname, vars=pOptions)
				values[optname] = v
				if convert:
					conv = CONVERTER.get(opttype)
					if conv:
						if v is None: continue
						values[optname] = conv(v)
			except NoSectionError as e:
				if shouldExist:
					raise
				# No "Definition" section or wrong basedir
				logSys.error(e)
				values[optname] = optvalue
				# TODO: validate error handling here.
			except NoOptionError:
				if not optvalue is None:
					logSys.debug("'%s' not defined in '%s'. Using default one: %r"
								% (optname, sec, optvalue))
					values[optname] = optvalue
				# elif logSys.getEffectiveLevel() <= logLevel:
				# logSys.log(logLevel, "Non essential option '%s' not defined in '%s'.", optname, sec)
			except ValueError:
				logSys.warning("Wrong value for '" + optname + "' in '" + sec +
							"'. Using default one: '" + repr(optvalue) + "'")
				values[optname] = optvalue
		return values
|
||||
|
||||
|
||||
class DefinitionInitConfigReader(ConfigReader):
	"""Config reader for files with options grouped in [Definition] and
	[Init] sections.

	Is a base class for readers of filters and actions, where definitions
	in jails might provide custom values for options defined in [Init]
	section.
	"""

	# options template for the [Definition] section; set by subclasses:
	_configOpts = []

	def __init__(self, file_, jailName, initOpts, **kwargs):
		ConfigReader.__init__(self, **kwargs)
		if file_.startswith("./"): # pragma: no cover
			file_ = os.path.abspath(file_)
		self.setFile(file_)
		self.setJailName(jailName)
		# note: assigned after setFile, which resets _initOpts to {}
		self._initOpts = initOpts
		self._pOpts = dict()
		self._defCache = dict()

	def setFile(self, fileName):
		self._file = fileName
		# changing the file invalidates previously collected init options:
		self._initOpts = {}

	def getFile(self):
		return self._file

	def setJailName(self, jailName):
		self._jailName = jailName

	def getJailName(self):
		return self._jailName

	def read(self):
		return ConfigReader.read(self, self._file)

	# needed for fail2ban-regex that doesn't need fancy directories
	def readexplicit(self):
		if not self._cfg:
			self._create_unshared(self._file)
		return SafeConfigParserWithIncludes.read(self._cfg, self._file)

	def getOptions(self, pOpts, all=False):
		"""Collect [Definition] options (combined with `pOpts`) into
		self._opts and [Init] defaults into self._initOpts.

		Values are deliberately NOT type-converted here (see comment below);
		conversion happens after interpolation in getCombined.
		"""
		# overwrite static definition options with init values, supplied as
		# direct parameters from jail-config via action[xtra1="...", xtra2=...]:
		if not pOpts:
			pOpts = dict()
		if self._initOpts:
			pOpts = _merge_dicts(pOpts, self._initOpts)
		# type-convert only in combined (otherwise int/bool converting prevents substitution):
		self._opts = ConfigReader.getOptions(
			self, "Definition", self._configOpts, pOpts, convert=False)
		self._pOpts = pOpts
		if self.has_section("Init"):
			# get only own options (without options from default):
			getopt = lambda opt: self.get("Init", opt)
			for opt in self.options("Init", withDefault=False):
				if opt == '__name__': continue
				v = None
				if not opt.startswith('known/'):
					# remember the original [Init] value under 'known/...':
					if v is None: v = getopt(opt)
					self._initOpts['known/'+opt] = v
				if opt not in self._initOpts:
					# overwrite also conditional init options (from init?... section):
					cond = SafeConfigParserWithIncludes.CONDITIONAL_RE.match(opt)
					if cond:
						optc, cond = cond.groups()
						v = pOpts.get(optc, v)
					if v is None: v = getopt(opt)
					self._initOpts[opt] = v
		if all and self.has_section("Definition"):
			# merge with all definition options (and options from default),
			# bypass already converted option (so merge only new options):
			for opt in self.options("Definition"):
				if opt == '__name__' or opt in self._opts: continue
				self._opts[opt] = self.get("Definition", opt)

	def convertOptions(self, opts, configOpts):
		"""Convert interpolated combined options to expected type.
		"""
		for opttype, optname, optvalue in _OptionsTemplateGen(configOpts):
			conv = CONVERTER.get(opttype)
			if conv:
				v = opts.get(optname)
				if v is None: continue
				try:
					opts[optname] = conv(v)
				except ValueError:
					logSys.warning("Wrong %s value %r for %r. Using default one: %r",
						opttype, v, optname, optvalue)
					opts[optname] = optvalue

	def getCombOption(self, optname):
		"""Get combined definition option (as string) using pre-set and init
		options as preselection (values with higher precedence as specified in section).

		Can be used only after calling of getOptions.
		"""
		try:
			return self._defCache[optname]
		except KeyError:
			# not cached yet - resolve (errors are cached as None too):
			try:
				v = self._cfg.get_ex("Definition", optname, vars=self._pOpts)
			except (NoSectionError, NoOptionError, ValueError):
				v = None
			self._defCache[optname] = v
			return v

	def getCombined(self, ignore=()):
		"""Return all options with recursive tag substitution applied and,
		afterwards, type conversion per the _configOpts template.
		"""
		combinedopts = self._opts
		if self._initOpts:
			combinedopts = _merge_dicts(combinedopts, self._initOpts)
		if not len(combinedopts):
			return {}
		# ignore conditional options:
		ignore = set(ignore).copy()
		for n in combinedopts:
			cond = SafeConfigParserWithIncludes.CONDITIONAL_RE.match(n)
			if cond:
				n, cond = cond.groups()
				ignore.add(n)
		# substitute options already specified direct:
		opts = substituteRecursiveTags(combinedopts,
			ignore=ignore, addrepl=self.getCombOption)
		if not opts:
			raise ValueError('recursive tag definitions unable to be resolved')
		# convert options after all interpolations:
		self.convertOptions(opts, self._configOpts)
		return opts

	def convert(self):
		# to be implemented by concrete filter/action readers:
		raise NotImplementedError
|
||||
92
fail2ban-master/fail2ban/client/configurator.py
Normal file
92
fail2ban-master/fail2ban/client/configurator.py
Normal file
@@ -0,0 +1,92 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from .fail2banreader import Fail2banReader
|
||||
from .jailsreader import JailsReader
|
||||
from ..helpers import getLogger
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Configurator:
	"""Facade combining the fail2ban.conf and jail.conf readers.

	Both readers share one reader cache; after reading, the configuration
	is converted into the command stream understood by the server.
	"""

	def __init__(self, force_enable=False, share_config=None):
		self.__settings = dict()
		self.__streams = dict()
		# always share all config readers:
		self.__share_config = dict() if share_config is None else share_config
		self.__fail2ban = Fail2banReader(share_config=self.__share_config)
		self.__jails = JailsReader(force_enable=force_enable, share_config=self.__share_config)

	def Reload(self):
		"""Drop all shared reader handles so configs get re-read."""
		self.__share_config.clear()

	def setBaseDir(self, folderName):
		"""Set the configuration base directory on both readers."""
		self.__fail2ban.setBaseDir(folderName)
		self.__jails.setBaseDir(folderName)

	def getBaseDir(self):
		"""Return the base directory (the fail2ban.conf reader's one)."""
		basedir = self.__fail2ban.getBaseDir()
		jails_basedir = self.__jails.getBaseDir()
		if basedir != jails_basedir:
			logSys.error("fail2ban.conf and jails.conf readers have differing "
						 "basedirs: %r and %r. "
						 "Returning the one for fail2ban.conf"
						 % (basedir, jails_basedir))
		return basedir

	def readEarly(self):
		"""Read fail2ban.conf only (needed before full start-up)."""
		if not self.__fail2ban.read():
			raise LookupError("Read fail2ban configuration failed.")

	def readAll(self):
		"""Read fail2ban.conf and then the jails configuration."""
		self.readEarly()
		if not self.__jails.read():
			raise LookupError("Read jails configuration failed.")

	def getEarlyOptions(self):
		return self.__fail2ban.getEarlyOptions()

	def getOptions(self, jail=None, updateMainOpt=None, ignoreWrong=True):
		self.__fail2ban.getOptions(updateMainOpt)
		return self.__jails.getOptions(jail, ignoreWrong=ignoreWrong)

	def convertToProtocol(self, allow_no_files=False):
		self.__streams["general"] = self.__fail2ban.convert()
		self.__streams["jails"] = self.__jails.convert(allow_no_files=allow_no_files)

	def getConfigStream(self):
		"""Return the combined server command stream (general, then jails)."""
		return list(self.__streams["general"]) + list(self.__streams["jails"])
|
||||
|
||||
93
fail2ban-master/fail2ban/client/csocket.py
Normal file
93
fail2ban-master/fail2ban/client/csocket.py
Normal file
@@ -0,0 +1,93 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
#from cPickle import dumps, loads, HIGHEST_PROTOCOL
|
||||
from pickle import dumps, loads, HIGHEST_PROTOCOL
|
||||
from ..protocol import CSPROTO
|
||||
import socket
|
||||
import sys
|
||||
|
||||
class CSocket:
	"""Client end of the fail2ban control socket (UNIX stream socket).

	Commands are sent as pickled lists terminated by the CSPROTO.END marker;
	answers are unpickled from the same stream.
	"""

	def __init__(self, sock="/var/run/fail2ban/fail2ban.sock", timeout=-1):
		# Create an INET, STREAMing socket
		#self.csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		self.__csock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
		# remember the default timeout so settimeout(-1) can restore it:
		self.__deftout = self.__csock.gettimeout()
		if timeout != -1:
			self.settimeout(timeout)
		#self.csock.connect(("localhost", 2222))
		self.__csock.connect(sock)

	def __del__(self):
		# best-effort close on garbage collection
		self.close()

	def send(self, msg, nonblocking=False, timeout=None):
		"""Send command `msg` (a list) and return the unpickled answer."""
		# Convert every list member to string
		obj = dumps(list(map(CSocket.convert, msg)), HIGHEST_PROTOCOL)
		# NOTE(review): socket.send may transmit only part of a large payload;
		# sendall would be safer - confirm expected message sizes.
		self.__csock.send(obj)
		self.__csock.send(CSPROTO.END)
		return self.receive(self.__csock, nonblocking, timeout)

	def settimeout(self, timeout):
		# -1 restores the socket's original (default) timeout
		self.__csock.settimeout(timeout if timeout != -1 else self.__deftout)

	def close(self):
		"""Announce close to the server and shut the socket down (idempotent)."""
		if not self.__csock:
			return
		try:
			self.__csock.sendall(CSPROTO.CLOSE + CSPROTO.END)
			self.__csock.shutdown(socket.SHUT_RDWR)
		except socket.error: # pragma: no cover - normally unreachable
			pass
		try:
			self.__csock.close()
		except socket.error: # pragma: no cover - normally unreachable
			pass
		# mark as closed so a second close() (e.g. from __del__) is a no-op:
		self.__csock = None

	@staticmethod
	def convert(m):
		"""Convert every "unexpected" member of message to string"""
		if isinstance(m, (str, bool, int, float, list, dict, set)):
			return m
		else: # pragma: no cover
			return str(m)

	@staticmethod
	def receive(sock, nonblocking=False, timeout=None):
		"""Read from `sock` until the CSPROTO.END marker and unpickle it."""
		msg = CSPROTO.EMPTY
		if nonblocking: sock.setblocking(0)
		if timeout: sock.settimeout(timeout)
		bufsize = 1024
		# look for the end marker within the last 32 bytes received:
		while msg.rfind(CSPROTO.END, -32) == -1:
			chunk = sock.recv(bufsize)
			if not len(chunk):
				raise socket.error(104, 'Connection reset by peer')
			if chunk == CSPROTO.END: break
			msg = msg + chunk
			# grow the read size (up to 32 KiB) to cut down syscall count:
			if bufsize < 32768: bufsize <<= 1
		return loads(msg)
|
||||
517
fail2ban-master/fail2ban/client/fail2banclient.py
Normal file
517
fail2ban-master/fail2ban/client/fail2banclient.py
Normal file
@@ -0,0 +1,517 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
#
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
__author__ = "Fail2Ban Developers"
|
||||
__copyright__ = "Copyright (c) 2004-2008 Cyril Jaquier, 2012-2014 Yaroslav Halchenko, 2014-2016 Serg G. Brester"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import shlex
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
|
||||
import threading
|
||||
from threading import Thread
|
||||
|
||||
from ..version import version
|
||||
from .csocket import CSocket
|
||||
from .beautifier import Beautifier
|
||||
from .fail2bancmdline import Fail2banCmdLine, ServerExecutionException, ExitException, \
|
||||
logSys, exit, output
|
||||
|
||||
from ..server.utils import Utils
|
||||
|
||||
# Prompt string shown in the interactive fail2ban shell (see input_command).
PROMPT = "fail2ban> "
|
||||
|
||||
def _thread_name():
|
||||
return threading.current_thread().__class__.__name__
|
||||
|
||||
def input_command(): # pragma: no cover
	"""Read one command line from the user in interactive mode."""
	return input(PROMPT)
|
||||
|
||||
##
|
||||
#
|
||||
# @todo This class needs cleanup.
|
||||
|
||||
class Fail2banClient(Fail2banCmdLine, Thread):
|
||||
|
||||
def __init__(self):
	"""Initialize command-line state (base class) and thread machinery."""
	Fail2banCmdLine.__init__(self)
	Thread.__init__(self)
	# client loop flag:
	self._alive = True
	# server instance when started in foreground mode (see _set_server):
	self._server = None
	# lazily created output beautifier (see `beautifier` property):
	self._beautifier = None
|
||||
|
||||
def dispInteractive(self):
	"""Print the interactive-mode banner."""
	banner = (
		"Fail2Ban v" + version + " reads log file that contains password failure report",
		"and bans the corresponding IP addresses using firewall rules.",
		"",
	)
	for line in banner:
		output(line)
|
||||
def __sigTERMhandler(self, signum, frame): # pragma: no cover
	"""Signal handler: log the signal and terminate with exit code 255."""
	# Print a new line because we probably come from wait
	output("")
	logSys.warning("Caught signal %d. Exiting" % signum)
	exit(255)
|
||||
|
||||
def __ping(self, timeout=0.1):
	"""Check whether a server answers on the control socket.

	The timeout is also passed as argument of the ping command itself
	(unless -1); returns the result of __processCmd (False on failure).
	"""
	return self.__processCmd([["ping"] + ([timeout] if timeout != -1 else [])],
		False, timeout=timeout)
|
||||
|
||||
@property
def beautifier(self):
	"""Beautifier used to render server answers (created on first access)."""
	if not self._beautifier:
		self._beautifier = Beautifier()
	return self._beautifier
|
||||
|
||||
def __processCmd(self, cmd, showRet=True, timeout=-1):
	"""Send the command list `cmd` to the server over one socket connection.

	Each command's answer is beautified and printed (when `showRet`, or
	always for 'echo'/'server-status').  Returns True if every command
	succeeded, False on the first NOK answer or communication error.
	"""
	client = None
	try:
		beautifier = self.beautifier
		streamRet = True
		for c in cmd:
			beautifier.setInputCmd(c)
			try:
				# connect lazily on first command; reuse connection afterwards:
				if not client:
					client = CSocket(self._conf["socket"], timeout=timeout)
				elif timeout != -1:
					client.settimeout(timeout)
				if self._conf["verbose"] > 2:
					logSys.log(5, "CMD: %r", c)
				ret = client.send(c)
				if ret[0] == 0:
					logSys.log(5, "OK : %r", ret[1])
					if showRet or c[0] in ('echo', 'server-status'):
						output(beautifier.beautify(ret[1]))
				else:
					logSys.error("NOK: %r", ret[1].args)
					if showRet:
						output(beautifier.beautifyError(ret[1]))
					streamRet = False
			except socket.error as e:
				# be quiet about failed pings unless verbose/showRet:
				if showRet or self._conf["verbose"] > 1:
					if showRet or c[0] != "ping":
						self.__logSocketError(e, c[0] == "ping")
					else:
						logSys.log(5, " -- %s failed -- %r", c, e)
				return False
			except Exception as e: # pragma: no cover
				if showRet or self._conf["verbose"] > 1:
					if self._conf["verbose"] > 1:
						logSys.exception(e)
					else:
						logSys.error(e)
				return False
	finally:
		# prevent errors by close during shutdown (on exit command):
		if client:
			try :
				client.close()
			except Exception as e: # pragma: no cover
				if showRet or self._conf["verbose"] > 1:
					logSys.debug(e)
		if showRet or c[0] in ('echo', 'server-status'):
			sys.stdout.flush()
	return streamRet
|
||||
|
||||
def __logSocketError(self, prevError="", errorOnly=False):
	"""Log a diagnosis after a socket error: distinguishes a dead server,
	missing socket path, and insufficient permissions."""
	try:
		if os.access(self._conf["socket"], os.F_OK): # pragma: no cover
			# This doesn't check if path is a socket,
			# but socket.error should be raised
			if os.access(self._conf["socket"], os.W_OK):
				# Permissions look good, but socket.error was raised
				if errorOnly:
					logSys.error(prevError)
				else:
					logSys.error("%sUnable to contact server. Is it running?",
						("[%s] " % prevError) if prevError else '')
			else:
				logSys.error("Permission denied to socket: %s,"
					" (you must be root)", self._conf["socket"])
		else:
			logSys.error("Failed to access socket path: %s."
				" Is fail2ban running?",
				self._conf["socket"])
	except Exception as e: # pragma: no cover
		logSys.error("Exception while checking socket access: %s",
			self._conf["socket"])
		logSys.error(e)
|
||||
|
||||
##
|
||||
def __prepareStartServer(self):
	"""Validate preconditions and build the start-up command stream.

	Returns the command list to send to a fresh server, or None if a
	server already runs, the config is invalid, or a stale socket exists.
	"""
	if self.__ping():
		logSys.error("Server already running")
		return None

	# Read the config
	ret, stream = self.readConfig()
	# Do not continue if configuration is not 100% valid
	if not ret:
		return None

	# Check already running
	if not self._conf["force"] and os.path.exists(self._conf["socket"]):
		logSys.error("Fail2ban seems to be in unexpected state (not running but the socket exists)")
		return None

	return [["server-stream", stream], ['server-status']]
|
||||
|
||||
def _set_server(self, s):
	# callback: remember the foreground server instance (see __startServer)
	self._server = s
|
||||
|
||||
##
|
||||
def __startServer(self, background=True):
	"""Start the fail2ban server (daemonized fork or foreground) and feed
	it the configuration stream.  Returns True on success."""
	from .fail2banserver import Fail2banServer
	# read configuration here (in client only, in server we do that in the config-thread):
	stream = self.__prepareStartServer()
	self._alive = True
	if not stream:
		return False
	# Start the server or just initialize started one:
	try:
		if background:
			# Start server daemon as fork of client process (or new process):
			Fail2banServer.startServerAsync(self._conf)
			# Send config stream to server:
			if not self.__processStartStreamAfterWait(stream, False):
				return False
		else:
			# In foreground mode we should make server/client communication in different threads:
			phase = dict()
			self.configureServer(phase=phase, stream=stream)
			# Mark current (main) thread as daemon:
			self.daemon = True
			# Start server direct here in main thread (not fork):
			self._server = Fail2banServer.startServerDirect(self._conf, False, self._set_server)
			# if the config thread never signalled completion, bail out:
			if not phase.get('done', False):
				if self._server: # pragma: no cover
					self._server.quit()
					self._server = None
				exit(255)
	except ExitException: # pragma: no cover
		raise
	except Exception as e: # pragma: no cover
		output("")
		logSys.error("Exception while starting server " + ("background" if background else "foreground"))
		if self._conf["verbose"] > 1:
			logSys.exception(e)
		else:
			logSys.error(e)
		return False

	return True
|
||||
|
||||
##
|
||||
def configureServer(self, nonsync=True, phase=None, stream=None):
	"""Read (if needed) and transmit the configuration to the server.

	When *nonsync* is True, spawns a daemon thread re-invoking this method
	synchronously and, if the config still has to be read, waits for the
	'ready' flag in *phase*. The *phase* dict is the shared state protocol
	between client and server threads (keys: 'start', 'ready', 'start-ready',
	'configure', 'done').

	Returns True/False (synchronous path: result of sending the stream).
	Raises ServerExecutionException if the async configuration failed.
	"""
	# if asynchronous start this operation in the new thread:
	if nonsync:
		if phase is not None:
			# event for server ready flag:
			def _server_ready():
				phase['start-ready'] = True
				logSys.log(5, ' server phase %s', phase)
			# notify waiting thread if server really ready
			self._conf['onstart'] = _server_ready
		th = Thread(target=Fail2banClient.configureServer, args=(self, False, phase, stream))
		th.daemon = True
		th.start()
		# if we need to read configuration stream:
		if stream is None and phase is not None:
			# wait, do not continue if configuration is not 100% valid:
			Utils.wait_for(lambda: phase.get('ready', None) is not None, self._conf["timeout"], 0.001)
			logSys.log(5, ' server phase %s', phase)
			if not phase.get('start', False):
				raise ServerExecutionException('Async configuration of server failed')
		return True
	# prepare: read config, check configuration is valid, etc.:
	if phase is not None:
		phase['start'] = True
		logSys.log(5, ' client phase %s', phase)
	if stream is None:
		stream = self.__prepareStartServer()
	if phase is not None:
		# 'ready' unblocks the waiting async caller; 'start' records validity.
		phase['ready'] = phase['start'] = (True if stream else False)
		logSys.log(5, ' client phase %s', phase)
	if not stream:
		return False
	# wait a little bit for phase "start-ready" before enter active waiting:
	if phase is not None:
		Utils.wait_for(lambda: phase.get('start-ready', None) is not None, 0.5, 0.001)
		phase['configure'] = (True if stream else False)
		logSys.log(5, ' client phase %s', phase)
	# configure server with config stream:
	ret = self.__processStartStreamAfterWait(stream, False)
	if phase is not None:
		phase['done'] = ret
	return ret
|
||||
|
||||
##
|
||||
# Process a command line.
|
||||
#
|
||||
# Process one command line and exit.
|
||||
# @param cmd the command line
|
||||
|
||||
def __processCommand(self, cmd):
	"""Process one client command line (list/tuple of tokens) and return its result.

	Handles the compound commands 'start', 'restart', 'reload' and 'ping'
	locally (possibly recursing, e.g. restart => stop + start); everything
	else is forwarded verbatim to the server via __processCmd.
	"""
	# wrap tuple to list (because could be modified here):
	if not isinstance(cmd, list):
		cmd = list(cmd)
	# process:
	if len(cmd) == 1 and cmd[0] == "start":

		ret = self.__startServer(self._conf["background"])
		if not ret:
			return False
		return ret

	elif len(cmd) >= 1 and cmd[0] == "restart":
		# if restart jail - re-operate via "reload --restart ...":
		if len(cmd) > 1:
			cmd[0:1] = ["reload", "--restart"]
			return self.__processCommand(cmd)
		# restart server:
		if self._conf.get("interactive", False):
			output(' ## stop ... ')
		self.__processCommand(['stop'])
		if not self.__waitOnServer(False): # pragma: no cover
			logSys.error("Could not stop server")
			return False
		# in interactive mode reset config, to make full-reload if there something changed:
		if self._conf.get("interactive", False):
			output(' ## load configuration ... ')
			self.resetConf()
			ret = self.initCmdLine(self._argv)
			if ret is not None:
				return ret
		if self._conf.get("interactive", False):
			output(' ## start ... ')
		return self.__processCommand(['start'])

	elif len(cmd) >= 1 and cmd[0] == "reload":
		# reload options:
		opts = []
		while len(cmd) >= 2:
			if cmd[1] in ('--restart', "--unban", "--if-exists"):
				opts.append(cmd[1])
				del cmd[1]
			else:
				# at most one positional argument (jail name or --all) may remain
				if len(cmd) > 2:
					logSys.error("Unexpected argument(s) for reload: %r", cmd[1:])
					return False
				# stop options - jail name or --all
				break
		if self.__ping(timeout=-1):
			if len(cmd) == 1 or cmd[1] == '--all':
				jail = '--all'
				ret, stream = self.readConfig()
			else:
				jail = cmd[1]
				ret, stream = self.readConfig(jail)
			# Do not continue if configuration is not 100% valid
			if not ret:
				return False
			if self._conf.get("interactive", False):
				output(' ## reload ... ')
			# Reconfigure the server
			return self.__processCmd([['reload', jail, opts, stream]], True)
		else:
			logSys.error("Could not find server")
			return False

	elif len(cmd) > 1 and cmd[0] == "ping":
		# ping with explicit timeout (seconds) given as second token
		return self.__processCmd([cmd], timeout=float(cmd[1]))

	else:
		# forward any other command unchanged to the server
		return self.__processCmd([cmd])
|
||||
|
||||
|
||||
def __processStartStreamAfterWait(self, *args):
	"""Wait for the server to become reachable, then send it the config stream.

	*args* are forwarded to __processCmd (typically: the command stream and
	a showRet flag). On failure a foreground server started in another
	thread is quit. Returns True on success, False otherwise.
	"""
	ret = False
	try:
		# Wait for the server to start
		if not self.__waitOnServer(): # pragma: no cover
			logSys.error("Could not find server, waiting failed")
			return False
		# Configure the server
		ret = self.__processCmd(*args)
	except ServerExecutionException as e: # pragma: no cover
		if self._conf["verbose"] > 1:
			logSys.exception(e)
		# Lazy %s formatting instead of string concatenation: idiomatic for
		# logging and robust even if the socket value is not a str.
		logSys.error("Could not start server. Maybe an old "
			"socket file is still present. Try to "
			"remove %s. If you used fail2ban-client to "
			"start the server, adding the -x option will do it",
			self._conf["socket"])
	if not ret and self._server: # stop on error (foreground, config read in another thread):
		self._server.quit()
		self._server = None
	return ret
|
||||
|
||||
def __waitOnServer(self, alive=True, maxtime=None):
	"""Poll until the server reaches the wanted state (alive or stopped).

	Parameters
	----------
	alive : bool
		True to wait until the server answers ping; False to wait until it
		no longer does (used by restart/stop).
	maxtime : float or None
		Maximum seconds to wait; defaults to the configured "timeout".

	Returns True when the wanted state was reached, False if self._alive was
	cleared meanwhile. Raises ServerExecutionException on timeout.
	"""
	if maxtime is None:
		maxtime = self._conf["timeout"]
	# Wait for the server to start (the server has 30 seconds to answer ping)
	starttime = time.time()
	logSys.log(5, "__waitOnServer: %r", (alive, maxtime))
	sltime = 0.0125 / 2
	# state probe: socket file present AND server answers a short ping
	test = lambda: os.path.exists(self._conf["socket"]) and self.__ping(timeout=sltime)
	with VisualWait(self._conf["verbose"]) as vis:
		while self._alive:
			runf = test()
			if runf == alive:
				return True
			waittime = time.time() - starttime
			logSys.log(5, " wait-time: %s", waittime)
			# Wonderful visual :)
			if waittime > 1:
				vis.heartbeat()
			# if end time reached:
			if waittime >= maxtime:
				raise ServerExecutionException("Failed to start server")
			# first 200ms faster (exponential backoff capped at 0.1s, then 0.5s):
			sltime = min(sltime * 2, 0.5 if waittime > 0.2 else 0.1)
			time.sleep(sltime)
	return False
|
||||
|
||||
def start(self, argv):
	"""Client entry point: parse command line, then run one command or the interactive loop.

	Installs SIGTERM/SIGINT handlers while running (main thread only) and
	restores the previous handlers on exit. Returns True on success, False
	on any error.
	"""
	# Install signal handlers
	_prev_signals = {}
	if _thread_name() == '_MainThread':
		for s in (signal.SIGTERM, signal.SIGINT):
			_prev_signals[s] = signal.getsignal(s)
			signal.signal(s, self.__sigTERMhandler)
	try:
		# Command line options
		if self._argv is None:
			ret = self.initCmdLine(argv)
			if ret is not None:
				if ret:
					return True
				if self._conf.get("test", False) and not self._args: # test only
					return False
				raise ServerExecutionException("Init of command line failed")

		# Commands
		args = self._args

		# Interactive mode
		if self._conf.get("interactive", False):
			try:
				import readline
			except ImportError:
				raise ServerExecutionException("Readline not available")
			try:
				ret = True
				if len(args) > 0:
					# execute the command given on the command line first
					ret = self.__processCommand(args)
				if ret:
					readline.parse_and_bind("tab: complete")
					self.dispInteractive()
					# REPL: read commands until "exit"/"quit" or EOF/Ctrl-C
					while True:
						cmd = input_command()
						if cmd == "exit" or cmd == "quit":
							# Exit
							return True
						if cmd == "help":
							self.dispUsage()
						elif not cmd == "":
							try:
								self.__processCommand(shlex.split(cmd))
							except Exception as e: # pragma: no cover
								if self._conf["verbose"] > 1:
									logSys.exception(e)
								else:
									logSys.error(e)
			except (EOFError, KeyboardInterrupt): # pragma: no cover
				output("")
				raise
		# Single command mode
		else:
			if len(args) < 1:
				self.dispUsage()
				return False
			return self.__processCommand(args)
	except Exception as e:
		if self._conf["verbose"] > 1:
			logSys.exception(e)
		else:
			logSys.error(e)
		return False
	finally:
		# stop any waiting loops and restore original signal handlers
		self._alive = False
		for s, sh in _prev_signals.items():
			signal.signal(s, sh)
|
||||
|
||||
|
||||
class _VisualWait:
	"""Small progress indication (as "wonderful visual") during waiting process
	"""
	# Class-level defaults; `heartbeat` creates instance attributes on first use.
	pos = 0
	delta = 1
	def __init__(self, maxpos=10):
		# maxpos: width of the bouncing-marker track in characters
		self.maxpos = maxpos
	def __enter__(self):
		return self
	def __exit__(self, *args):
		# Erase the indicator line only if something was actually drawn.
		if self.pos:
			sys.stdout.write('\r'+(' '*(35+self.maxpos))+'\r')
			sys.stdout.flush()
	def heartbeat(self):
		"""Show or step for progress indicator
		"""
		# First call draws the track; \x1b[8C moves the cursor into it.
		# NOTE(review): label spacing looks collapsed in this copy -- confirm against upstream.
		if not self.pos:
			sys.stdout.write("\nINFO [#" + (' '*self.maxpos) + "] Waiting on the server...\r\x1b[8C")
		self.pos += self.delta
		# \x1b[nD moves the cursor n columns left, keeping it on the marker.
		if self.delta > 0:
			s = " #\x1b[1D" if self.pos > 1 else "# \x1b[2D"
		else:
			s = "\x1b[1D# \x1b[2D"
		sys.stdout.write(s)
		sys.stdout.flush()
		# Bounce the marker between the track ends.
		if self.pos > self.maxpos:
			self.delta = -1
		elif self.pos < 2:
			self.delta = 1
|
||||
class _NotVisualWait:
|
||||
"""Mockup for invisible progress indication (not verbose)
|
||||
"""
|
||||
def __enter__(self):
|
||||
return self
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
def heartbeat(self):
|
||||
pass
|
||||
|
||||
def VisualWait(verbose, *args, **kwargs):
	"""Factory: real progress indicator when verbose enough, otherwise a silent mock."""
	if verbose > 1:
		return _VisualWait(*args, **kwargs)
	return _NotVisualWait()
|
||||
|
||||
|
||||
def exec_command_line(argv):
	"""Entry point of fail2ban-client: run the client and terminate the process.

	Exit code is 0 when client.start succeeded, 255 otherwise.
	"""
	client = Fail2banClient()
	# Exit with correct return value
	if client.start(argv):
		exit(0)
	else:
		exit(255)
|
||||
|
||||
349
fail2ban-master/fail2ban/client/fail2bancmdline.py
Normal file
349
fail2ban-master/fail2ban/client/fail2bancmdline.py
Normal file
@@ -0,0 +1,349 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
#
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
__author__ = "Fail2Ban Developers"
|
||||
__copyright__ = "Copyright (c) 2004-2008 Cyril Jaquier, 2012-2014 Yaroslav Halchenko, 2014-2016 Serg G. Brester"
|
||||
__license__ = "GPL"
|
||||
|
||||
import getopt
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
from ..version import version, normVersion
|
||||
from ..protocol import printFormatted
|
||||
from ..helpers import getLogger, str2LogLevel, getVerbosityFormat
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger("fail2ban")
|
||||
|
||||
def output(s): # pragma: no cover
	"""Print *s* to stdout, silently ignoring a closed/broken pipe (errno 32)."""
	try:
		print(s)
	except (BrokenPipeError, IOError) as exc: # pragma: no cover
		# Re-raise everything except EPIPE (reader went away, e.g. `| head`).
		if exc.errno != 32:
			raise
|
||||
|
||||
# Config parameters required to start fail2ban which can be also set via command line (overwrite fail2ban.conf),
|
||||
CONFIG_PARAMS = ("socket", "pidfile", "logtarget", "loglevel", "syslogsocket")
|
||||
# Used to signal - we are in test cases (ex: prevents change logging params, log capturing, etc)
|
||||
PRODUCTION = True
|
||||
|
||||
MAX_WAITTIME = 30
|
||||
|
||||
|
||||
class Fail2banCmdLine():
|
||||
|
||||
def __init__(self):
	# Raw argv and parsed positional arguments; filled in by initCmdLine.
	self._argv = self._args = None
	# Lazily created Configurator (see the `configurator` property).
	self._configurator = None
	# When True (test mode), configuration errors are fatal instead of ignored.
	self.cleanConfOnly = False
	self.resetConf()
|
||||
|
||||
def resetConf(self):
	"""Reset self._conf to the built-in defaults (overridable via command line)."""
	defaults = {
		"async": False,
		"conf": "/etc/fail2ban",
		"force": False,
		"background": True,
		"verbose": 1,
		"socket": None,
		"pidfile": None,
		"timeout": MAX_WAITTIME,
	}
	self._conf = defaults
|
||||
|
||||
@property
def configurator(self):
	"""Lazily created Configurator bound to the configured base directory."""
	if self._configurator:
		return self._configurator
	# New configurator (imported lazily to avoid the cost when unused)
	from .configurator import Configurator
	self._configurator = Configurator()
	# Set the configuration path
	self._configurator.setBaseDir(self._conf["conf"])
	return self._configurator
|
||||
|
||||
|
||||
def applyMembers(self, obj):
	"""Shallow-copy every instance attribute of *obj* onto self."""
	self.__dict__.update(obj.__dict__)
|
||||
|
||||
def dispVersion(self, short=False):
	"""Print the fail2ban version (machine-readable short form if *short*)."""
	output(normVersion() if short else "Fail2Ban v" + version)
|
||||
|
||||
def dispUsage(self):
	""" Prints Fail2Ban command line options and exits
	"""
	caller = os.path.basename(self._argv[0])
	# Only the client accepts a <COMMAND>; the server variant does not.
	output("Usage: "+caller+" [OPTIONS]" + (" <COMMAND>" if not caller.endswith('server') else ""))
	output("")
	output("Fail2Ban v" + version + " reads log file that contains password failure report")
	output("and bans the corresponding IP addresses using firewall rules.")
	output("")
	output("Options:")
	# NOTE(review): option help columns look collapsed in this copy -- confirm alignment against upstream.
	output(" -c, --conf <DIR> configuration directory")
	output(" -s, --socket <FILE> socket path")
	output(" -p, --pidfile <FILE> pidfile path")
	output(" --pname <NAME> name of the process (main thread) to identify instance (default fail2ban-server)")
	output(" --loglevel <LEVEL> logging level")
	output(" --logtarget <TARGET> logging target, use file-name or stdout, stderr, syslog or sysout.")
	output(" --syslogsocket auto|<FILE>")
	output(" -d dump configuration. For debugging")
	output(" --dp, --dump-pretty dump the configuration using more human readable representation")
	output(" -t, --test test configuration (can be also specified with start parameters)")
	output(" -i interactive mode")
	output(" -v increase verbosity")
	output(" -q decrease verbosity")
	output(" -x force execution of the server (remove socket file)")
	output(" -b start server in background (default)")
	output(" -f start server in foreground")
	output(" --async start server in async mode (for internal usage only, don't read configuration)")
	output(" --timeout timeout to wait for the server (for internal usage only, don't read configuration)")
	output(" --str2sec <STRING> convert time abbreviation format to seconds")
	output(" -h, --help display this help message")
	output(" -V, --version print the version (-V returns machine-readable short format)")

	if not caller.endswith('server'):
		output("")
		output("Command:")
		# Prints the protocol
		printFormatted()

	output("")
	output("Report bugs to https://github.com/fail2ban/fail2ban/issues")
|
||||
|
||||
def __getCmdLineOptions(self, optList):
	""" Gets the command line options

	Applies each parsed (option, value) pair to self._conf.
	Returns True when the option fully handled the invocation (help,
	version, --str2sec) and the caller should exit successfully;
	returns None to continue normal processing.
	"""
	for opt in optList:
		o = opt[0]
		if o in ("-c", "--conf"):
			self._conf["conf"] = opt[1]
		elif o in ("-s", "--socket"):
			self._conf["socket"] = opt[1]
		elif o in ("-p", "--pidfile"):
			self._conf["pidfile"] = opt[1]
		elif o in ("-d", "--dp", "--dump-pretty"):
			# dump flag doubles as pretty-level: True for -d, 2 for pretty
			self._conf["dump"] = True if o == "-d" else 2
		elif o in ("-t", "--test"):
			self.cleanConfOnly = True
			self._conf["test"] = True
		elif o == "-v":
			self._conf["verbose"] += 1
		elif o == "-q":
			self._conf["verbose"] -= 1
		elif o == "-x":
			self._conf["force"] = True
		elif o == "-i":
			self._conf["interactive"] = True
		elif o == "-b":
			self._conf["background"] = True
		elif o == "-f":
			self._conf["background"] = False
		elif o == "--async":
			self._conf["async"] = True
		elif o == "--timeout":
			from ..server.mytime import MyTime
			self._conf["timeout"] = MyTime.str2seconds(opt[1])
		elif o == "--str2sec":
			from ..server.mytime import MyTime
			output(MyTime.str2seconds(opt[1]))
			return True
		elif o in ("-h", "--help"):
			self.dispUsage()
			return True
		elif o in ("-V", "--version"):
			self.dispVersion(o == "-V")
			return True
		elif o.startswith("--"): # other long named params (see also resetConf)
			self._conf[ o[2:] ] = opt[1]
	return None
|
||||
|
||||
def initCmdLine(self, argv):
	"""Parse *argv*, set up logging, and fill self._conf from early config options.

	Returns True when the invocation is complete (help/version/dump/test-only),
	False on error, or None when the caller should continue processing the
	remaining commands in self._args.
	"""
	verbose = 1
	try:
		# First time?
		initial = (self._argv is None)

		# Command line options
		self._argv = argv
		logSys.info("Using start params %s", argv[1:])

		# Reads the command line options.
		try:
			cmdOpts = 'hc:s:p:xfbdtviqV'
			cmdLongOpts = ['loglevel=', 'logtarget=', 'syslogsocket=', 'test', 'async',
				'conf=', 'pidfile=', 'pname=', 'socket=',
				'timeout=', 'str2sec=', 'help', 'version', 'dp', 'dump-pretty']
			optList, self._args = getopt.getopt(self._argv[1:], cmdOpts, cmdLongOpts)
		except getopt.GetoptError:
			self.dispUsage()
			return False

		ret = self.__getCmdLineOptions(optList)
		if ret is not None:
			return ret

		logSys.debug(" conf: %r, args: %r", self._conf, self._args)

		if initial and PRODUCTION: # pragma: no cover - can't test
			# Map -v/-q counters to a concrete logging level.
			verbose = self._conf["verbose"]
			if verbose <= 0:
				logSys.setLevel(logging.ERROR)
			elif verbose == 1:
				logSys.setLevel(logging.WARNING)
			elif verbose == 2:
				logSys.setLevel(logging.INFO)
			elif verbose == 3:
				logSys.setLevel(logging.DEBUG)
			else:
				logSys.setLevel(logging.HEAVYDEBUG)
			# Add the default logging handler to dump to stderr
			logout = logging.StreamHandler(sys.stderr)

			# Custom log format for the verbose run (-1, because default verbosity here is 1):
			fmt = getVerbosityFormat(verbose-1)
			formatter = logging.Formatter(fmt)
			# tell the handler to use this format
			logout.setFormatter(formatter)
			logSys.addHandler(logout)

		# Set expected parameters (like socket, pidfile, etc) from configuration,
		# if those not yet specified, in which read configuration only if needed here:
		conf = None
		for o in CONFIG_PARAMS:
			if self._conf.get(o, None) is None:
				if not conf:
					self.configurator.readEarly()
					conf = self.configurator.getEarlyOptions()
				if o in conf:
					self._conf[o] = conf[o]

		logSys.info("Using socket file %s", self._conf["socket"])

		# Check log-level before start (or transmit to server), to prevent error in background:
		llev = str2LogLevel(self._conf["loglevel"])
		logSys.info("Using pid file %s, [%s] logging to %s",
			self._conf["pidfile"], logging.getLevelName(llev), self._conf["logtarget"])

		readcfg = True
		if self._conf.get("dump", False):
			if readcfg:
				ret, stream = self.readConfig()
				readcfg = False
			if stream is not None:
				self.dumpConfig(stream, self._conf["dump"] == 2)
			else: # pragma: no cover
				output("ERROR: The configuration stream failed because of the invalid syntax.")
			if not self._conf.get("test", False):
				return ret

		if self._conf.get("test", False):
			if readcfg:
				readcfg = False
				ret, stream = self.readConfig()
			# exit after test if no commands specified (test only):
			if not len(self._args):
				if ret:
					output("OK: configuration test is successful")
				else:
					output("ERROR: test configuration failed")
				return ret
			if not ret:
				raise ServerExecutionException("ERROR: test configuration failed")

		# Nothing to do here, process in client/server
		return None
	except ServerExecutionException:
		raise
	except Exception as e:
		output("ERROR: %s" % (e,))
		if verbose > 2:
			logSys.exception(e)
		return False
|
||||
|
||||
def readConfig(self, jail=None):
	"""Read and convert the configuration (optionally restricted to one *jail*).

	Returns a tuple (ret, stream): ret is the success flag from the
	configurator (False on any exception) and stream the converted command
	stream, or None when conversion did not get that far.
	"""
	# Read the configuration
	# TODO: get away from stew of return codes and exception
	# handling -- handle via exceptions
	stream = None
	try:
		self.configurator.Reload()
		self.configurator.readAll()
		ret = self.configurator.getOptions(jail, self._conf,
			ignoreWrong=not self.cleanConfOnly)
		self.configurator.convertToProtocol(
			allow_no_files=self._conf.get("dump", False))
		stream = self.configurator.getConfigStream()
	except Exception as e:
		# Lazy logger arguments instead of eager "%" interpolation
		# (idiomatic, and consistent with the other log calls here).
		logSys.error("Failed during configuration: %s", e)
		ret = False
	return ret, stream
|
||||
|
||||
@staticmethod
def dumpConfig(cmd, pretty=False):
	"""Print each command of the config stream *cmd*; pretty-print when *pretty*.

	Always returns True.
	"""
	if pretty:
		from pprint import pformat
		emit = lambda s: output(pformat(s, width=1000, indent=2))
	else:
		emit = output
	for entry in cmd:
		emit(entry)
	return True
|
||||
|
||||
#
|
||||
# _exit is made to ease mocking out of the behaviour in tests,
|
||||
# since method is also exposed in API via globally bound variable
|
||||
@staticmethod
def _exit(code=0):
	"""Terminate the process with *code*, tolerating a broken stdout pipe.

	Kept separate from `exit` so tests can mock process termination.
	"""
	# implicit flush without to produce broken pipe error (32):
	sys.stderr.close()
	try:
		sys.stdout.flush()
		# exit:
		if hasattr(sys, 'exit') and sys.exit:
			sys.exit(code)
		else:
			os._exit(code)
	except (BrokenPipeError, IOError) as e: # pragma: no cover
		if e.errno != 32: # closed / broken pipe
			raise
|
||||
|
||||
@staticmethod
def exit(code=0):
	"""Flush logging and terminate the process with exit status *code*."""
	logSys.debug("Exit with code %s", code)
	# because of possible buffered output in python, we should flush it before exit:
	logging.shutdown()
	# exit
	Fail2banCmdLine._exit(code)
|
||||
|
||||
|
||||
# global exit handler:
|
||||
exit = Fail2banCmdLine.exit
|
||||
|
||||
|
||||
class ExitException(Exception):
	"""Raised to request an immediate, intentional process exit."""
	pass
|
||||
|
||||
|
||||
class ServerExecutionException(Exception):
	"""Raised when starting, contacting or configuring the fail2ban server fails."""
	pass
|
||||
85
fail2ban-master/fail2ban/client/fail2banreader.py
Normal file
85
fail2ban-master/fail2ban/client/fail2banreader.py
Normal file
@@ -0,0 +1,85 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from .configreader import ConfigReader
|
||||
from ..helpers import getLogger, str2LogLevel
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Fail2banReader(ConfigReader):
	"""Reader of the main fail2ban configuration (fail2ban.conf / .local / .d)."""

	def __init__(self, **kwargs):
		ConfigReader.__init__(self, **kwargs)

	def read(self):
		# Read the "fail2ban" configuration file set.
		return ConfigReader.read(self, "fail2ban")

	def getEarlyOptions(self):
		# Options needed before the server starts (socket/pidfile/logging),
		# together with their built-in defaults.
		opts = [
			["string", "socket", "/var/run/fail2ban/fail2ban.sock"],
			["string", "pidfile", "/var/run/fail2ban/fail2ban.pid"],
			["string", "loglevel", "INFO"],
			["string", "logtarget", "/var/log/fail2ban.log"],
			["string", "syslogsocket", "auto"]
		]
		return ConfigReader.getOptions(self, "Definition", opts)

	def getOptions(self, updateMainOpt=None):
		"""Read the [Definition] options (and optional [Thread] section) into self.__opts.

		*updateMainOpt* may override values read from the configuration.
		Raises via str2LogLevel when the configured log-level is invalid.
		"""
		opts = [["string", "loglevel", "INFO" ],
			["string", "logtarget", "STDERR"],
			["string", "syslogsocket", "auto"],
			["string", "allowipv6", "auto"],
			["string", "dbfile", "/var/lib/fail2ban/fail2ban.sqlite3"],
			["int", "dbmaxmatches", None],
			["string", "dbpurgeage", "1d"]]
		self.__opts = ConfigReader.getOptions(self, "Definition", opts)
		if updateMainOpt:
			self.__opts.update(updateMainOpt)
		# check given log-level:
		str2LogLevel(self.__opts.get('loglevel', 0))
		# thread options:
		# NOTE(review): the "stacksize" entry has only 2 elements (no default
		# value) -- confirm against upstream that this is intended.
		opts = [["int", "stacksize", ],
			]
		if self.has_section("Thread"):
			thopt = ConfigReader.getOptions(self, "Thread", opts)
			if thopt:
				self.__opts['thread'] = thopt

	def convert(self):
		# Ensure logtarget/level set first so any db errors are captured
		# Also dbfile should be set before all other database options.
		# So adding order indices into items, to be stripped after sorting, upon return
		order = {"thread":0, "syslogsocket":11, "loglevel":12, "logtarget":13,
			"allowipv6": 14,
			"dbfile":50, "dbmaxmatches":51, "dbpurgeage":51}
		stream = list()
		for opt in self.__opts:
			if opt in order:
				stream.append((order[opt], ["set", opt, self.__opts[opt]]))
		return [opt[1] for opt in sorted(stream)]
|
||||
|
||||
899
fail2ban-master/fail2ban/client/fail2banregex.py
Normal file
899
fail2ban-master/fail2ban/client/fail2banregex.py
Normal file
@@ -0,0 +1,899 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
#
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
"""
|
||||
Fail2Ban reads log file that contains password failure report
|
||||
and bans the corresponding IP addresses using firewall rules.
|
||||
|
||||
This tools can test regular expressions for "fail2ban".
|
||||
"""
|
||||
|
||||
__author__ = "Fail2Ban Developers"
|
||||
__copyright__ = """Copyright (c) 2004-2008 Cyril Jaquier, 2008- Fail2Ban Contributors
|
||||
Copyright of modifications held by their respective authors.
|
||||
Licensed under the GNU General Public License v2 (GPL).
|
||||
|
||||
Written by Cyril Jaquier <cyril.jaquier@fail2ban.org>.
|
||||
Many contributions by Yaroslav O. Halchenko, Steven Hiscocks, Sergey G. Brester (sebres)."""
|
||||
|
||||
__license__ = "GPL"
|
||||
|
||||
import getopt
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
import shlex
|
||||
import sys
|
||||
import time
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
from optparse import OptionParser, Option
|
||||
|
||||
from configparser import NoOptionError, NoSectionError, MissingSectionHeaderError
|
||||
|
||||
try: # pragma: no cover
|
||||
from ..server.filtersystemd import FilterSystemd
|
||||
except ImportError:
|
||||
FilterSystemd = None
|
||||
|
||||
from ..version import version, normVersion
|
||||
from .jailreader import FilterReader, JailReader, NoJailError
|
||||
from ..server.filter import Filter, FileContainer, MyTime
|
||||
from ..server.failregex import Regex, RegexException
|
||||
|
||||
from ..helpers import str2LogLevel, getVerbosityFormat, FormatterWithTraceBack, getLogger, \
|
||||
extractOptions, PREFER_ENC
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger("fail2ban")
|
||||
|
||||
def debuggexURL(sample, regex, multiline=False, useDns="yes"):
	"""Build a debuggex.com URL preloaded with *regex* (host tags resolved) and *sample*."""
	params = {
		're': Regex._resolveHostTag(regex, useDns=useDns),
		'str': sample,
		'flavor': 'python',
	}
	# Multiline samples need the "m" regex flag on debuggex.
	if multiline:
		params['flags'] = 'm'
	return 'https://www.debuggex.com/?' + urllib.parse.urlencode(params)
|
||||
|
||||
def output(args): # pragma: no cover (overridden in test-cases)
	"""Print *args* to stdout (test-cases replace this hook to capture output)."""
	print(args)
|
||||
|
||||
def shortstr(s, l=53):
	"""Return *s* truncated to at most *l* characters, ellipsis-terminated when cut."""
	return s if len(s) <= l else s[:l-3] + '...'
|
||||
|
||||
def pprint_list(l, header=None):
	"""Pretty-print the lines in *l* via output() in a boxed layout; no-op on empty list."""
	if not len(l):
		return
	prefix = ("|- %s\n" % header) if header else ''
	output(prefix + "| " + "\n| ".join(l) + '\n`-')
|
||||
|
||||
def journal_lines_gen(flt, myjournal): # pragma: no cover
	"""Yield formatted entries from *myjournal* until it is exhausted.

	Each raw entry is passed through flt.formatJournalEntry before yielding.
	"""
	while True:
		try:
			entry = myjournal.get_next()
		except OSError:
			# NOTE(review): retries indefinitely on OSError -- presumably
			# transient journal read errors; confirm this cannot spin forever.
			continue
		if not entry:
			break
		yield flt.formatJournalEntry(entry)
|
||||
|
||||
def dumpNormVersion(*args):
	"""Option-parser callback: print the normalized version and exit successfully."""
	output(normVersion())
	sys.exit(0)
|
||||
|
||||
def usage():
	"""Return the one-line usage string for fail2ban-regex."""
	return "%s [OPTIONS] <LOG> <REGEX> [IGNOREREGEX]" % sys.argv[0]
|
||||
|
||||
class _f2bOptParser(OptionParser):
	"""OptionParser variant whose help output embeds the full LOG/REGEX usage text."""
	def format_help(self, *args, **kwargs):
		""" Overwritten format helper with full usage."""
		# Clear the short usage line; the full text below replaces it.
		self.usage = ''
		# NOTE(review): column alignment inside the usage text looks collapsed
		# in this copy -- confirm against upstream.
		return "Usage: " + usage() + "\n" + __doc__ + """
LOG:
string a string representing a log line
filename path to a log file (/var/log/auth.log)
systemd-journal search systemd journal (systemd-python required),
optionally with backend parameters, see `man jail.conf`
for usage and examples (systemd-journal[journalflags=1]).

REGEX:
string a string representing a 'failregex'
filter name of jail or filter, optionally with options (sshd[mode=aggressive])
filename path to a filter file (filter.d/sshd.conf)

IGNOREREGEX:
string a string representing an 'ignoreregex'
\n""" + OptionParser.format_help(self, *args, **kwargs) + """\n
Report bugs to https://github.com/fail2ban/fail2ban/issues\n
""" + __copyright__ + "\n"
|
||||
|
||||
|
||||
def get_opt_parser():
	"""Build and return the OptionParser for the fail2ban-regex CLI."""
	# use module docstring for help output
	p = _f2bOptParser(
				usage=usage(),
				version="%prog " + version)

	p.add_options([
		Option("-c", "--config", default='/etc/fail2ban',
			   help="set alternate config directory"),
		Option("-d", "--datepattern",
			   help="set custom pattern used to match date/times"),
		Option("--timezone", "--TZ", action='store', default=None,
			   help="set time-zone used by convert time format"),
		Option("-e", "--encoding", default=PREFER_ENC,
			   help="File encoding. Default: system locale"),
		Option("-r", "--raw", action='store_true', default=False,
			   help="Raw hosts, don't resolve dns"),
		Option("--usedns", action='store', default=None,
			   help="DNS specified replacement of tags <HOST> in regexp "
					"('yes' - matches all form of hosts, 'no' - IP addresses only)"),
		Option("-L", "--maxlines", type=int, default=0,
			   help="maxlines for multi-line regex."),
		Option("-m", "--journalmatch",
			   help="journalctl style matches overriding filter file. "
					"\"systemd-journal\" only"),
		Option('-l', "--log-level",
			   dest="log_level",
			   default='critical',
			   help="Log level for the Fail2Ban logger to use"),
		Option('-V', action="callback", callback=dumpNormVersion,
			   help="get version in machine-readable short format"),
		Option('-v', '--verbose', action="count", dest="verbose",
			   default=0,
			   help="Increase verbosity"),
		Option("--verbosity", action="store", dest="verbose", type=int,
			   help="Set numerical level of verbosity (0..4)"),
		Option("--verbose-date", "--VD", action='store_true',
			   help="Verbose date patterns/regex in output"),
		Option("-D", "--debuggex", action='store_true',
			   help="Produce debuggex.com urls for debugging there"),
		Option("--no-check-all", action="store_false", dest="checkAllRegex", default=True,
			   help="Disable check for all regex's"),
		Option("-o", "--out", action="store", dest="out", default=None,
			   help="Set token to print failure information only (row, id, ip, msg, host, ip4, ip6, dns, matches, ...)"),
		Option("-i", "--invert", action="store_true", dest="invert",
			   help="Invert the sense of matching, to output non-matching lines."),
		Option("--print-no-missed", action='store_true',
			   help="Do not print any missed lines"),
		Option("--print-no-ignored", action='store_true',
			   help="Do not print any ignored lines"),
		Option("--print-all-matched", action='store_true',
			   help="Print all matched lines"),
		Option("--print-all-missed", action='store_true',
			   help="Print all missed lines, no matter how many"),
		Option("--print-all-ignored", action='store_true',
			   help="Print all ignored lines, no matter how many"),
		Option("-t", "--log-traceback", action='store_true',
			   help="Enrich log-messages with compressed tracebacks"),
		Option("--full-traceback", action='store_true',
			   help="Either to make the tracebacks full, not compressed (as by default)"),
		])

	return p
|
||||
|
||||
|
||||
class RegexStat(object):
	"""Per-regex bookkeeping: hit counter plus the match info it caught."""

	def __init__(self, failregex):
		self._failregex = failregex
		self._stats = 0
		self._ipList = []

	def __str__(self):
		return "%s(%r) %d failed: %s" % (
			self.__class__, self._failregex, self._stats, self._ipList)

	def inc(self):
		"""Increment the hit counter by one."""
		self._stats += 1

	def getStats(self):
		"""Return the number of hits recorded so far."""
		return self._stats

	def getFailRegex(self):
		"""Return the regular expression string."""
		return self._failregex

	def appendIP(self, value):
		"""Record one match entry (fail info) for this regex."""
		self._ipList.append(value)

	def getIPList(self):
		"""Return all recorded match entries."""
		return self._ipList
|
||||
|
||||
|
||||
class LineStats(object):
	"""Just a convenience container for stats
	"""
	def __init__(self, opts):
		self.tested = self.matched = 0
		self.missed = 0
		self.ignored = 0
		self.matched_lines = []
		self.missed_lines = []
		self.ignored_lines = []
		# time-stripped variants are only needed for debuggex URLs:
		if opts.debuggex:
			self.matched_lines_timeextracted = []
			self.missed_lines_timeextracted = []
			self.ignored_lines_timeextracted = []

	def __str__(self):
		return "%(tested)d lines, %(ignored)d ignored, %(matched)d matched, %(missed)d missed" % self

	# mapping access so __str__ can %-format directly with self;
	# unknown keys yield '' instead of raising
	def __getitem__(self, key):
		return getattr(self, key, '')
|
||||
|
||||
|
||||
class Fail2banRegex(object):
|
||||
|
||||
	def __init__(self, opts):
		"""Configure the tester from parsed command-line *opts*.

		Mirrors every option as a protected `_<name>` attribute, creates the
		server-side Filter doing the actual matching, and applies maxlines,
		journal match, timezone, date pattern and DNS settings.
		"""
		# set local protected members from given options:
		self.__dict__.update(dict(('_'+o,v) for o,v in opts.__dict__.items()))
		self._opts = opts
		self._maxlines_set = False # so we allow to override maxlines in cmdline
		self._datepattern_set = False
		self._journalmatch = None

		self.share_config=dict()
		self._filter = Filter(None)
		self._prefREMatched = 0
		self._prefREGroups = list()
		self._ignoreregex = list()
		self._failregex = list()
		self._time_elapsed = None
		self._line_stats = LineStats(opts)

		if opts.maxlines:
			self.setMaxLines(opts.maxlines)
		else:
			self._maxlines = 20
		if opts.journalmatch is not None:
			self.setJournalMatch(shlex.split(opts.journalmatch))
		if opts.timezone:
			self._filter.setLogTimeZone(opts.timezone)
		# no find-time window - every given line is considered, whatever its age:
		self._filter.checkFindTime = False
		if True: # not opts.out:
			MyTime.setAlternateNow(0); # accept every date (years from 19xx up to end of current century, '%ExY' and 'Exy' patterns)
			from ..server.strptime import _updateTimeRE
			_updateTimeRE()
		if opts.datepattern:
			self.setDatePattern(opts.datepattern)
		if opts.usedns:
			self._filter.setUseDns(opts.usedns)
		self._filter.returnRawHost = opts.raw
		self._filter.checkAllRegex = opts.checkAllRegex and not opts.out
		# ignore pending (without ID/IP), added to matches if it hits later (if ID/IP can be retrieved)
		self._filter.ignorePending = bool(opts.out)
		# callback to increment ignored RE's by index (during process):
		self._filter.onIgnoreRegex = self._onIgnoreRegex
		self._backend = 'auto'
|
||||
|
||||
	def output(self, line):
		"""Print *line* unless formatted output (--out) is active."""
		if not self._opts.out: output(line)
|
||||
|
||||
	def encode_line(self, line):
		"""Encode *line* to bytes using the configured encoding (errors dropped)."""
		return line.encode(self._encoding, 'ignore')
|
||||
|
||||
	def setDatePattern(self, pattern):
		"""Set the filter's date pattern once; later calls are no-ops.

		This lets the command line (applied first) override patterns
		coming from a filter file.
		"""
		if not self._datepattern_set:
			self._filter.setDatePattern(pattern)
			self._datepattern_set = True
			if pattern is not None:
				self.output( "Use datepattern : %s : %s" % (
					pattern, self._filter.getDatePattern()[1], ) )
|
||||
|
||||
	def setMaxLines(self, v):
		"""Set the filter's line-buffer size once; later calls are no-ops.

		This lets the command line (applied first) override maxlines
		coming from a filter file.
		"""
		if not self._maxlines_set:
			self._filter.setMaxLines(int(v))
			self._maxlines_set = True
			self.output( "Use maxlines : %d" % self._filter.getMaxLines() )
|
||||
|
||||
	def setJournalMatch(self, v):
		"""Remember journalctl-style match list *v* for systemd-journal mode."""
		self._journalmatch = v
|
||||
|
||||
	def _dumpRealOptions(self, reader, fltOpt):
		"""Print the effective filter options (diagnostics, verbose mode).

		Combined options win; keys missing there are looked up in the
		filter's [Definition] section when a filter is available.
		"""
		realopts = {}
		combopts = reader.getCombined()
		if isinstance(reader, FilterReader):
			_get_opt = lambda k: reader.get('Definition', k)
		elif reader.filter: # JailReader for jail with filter:
			_get_opt = lambda k: reader.filter.get('Definition', k)
		else: # JailReader for jail without filter:
			_get_opt = lambda k: None
		# output all options that are specified in filter-argument as well as some special (mostly interested):
		for k in ['logtype', 'datepattern'] + list(fltOpt.keys()):
			# combined options win, but they contain only a sub-set in filter expected keys,
			# so get the rest from definition section:
			try:
				realopts[k] = combopts[k] if k in combopts else _get_opt(k)
			except NoOptionError: # pragma: no cover
				pass
		self.output("Real filter options : %r" % realopts)
|
||||
|
||||
	def readRegex(self, value, regextype):
		"""Resolve the <REGEX> argument *value* into fail/ignore regexes.

		*value* may be a jail name, a filter name with optional options
		(sshd[mode=aggressive]), a path to a filter file, or a plain regex
		string.  On success the regexes are installed into the filter and
		tracked in self._failregex / self._ignoreregex; returns True, or
		False on any resolution/parse error.
		"""
		assert(regextype in ('fail', 'ignore'))
		regex = regextype + 'regex'
		# try to check - we've case filter?[options...]?:
		basedir = self._opts.config
		fltName = value
		fltFile = None
		fltOpt = {}
		jail = None
		if regextype == 'fail':
			# looks like a name/path (not a raw regex)?
			if re.search(r'(?ms)^/{0,3}[\w/_\-.]+(?:\[.*\])?$', value):
				try:
					fltName, fltOpt = extractOptions(value)
					if not re.search(r'(?ms)(?:/|\.(?:conf|local)$)', fltName): # name of jail?
						try:
							jail = JailReader(fltName, force_enable=True,
								share_config=self.share_config, basedir=basedir)
							jail.read()
						except NoJailError:
							jail = None
					# try fltName as given, or with .conf/.local appended:
					if "." in fltName[~5:]:
						tryNames = (fltName,)
					else:
						tryNames = (fltName, fltName + '.conf', fltName + '.local')
					for fltFile in tryNames:
						if os.path.dirname(fltFile) == 'filter.d':
							fltFile = os.path.join(basedir, fltFile)
						elif not "/" in fltFile:
							if os.path.basename(basedir) == 'filter.d':
								fltFile = os.path.join(basedir, fltFile)
							else:
								fltFile = os.path.join(basedir, 'filter.d', fltFile)
						else:
							basedir = os.path.dirname(fltFile)
						if os.path.isfile(fltFile):
							break
						fltFile = None
				except Exception as e:
					output("ERROR: Wrong filter name or options: %s" % (str(e),))
					output(" while parsing: %s" % (value,))
					if self._verbose: raise(e)
					return False
		elif self._ignoreregex:
			# clear ignoreregex that could be previously loaded from filter:
			self._filter.delIgnoreRegex()

		readercommands = None
		# if it is jail:
		if jail:
			self.output( "Use %11s jail : %s" % ('', fltName) )
			if fltOpt:
				self.output( "Use jail/flt options : %r" % fltOpt )
			if not fltOpt: fltOpt = {}
			fltOpt['backend'] = self._backend
			ret = jail.getOptions(addOpts=fltOpt)
			if not ret:
				output('ERROR: Failed to get jail for %r' % (value,))
				return False
			# show real options if expected:
			if self._verbose > 1 or logSys.getEffectiveLevel()<=logging.DEBUG:
				self._dumpRealOptions(jail, fltOpt)
			readercommands = jail.convert(allow_no_files=True)
		# if it is filter file:
		elif fltFile is not None:
			if (basedir == self._opts.config
				or os.path.basename(basedir) == 'filter.d'
				or ("." not in fltName[~5:] and "/" not in fltName)
			):
				## within filter.d folder - use standard loading algorithm to load filter completely (with .local etc.):
				if os.path.basename(basedir) == 'filter.d':
					basedir = os.path.dirname(basedir)
				fltName = os.path.splitext(os.path.basename(fltName))[0]
				self.output( "Use %11s file : %s, basedir: %s" % ('filter', fltName, basedir) )
			else:
				## foreign file - readexplicit this file and includes if possible:
				self.output( "Use %11s file : %s" % ('filter', fltName) )
				basedir = None
				if not os.path.isabs(fltName): # avoid join with "filter.d" inside FilterReader
					fltName = os.path.abspath(fltName)
			if fltOpt:
				self.output( "Use filter options : %r" % fltOpt )
			reader = FilterReader(fltName, 'fail2ban-regex-jail', fltOpt,
				share_config=self.share_config, basedir=basedir)
			ret = None
			try:
				if basedir is not None:
					ret = reader.read()
				else:
					## foreign file - readexplicit this file and includes if possible:
					reader.setBaseDir(None)
					ret = reader.readexplicit()
			except Exception as e:
				output("Wrong config file: %s" % (str(e),))
				if self._verbose: raise(e)
			if not ret:
				output( "ERROR: failed to load filter %s" % value )
				return False
			# set backend-related options (logtype):
			reader.applyAutoOptions(self._backend)
			# get, interpolate and convert options:
			reader.getOptions(None)
			# show real options if expected:
			if self._verbose > 1 or logSys.getEffectiveLevel()<=logging.DEBUG:
				self._dumpRealOptions(reader, fltOpt)
			# to stream:
			readercommands = reader.convert()

		regex_values = {}
		if readercommands:
			# interpret the reader's command stream (set/multi-set tuples):
			for opt in readercommands:
				if opt[0] == 'multi-set':
					optval = opt[3]
				elif opt[0] == 'set':
					optval = opt[3:]
				else: # pragma: no cover
					continue
				try:
					if opt[2] == "prefregex":
						for optval in optval:
							self._filter.prefRegex = optval
					elif opt[2] == "addfailregex":
						stor = regex_values.get('fail')
						if not stor: stor = regex_values['fail'] = list()
						for optval in optval:
							stor.append(RegexStat(optval))
							#self._filter.addFailRegex(optval)
					elif opt[2] == "addignoreregex":
						stor = regex_values.get('ignore')
						if not stor: stor = regex_values['ignore'] = list()
						for optval in optval:
							stor.append(RegexStat(optval))
							#self._filter.addIgnoreRegex(optval)
					elif opt[2] == "maxlines":
						for optval in optval:
							self.setMaxLines(optval)
					elif opt[2] == "datepattern":
						for optval in optval:
							self.setDatePattern(optval)
					elif opt[2] == "addjournalmatch": # pragma: no cover
						if self._opts.journalmatch is None:
							self.setJournalMatch(optval)
				except ValueError as e: # pragma: no cover
					output( "ERROR: Invalid value for %s (%r) " \
						"read from %s: %s" % (opt[2], optval, value, e) )
					return False

		else:
			# plain regex string given on the command line:
			self.output( "Use %11s line : %s" % (regex, shortstr(value)) )
			regex_values[regextype] = [RegexStat(value)]

		# install collected regexes into the filter and remember them locally:
		for regextype, regex_values in regex_values.items():
			regex = regextype + 'regex'
			setattr(self, "_" + regex, regex_values)
			for regex in regex_values:
				getattr(
					self._filter,
					'add%sRegex' % regextype.title())(regex.getFailRegex())
		return True
|
||||
|
||||
	def _onIgnoreRegex(self, idx, ignoreRegex):
		"""Filter callback: current line hit ignoreregex *idx* - count the hit."""
		self._lineIgnored = True
		self._ignoreregex[idx].inc()
|
||||
|
||||
	def testRegex(self, line, date=None):
		"""Feed one *line* (optionally with pre-parsed *date*) to the filter.

		Returns (processed_line, matches, is_ignored).  Also updates
		per-regex hit statistics and, in buffered (multi-line) mode, moves
		lines consumed from the buffer from 'missed' to 'matched'.
		"""
		orgLineBuffer = self._filter._Filter__lineBuffer
		# duplicate line buffer (list can be changed inplace during processLine):
		if self._filter.getMaxLines() > 1:
			orgLineBuffer = orgLineBuffer[:]
		fullBuffer = len(orgLineBuffer) >= self._filter.getMaxLines()
		is_ignored = self._lineIgnored = False
		try:
			found = self._filter.processLine(line, date)
			lines = []
			ret = []
			for match in found:
				if not self._opts.out:
					# Append True/False flag depending if line was matched by
					# more than one regex
					match.append(len(ret)>1)
					regex = self._failregex[match[0]]
					regex.inc()
					regex.appendIP(match)
				if not match[3].get('nofail'):
					ret.append(match)
				else:
					is_ignored = True
			if self._opts.out: # (formatted) output - don't need stats:
				return None, ret, None
			# prefregex stats:
			if self._filter.prefRegex:
				pre = self._filter.prefRegex
				if pre.hasMatched():
					self._prefREMatched += 1
					if self._verbose:
						if len(self._prefREGroups) < self._maxlines:
							self._prefREGroups.append(pre.getGroups())
						else:
							if len(self._prefREGroups) == self._maxlines:
								self._prefREGroups.append('...')
		except RegexException as e: # pragma: no cover
			output( 'ERROR: %s' % e )
			return None, 0, None
		# buffered mode: lines the filter consumed from its buffer were
		# previously counted as missed - reclassify them as matched now:
		if self._filter.getMaxLines() > 1 and not self._opts.out:
			for bufLine in orgLineBuffer[int(fullBuffer):]:
				if bufLine not in self._filter._Filter__lineBuffer:
					try:
						self._line_stats.missed_lines.pop(
							self._line_stats.missed_lines.index("".join(bufLine)))
						if self._debuggex:
							self._line_stats.missed_lines_timeextracted.pop(
								self._line_stats.missed_lines_timeextracted.index(
									"".join(bufLine[::2])))
					except ValueError:
						pass
					# if buffering - add also another lines from match:
					if self._print_all_matched:
						if not self._debuggex:
							self._line_stats.matched_lines.append("".join(bufLine))
						else:
							lines.append(bufLine[0] + bufLine[2])
					self._line_stats.matched += 1
					self._line_stats.missed -= 1
		if lines: # pre-lines parsed in multiline mode (buffering)
			lines.append(self._filter.processedLine())
			line = "\n".join(lines)
		return line, ret, (is_ignored or self._lineIgnored)
|
||||
|
||||
	def _prepaireOutput(self):
		"""Prepares output- and fetch-function corresponding given '--out' option (format)

		Returns a callable _out(ret) printing each match in the requested
		form: id/fid, ip, msg, row, a single failure-info key, or an
		extended template with <tag> substitution.
		"""
		ofmt = self._opts.out
		if ofmt in ('id', 'fid'):
			def _out(ret):
				for r in ret:
					output(r[1])
		elif ofmt == 'ip':
			def _out(ret):
				for r in ret:
					output(r[3].get('ip', r[1]))
		elif ofmt == 'msg':
			def _out(ret):
				for r in ret:
					for r in r[3].get('matches'):
						if not isinstance(r, str):
							r = ''.join(r for r in r)
						output(r)
		elif ofmt == 'row':
			def _out(ret):
				for r in ret:
					output('[%r,\t%r,\t%r],' % (r[1],r[2],dict((k,v) for k, v in r[3].items() if k != 'matches')))
		elif '<' not in ofmt:
			# plain failure-info key lookup:
			def _out(ret):
				for r in ret:
					output(r[3].get(ofmt))
		else: # extended format with tags substitution:
			from ..server.actions import Actions, CommandAction, BanTicket
			def _escOut(t, v):
				# use safe escape (avoid inject on pseudo tag "\x00msg\x00"):
				if t not in ('msg',):
					return v.replace('\x00', '\\x00')
				return v
			def _out(ret):
				rows = []
				wrap = {'NL':0}
				for r in ret:
					ticket = BanTicket(r[1], time=r[2], data=r[3])
					aInfo = Actions.ActionInfo(ticket)
					# if msg tag is used - output if single line (otherwise let it as is to wrap multilines later):
					def _get_msg(self):
						if not wrap['NL'] and len(r[3].get('matches', [])) <= 1:
							return self['matches']
						else: # pseudo tag for future replacement:
							wrap['NL'] = 1
							return "\x00msg\x00"
					aInfo['msg'] = _get_msg
					# not recursive interpolation (use safe escape):
					v = CommandAction.replaceDynamicTags(ofmt, aInfo, escapeVal=_escOut)
					if wrap['NL']: # contains multiline tags (msg):
						rows.append((r, v))
						continue
					output(v)
				# wrap multiline tag (msg) interpolations to single line:
				for r, v in rows:
					for r in r[3].get('matches'):
						if not isinstance(r, str):
							r = ''.join(r for r in r)
						r = v.replace("\x00msg\x00", r)
						output(r)
		return _out
|
||||
|
||||
|
||||
	def process(self, test_lines):
		"""Run every entry of *test_lines* through testRegex and keep stats.

		Entries are plain strings or (line, date) tuples (journal mode).
		With --out, matches are emitted through the prepared formatter
		(honouring --invert) instead of being collected for the report.
		"""
		t0 = time.time()
		out = None
		if self._opts.out: # get out function
			out = self._prepaireOutput()
			outinv = self._opts.invert
		for line in test_lines:
			if isinstance(line, tuple):
				line_datetimestripped, ret, is_ignored = self.testRegex(line[0], line[1])
				line = "".join(line[0])
			else:
				line = line.rstrip('\r\n')
				if line.startswith('#') or not line:
					# skip comment and empty lines
					continue
				line_datetimestripped, ret, is_ignored = self.testRegex(line)

			if out: # (formatted) output:
				if len(ret) > 0 and not is_ignored:
					if not outinv: out(ret)
				elif outinv: # inverted output (currently only time and message as matches):
					if not len(ret): # [failRegexIndex, fid, date, fail]
						ret = [[-1, "", self._filter._Filter__lastDate, {"fid":"", "matches":[line]}]]
					out(ret)
				continue

			# statistics mode - classify the line:
			if is_ignored:
				self._line_stats.ignored += 1
				if not self._print_no_ignored and (self._print_all_ignored or self._line_stats.ignored <= self._maxlines + 1):
					self._line_stats.ignored_lines.append(line)
					if self._debuggex:
						self._line_stats.ignored_lines_timeextracted.append(line_datetimestripped)
			elif len(ret) > 0:
				self._line_stats.matched += 1
				if self._print_all_matched:
					self._line_stats.matched_lines.append(line)
					if self._debuggex:
						self._line_stats.matched_lines_timeextracted.append(line_datetimestripped)
			else:
				self._line_stats.missed += 1
				if not self._print_no_missed and (self._print_all_missed or self._line_stats.missed <= self._maxlines + 1):
					self._line_stats.missed_lines.append(line)
					if self._debuggex:
						self._line_stats.missed_lines_timeextracted.append(line_datetimestripped)
			self._line_stats.tested += 1

		self._time_elapsed = time.time() - t0
|
||||
|
||||
	def printLines(self, ltype):
		"""Print collected lines of kind *ltype* ('matched'/'missed'/'ignored').

		In --debuggex mode each line is combined with every candidate regex
		and a debuggex.com URL.  Output is suppressed beyond maxlines unless
		the corresponding --print-all-* flag was given.
		"""
		lstats = self._line_stats
		assert(lstats.missed == lstats.tested - (lstats.matched + lstats.ignored))
		lines = lstats[ltype]
		l = lstats[ltype + '_lines']
		multiline = self._filter.getMaxLines() > 1
		if lines:
			header = "%s line(s):" % (ltype.capitalize(),)
			if self._debuggex:
				if ltype == 'missed' or ltype == 'matched':
					regexlist = self._failregex
				else:
					regexlist = self._ignoreregex
				l = lstats[ltype + '_lines_timeextracted']
				if lines < self._maxlines or getattr(self, '_print_all_' + ltype):
					# cartesian product of lines x regexes:
					ans = [[]]
					for arg in [l, regexlist]:
						ans = [ x + [y] for x in ans for y in arg ]
					b = [a[0] + ' | ' + a[1].getFailRegex() + ' | ' +
						debuggexURL(self.encode_line(a[0]), a[1].getFailRegex(),
							multiline, self._opts.usedns) for a in ans]
					pprint_list([x.rstrip() for x in b], header)
				else:
					output( "%s too many to print. Use --print-all-%s " \
						"to print all %d lines" % (header, ltype, lines) )
			elif lines < self._maxlines or getattr(self, '_print_all_' + ltype):
				pprint_list([x.rstrip() for x in l], header)
			else:
				output( "%s too many to print. Use --print-all-%s " \
					"to print all %d lines" % (header, ltype, lines) )
|
||||
|
||||
	def printStats(self):
		"""Print the final report: per-regex hits, date-template hits, totals.

		No-op (returns True) when formatted output (--out) is active.
		"""
		if self._opts.out: return True
		output( "" )
		output( "Results" )
		output( "=======" )

		def print_failregexes(title, failregexes):
			# Print title
			total, out = 0, []
			for cnt, failregex in enumerate(failregexes):
				match = failregex.getStats()
				total += match
				if (match or self._verbose):
					out.append("%2d) [%d] %s" % (cnt+1, match, failregex.getFailRegex()))

				if self._verbose and len(failregex.getIPList()):
					for ip in failregex.getIPList():
						timeTuple = time.localtime(ip[2])
						timeString = time.strftime("%a %b %d %H:%M:%S %Y", timeTuple)
						out.append(
							" %s %s%s" % (
								ip[1],
								timeString,
								ip[-1] and " (multiple regex matched)" or ""))

			output( "\n%s: %d total" % (title, total) )
			pprint_list(out, " #) [# of hits] regular expression")
			return total

		# Print prefregex:
		if self._filter.prefRegex:
			#self._filter.prefRegex.hasMatched()
			pre = self._filter.prefRegex
			out = [pre.getRegex()]
			if self._verbose:
				for grp in self._prefREGroups:
					out.append(" %s" % (grp,))
			output( "\n%s: %d total" % ("Prefregex", self._prefREMatched) )
			pprint_list(out)

		# Print regex's:
		total = print_failregexes("Failregex", self._failregex)
		_ = print_failregexes("Ignoreregex", self._ignoreregex)


		if self._filter.dateDetector is not None:
			output( "\nDate template hits:" )
			out = []
			for template in self._filter.dateDetector.templates:
				if self._verbose or template.hits:
					out.append("[%d] %s" % (template.hits, template.name))
					if self._verbose_date:
						out.append(" # weight: %.3f (%.3f), pattern: %s" % (
							template.weight, template.template.weight,
							getattr(template, 'pattern', ''),))
						out.append(" # regex: %s" % (getattr(template, 'regex', ''),))
			pprint_list(out, "[# of hits] date format")

		output( "\nLines: %s" % self._line_stats, )
		if self._time_elapsed is not None:
			output( "[processed in %.2f sec]" % self._time_elapsed, )
		output( "" )

		if self._print_all_matched:
			self.printLines('matched')
		if not self._print_no_ignored:
			self.printLines('ignored')
		if not self._print_no_missed:
			self.printLines('missed')

		return True
|
||||
|
||||
	def start(self, args):
		"""Resolve the <LOG> and <REGEX> arguments, process and report.

		<LOG> may be a log file path, "systemd-journal[...]" or a literal
		(possibly multi-line) log string.  Returns False on any error.
		"""

		cmd_log, cmd_regex = args[:2]

		if cmd_log.startswith("systemd-journal"): # pragma: no cover
			self._backend = 'systemd'

		try:
			if not self.readRegex(cmd_regex, 'fail'): # pragma: no cover
				return False
			if len(args) == 3 and not self.readRegex(args[2], 'ignore'): # pragma: no cover
				return False
		except RegexException as e:
			output( 'ERROR: %s' % e )
			return False

		if os.path.isfile(cmd_log):
			try:
				test_lines = FileContainer(cmd_log, self._encoding, doOpen=True)

				self.output( "Use log file : %s" % cmd_log )
				self.output( "Use encoding : %s" % self._encoding )
			except IOError as e: # pragma: no cover
				output( e )
				return False
		elif cmd_log.startswith("systemd-journal"): # pragma: no cover
			if not FilterSystemd:
				output( "Error: systemd library not found. Exiting..." )
				return False
			self.output( "Use systemd journal" )
			self.output( "Use encoding : %s" % self._encoding )
			backend, beArgs = extractOptions(cmd_log)
			flt = FilterSystemd(None, **beArgs)
			flt.setLogEncoding(self._encoding)
			myjournal = flt.getJournalReader()
			journalmatch = self._journalmatch
			self.setDatePattern(None)
			if journalmatch:
				flt.addJournalMatch(journalmatch)
				self.output( "Use journal match : %s" % " ".join(journalmatch) )
			test_lines = journal_lines_gen(flt, myjournal)
		else:
			# if single line parsing (without buffering)
			if self._filter.getMaxLines() <= 1 and '\n' not in cmd_log:
				self.output( "Use single line : %s" % shortstr(cmd_log.replace("\n", r"\n")) )
				test_lines = [ cmd_log ]
			else: # multi line parsing (with and without buffering)
				test_lines = cmd_log.split("\n")
				self.output( "Use multi line : %s line(s)" % len(test_lines) )
				for i, l in enumerate(test_lines):
					if i >= 5:
						self.output( "| ..." ); break
					self.output( "| %2.2s: %s" % (i+1, shortstr(l)) )
				self.output( "`-" )

		self.output( "" )

		self.process(test_lines)

		if not self.printStats():
			return False

		return True
|
||||
|
||||
|
||||
def _loc_except_hook(exctype, value, traceback):
|
||||
if (exctype != BrokenPipeError and exctype != IOError or value.errno != 32):
|
||||
return sys.__excepthook__(exctype, value, traceback)
|
||||
# pipe seems to be closed (head / tail / etc), thus simply exit:
|
||||
sys.exit(0)
|
||||
|
||||
def exec_command_line(*args):
	"""CLI entry point: parse options, configure logging, run the tester.

	Exits with status 255 on argument errors, setup errors or a failed
	run; exits 0 silently on a broken output pipe.
	"""
	sys.excepthook = _loc_except_hook; # stop on closed/broken pipe

	logging.exitOnIOError = True
	parser = get_opt_parser()
	(opts, args) = parser.parse_args(*args)
	errors = []
	if opts.print_no_missed and opts.print_all_missed: # pragma: no cover
		errors.append("ERROR: --print-no-missed and --print-all-missed are mutually exclusive.")
	if opts.print_no_ignored and opts.print_all_ignored: # pragma: no cover
		errors.append("ERROR: --print-no-ignored and --print-all-ignored are mutually exclusive.")

	# We need 2 or 3 parameters
	if not len(args) in (2, 3):
		errors.append("ERROR: provide both <LOG> and <REGEX>.")
	if errors:
		parser.print_help()
		sys.stderr.write("\n" + "\n".join(errors) + "\n")
		sys.exit(255)

	if not opts.out:
		output( "" )
		output( "Running tests" )
		output( "=============" )
		output( "" )

	# Log level (default critical):
	opts.log_level = str2LogLevel(opts.log_level)
	logSys.setLevel(opts.log_level)

	# Add the default logging handler
	stdout = logging.StreamHandler(sys.stdout)

	fmt = '%(levelname)-1.1s: %(message)s' if opts.verbose <= 1 else ' %(message)s'

	if opts.log_traceback:
		Formatter = FormatterWithTraceBack
		fmt = (opts.full_traceback and ' %(tb)s' or ' %(tbc)s') + fmt
	else:
		Formatter = logging.Formatter

	# Custom log format for the verbose tests runs
	stdout.setFormatter(Formatter(getVerbosityFormat(opts.verbose, fmt)))
	logSys.addHandler(stdout)

	try:
		fail2banRegex = Fail2banRegex(opts)
	except Exception as e:
		if opts.verbose or logSys.getEffectiveLevel()<=logging.DEBUG:
			logSys.critical(e, exc_info=True)
		else:
			output( 'ERROR: %s' % e )
		sys.exit(255)

	if not fail2banRegex.start(args):
		sys.exit(255)
|
||||
237
fail2ban-master/fail2ban/client/fail2banserver.py
Normal file
237
fail2ban-master/fail2ban/client/fail2banserver.py
Normal file
@@ -0,0 +1,237 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
#
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
__author__ = "Fail2Ban Developers"
|
||||
__copyright__ = "Copyright (c) 2004-2008 Cyril Jaquier, 2012-2014 Yaroslav Halchenko, 2014-2016 Serg G. Brester"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from .fail2bancmdline import Fail2banCmdLine, ServerExecutionException, \
|
||||
logSys, PRODUCTION, exit
|
||||
|
||||
SERVER = "fail2ban-server"
|
||||
|
||||
##
|
||||
# \mainpage Fail2Ban
|
||||
#
|
||||
# \section Introduction
|
||||
#
|
||||
class Fail2banServer(Fail2banCmdLine):
	"""Command-line front-end of ``fail2ban-server``.

	Starts the Fail2Ban server either directly in the current process
	(optionally daemonizing inside the Server object) or asynchronously in a
	new process.  When the command line carries commands or interactive mode,
	the whole processing is delegated to Fail2banClient.
	"""

	# def __init__(self):
	# 	Fail2banCmdLine.__init__(self)

	##
	# Start Fail2Ban server in main thread without fork (direct, it can fork itself in Server if daemon=True).
	#
	# Start the Fail2ban server in background/foreground (daemon mode or not).

	@staticmethod
	def startServerDirect(conf, daemon=True, setServer=None):
		"""Start the server in the current thread (no explicit fork here).

		Parameters
		----------
		conf : dict
			Configuration; keys used here: "socket", "pidfile", "force",
			"verbose" and optionally "onstart" (callback).
		daemon : bool
			Passed to Server; if True the server daemonizes itself internally.
		setServer : callable or None
			Callback invoked with the created Server instance before start,
			so the caller gets a handle even if start() blocks.

		Returns
		-------
		Server
			The created server instance.
		"""
		logSys.debug("  direct starting of server in %s, daemon: %s", os.getpid(), daemon)
		# import on demand so the client code path never loads server modules:
		from ..server.server import Server
		server = None
		try:
			# Start it in foreground (current thread, not new process),
			# server object will internally fork self if daemon is True
			server = Server(daemon)
			# notify caller - set server handle:
			if setServer:
				setServer(server)
			# run:
			server.start(conf["socket"],
							conf["pidfile"], conf["force"],
							conf=conf)
		except Exception as e: # pragma: no cover
			# best-effort shutdown of a partially started server; the original
			# exception is re-raised below:
			try:
				if server:
					server.quit()
			except Exception as e2:
				if conf["verbose"] > 1:
					logSys.exception(e2)
			raise
		finally:
			# notify waiting thread server ready resp. done (background execution, error case, etc):
			if conf.get('onstart'):
				conf['onstart']()

		return server

	##
	# Start Fail2Ban server.
	#
	# Start the Fail2ban server in daemon mode (background, start from client).

	@staticmethod
	def startServerAsync(conf):
		"""Start the server in a new process (fork + exec / spawn).

		Builds the ``fail2ban-server`` command line from *conf* and replaces
		the forked child with it (production), or spawns it synchronously
		(no-fork mode, e.g. test cases).
		"""
		# Forks the current process, don't fork if async specified (ex: test cases)
		pid = 0
		frk = not conf["async"] and PRODUCTION
		if frk: # pragma: no cover
			pid = os.fork()
		logSys.debug("  async starting of server in %s, fork: %s - %s", os.getpid(), frk, pid)
		if pid == 0:
			# child process (or non-forking mode): compose server arguments
			args = list()
			args.append(SERVER)
			# Start async (don't read config) and in background as requested.
			args.append("--async")
			args.append("-b")
			# Set the socket path.
			args.append("-s")
			args.append(conf["socket"])
			# Set the pidfile
			args.append("-p")
			args.append(conf["pidfile"])
			# Force the execution if needed.
			if conf["force"]:
				args.append("-x")
			if conf["verbose"] > 1:
				args.append("-" + "v"*(conf["verbose"]-1))
			# Logging parameters:
			for o in ('loglevel', 'logtarget', 'syslogsocket'):
				args.append("--"+o)
				args.append(conf[o])
			try:
				# Directory of client (to try the first start from current or the same directory as client, and from relative bin):
				exe = Fail2banServer.getServerPath()
				if not frk:
					# Wrap args to use the same python version in client/server (important for multi-python systems):
					args[0] = exe
					exe = sys.executable
					args[0:0] = [exe]
				logSys.debug("Starting %r with args %r", exe, args)
				if frk: # pragma: no cover
					# replace the forked child with the server executable:
					os.execv(exe, args)
				else:
					# use P_WAIT instead of P_NOWAIT (to prevent defunct-zomby process), it started as daemon, so parent exit fast after fork):
					ret = os.spawnv(os.P_WAIT, exe, args)
					if ret != 0: # pragma: no cover
						raise OSError(ret, "Unknown error by executing server %r with %r" % (args[1], exe))
			except OSError as e: # pragma: no cover
				if not frk: #not PRODUCTION:
					raise
				# Use the PATH env.
				logSys.warning("Initial start attempt failed (%s). Starting %r with the same args", e, SERVER)
				if frk: # pragma: no cover
					os.execvp(SERVER, args)

	@staticmethod
	def getServerPath():
		"""Resolve the path of the ``fail2ban-server`` executable.

		Tries, in order: the start directory (sys.path[0]), the directory of
		the invoked script (sys.argv[0]), and finally the package-relative
		``bin`` directory.  Returns the last candidate even if no file was
		found (callers handle the start failure).
		"""
		startdir = sys.path[0]
		exe = os.path.abspath(os.path.join(startdir, SERVER))
		if not os.path.isfile(exe): # may be unresolved in test-cases, so get relative starter (client):
			startdir = os.path.dirname(sys.argv[0])
			exe = os.path.abspath(os.path.join(startdir, SERVER))
			if not os.path.isfile(exe): # may be unresolved in test-cases, so try to get relative bin-directory:
				startdir = os.path.dirname(os.path.abspath(__file__))
				startdir = os.path.join(os.path.dirname(os.path.dirname(startdir)), "bin")
				exe = os.path.abspath(os.path.join(startdir, SERVER))
		return exe

	def _Fail2banClient(self):
		"""Create a Fail2banClient that shares this instance's members."""
		from .fail2banclient import Fail2banClient
		cli = Fail2banClient()
		cli.applyMembers(self)
		return cli

	def start(self, argv):
		"""Parse the command line and start the server.

		Returns True on success; on usage/命令 processing the return value of
		initCmdLine resp. the client run is propagated.  Exits the process
		with code 255 on errors.
		"""
		server = None
		try:
			# Command line options
			ret = self.initCmdLine(argv)
			if ret is not None:
				return ret

			# Commands
			args = self._args

			cli = None
			# Just start:
			if len(args) == 1 and args[0] == 'start' and not self._conf.get("interactive", False):
				pass
			else:
				# If client mode - whole processing over client:
				if len(args) or self._conf.get("interactive", False):
					cli = self._Fail2banClient()
					return cli.start(argv)

			# Start the server, corresponding options:
			#   background = True, if should be new process running in background, otherwise start in
			#     foreground process will be forked in daemonize, inside of Server module.
			#   nonsync = True, normally internal call only, if started from client, so configures
			#     the server via asynchronous thread.
			background = self._conf["background"]
			nonsync = self._conf.get("async", False)

			# If was started not from the client:
			if not nonsync:
				# Load requirements on demand (we need utils only when asynchronous handling):
				# NOTE: Utils is referenced again below under the same `not nonsync`
				# condition, so this conditional import is safe.
				from ..server.utils import Utils
				# Start new thread with client to read configuration and
				# transfer it to the server:
				cli = self._Fail2banClient()
				cli._conf = self._conf
				phase = dict()
				logSys.debug('Configure via async client thread')
				cli.configureServer(phase=phase)

			# Start server, daemonize it, etc.
			pid = os.getpid()
			server = Fail2banServer.startServerDirect(self._conf, background,
				cli._set_server if cli else None)
			# If forked - just exit other processes
			if pid != os.getpid(): # pragma: no cover
				os._exit(0)
			if cli:
				cli._server = server

			# wait for client answer "done":
			if not nonsync and cli:
				Utils.wait_for(lambda: phase.get('done', None) is not None, self._conf["timeout"], 0.001)
				if not phase.get('done', False):
					if server: # pragma: no cover
						server.quit()
					exit(255)
				if background:
					logSys.debug('Starting server done')

		except Exception as e:
			if self._conf["verbose"] > 1:
				logSys.exception(e)
			else:
				logSys.error(e)
			if server: # pragma: no cover
				server.quit()
			exit(255)

		return True

	@staticmethod
	def exit(code=0): # pragma: no cover
		"""Log the failure (non-zero code) and terminate with *code*."""
		if code != 0:
			logSys.error("Could not start %s", SERVER)
		exit(code)
|
||||
|
||||
def exec_command_line(argv):
	"""Module entry point: run the server front-end and exit with its status."""
	frontend = Fail2banServer()
	exit(0 if frontend.start(argv) else 255)
|
||||
100
fail2ban-master/fail2ban/client/filterreader.py
Normal file
100
fail2ban-master/fail2ban/client/filterreader.py
Normal file
@@ -0,0 +1,100 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import shlex
|
||||
|
||||
from .configreader import DefinitionInitConfigReader
|
||||
from ..helpers import getLogger
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class FilterReader(DefinitionInitConfigReader):
	"""Reader of a filter definition from ``filter.d`` and converter of its
	options into server command stream entries for a jail.
	"""

	# option name -> [type, default] recognized in a filter definition:
	_configOpts = {
		"usedns": ["string", None],
		"prefregex": ["string", None],
		"ignoreregex": ["string", None],
		"failregex": ["string", None],
		"maxlines": ["int", None],
		"datepattern": ["string", None],
		"journalmatch": ["string", None],
	}

	def setFile(self, fileName):
		"""Remember the bare file name and point the reader at filter.d/<name>."""
		self.__file = fileName
		DefinitionInitConfigReader.setFile(self, os.path.join("filter.d", fileName))

	def getFile(self):
		"""Return the bare filter file name (without the filter.d prefix)."""
		return self.__file

	def applyAutoOptions(self, backend):
		"""Derive the 'logtype' init option from the jail backend name."""
		# set init option to backend-related logtype, considering
		# that the filter settings may be overwritten in its local:
		if (not self._initOpts.get('logtype') and
			not self.has_option('Definition', 'logtype', False)
		):
			# 'journal' for systemd* backends, 'file' otherwise:
			self._initOpts['logtype'] = ['file','journal'][int(backend.startswith("systemd"))]

	def convert(self):
		"""Convert the combined filter options to a server command stream."""
		stream = list()
		opts = self.getCombined()
		if not len(opts):
			return stream
		return FilterReader._fillStream(stream, opts, self._jailName)

	@staticmethod
	def _fillStream(stream, opts, jailName):
		"""Append set-commands for *opts* of jail *jailName* to *stream*.

		Ordering matters: 'usedns' is inserted first of all, other
		priority options ('maxlines', 'prefregex') before the regex
		commands; everything else is appended in iteration order.
		Returns *stream* (mutated in place).
		"""
		prio0idx = 0
		for opt, value in opts.items():
			# Do not send a command if the value is not set (empty).
			if value is None: continue
			if opt in ("failregex", "ignoreregex"):
				multi = []
				for regex in value.split('\n'):
					# Do not send a command if the rule is empty.
					if regex != '':
						multi.append(regex)
				# one multi-set command for several regex, plain set for a single one:
				if len(multi) > 1:
					stream.append(["multi-set", jailName, "add" + opt, multi])
				elif len(multi):
					stream.append(["set", jailName, "add" + opt, multi[0]])
			elif opt in ('usedns', 'maxlines', 'prefregex'):
				# Be sure we set this options first, and usedns is before all regex(s).
				stream.insert(0 if opt == 'usedns' else prio0idx,
					["set", jailName, opt, value])
				prio0idx += 1
			elif opt == 'datepattern':
				stream.append(["set", jailName, opt, value])
			elif opt == 'journalmatch':
				# one addjournalmatch command per non-empty line, shell-tokenized:
				for match in value.split("\n"):
					if match == '': continue
					stream.append(
						["set", jailName, "addjournalmatch"] + shlex.split(match))
		return stream
|
||||
|
||||
316
fail2ban-master/fail2ban/client/jailreader.py
Normal file
316
fail2ban-master/fail2ban/client/jailreader.py
Normal file
@@ -0,0 +1,316 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import glob
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
|
||||
from .configreader import ConfigReaderUnshared, ConfigReader, NoSectionError
|
||||
from .filterreader import FilterReader
|
||||
from .actionreader import ActionReader
|
||||
from ..version import version
|
||||
from ..helpers import _merge_dicts, getLogger, extractOptions, splitWithOptions, splitwords
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class NoJailError(ValueError):
	"""Raised when the requested jail section is absent from the configuration."""
|
||||
|
||||
class JailReader(ConfigReader):
	"""Reader of a single jail section: reads jail, filter and action
	configuration and converts it into the server command stream.
	"""

	def __init__(self, name, force_enable=False, **kwargs):
		"""Create a reader for jail *name*.

		force_enable : bool
			Treat the jail as enabled regardless of its 'enabled' option.
		"""
		ConfigReader.__init__(self, **kwargs)
		self.__name = name
		self.__filter = None
		self.__force_enable = force_enable
		self.__actions = list()
		self.__opts = None

	@property
	def options(self):
		# parsed jail options (None until getOptions() ran)
		return self.__opts

	def setName(self, value):
		self.__name = value

	def getName(self):
		return self.__name

	def read(self):
		"""Read the jail configuration; raise NoJailError if section missing."""
		out = ConfigReader.read(self, "jail")
		# Before returning -- verify that requested section
		# exists at all
		if not (self.__name in self.sections()):
			raise NoJailError("Jail %r was not found among available"
							  % self.__name)
		return out

	def isEnabled(self):
		"""True if force-enabled or the jail's 'enabled' option is set."""
		return self.__force_enable or (
			self.__opts and self.__opts.get("enabled", False))

	@staticmethod
	def _glob(path):
		"""Given a path for glob return list of files to be passed to server.

		Dangling symlinks are warned about and not returned
		"""
		pathList = []
		for p in glob.glob(path):
			if os.path.exists(p):
				pathList.append(p)
			else:
				logSys.warning("File %s is a dangling link, thus cannot be monitored" % p)
		return pathList

	# minimal option set read first, before interpolation of filter options:
	_configOpts1st = {
		"enabled": ["bool", False],
		"backend": ["string", "auto"],
		"filter": ["string", ""]
	}
	# full option set (name -> [type, default]) read after filter merge:
	_configOpts = {
		"enabled": ["bool", False],
		"backend": ["string", "auto"],
		"maxretry": ["int", None],
		"maxmatches": ["int", None],
		"findtime": ["string", None],
		"bantime": ["string", None],
		"bantime.increment": ["bool", None],
		"bantime.factor": ["string", None],
		"bantime.formula": ["string", None],
		"bantime.multipliers": ["string", None],
		"bantime.maxtime": ["string", None],
		"bantime.rndtime": ["string", None],
		"bantime.overalljails": ["bool", None],
		"ignorecommand": ["string", None],
		"ignoreself": ["bool", None],
		"ignoreip": ["string", None],
		"ignorecache": ["string", None],
		"filter": ["string", ""],
		"logtimezone": ["string", None],
		"logencoding": ["string", None],
		"logpath": ["string", None],
		"skip_if_nologs": ["bool", False],
		"systemd_if_nologs": ["bool", True],
		"action": ["string", ""]
	}
	_configOpts.update(FilterReader._configOpts)

	# options handled specially in convert() and therefore not emitted
	# as plain "set" commands there:
	_ignoreOpts = set(
		['action', 'filter', 'enabled', 'backend', 'skip_if_nologs', 'systemd_if_nologs'] +
		list(FilterReader._configOpts.keys())
	)

	def getOptions(self, addOpts=None):
		"""Read and resolve all options of the jail (two-phase).

		First reads the minimal option set, then reads/merges the filter
		("known/..." values), then re-reads the full option set so that
		interpolations referencing filter values work, and finally reads
		the actions.  Returns True on success; on a definition error the
		error text is stored under 'config-error' and False is returned.
		"""

		basedir = self.getBaseDir()

		# Before interpolation (substitution) add static options always available as default:
		self.merge_defaults({
			"fail2ban_version": version,
			"fail2ban_confpath": basedir
		})

		try:

			# Read first options only needed for merge defaults ('known/...' from filter):
			self.__opts = ConfigReader.getOptions(self, self.__name, self._configOpts1st,
				shouldExist=True)
			if not self.__opts: # pragma: no cover
				raise JailDefError("Init jail options failed")
			if addOpts:
				self.__opts = _merge_dicts(self.__opts, addOpts)

			if not self.isEnabled():
				return True

			# Read filter
			flt = self.__opts["filter"]
			if flt:
				try:
					filterName, filterOpt = extractOptions(flt)
				except ValueError as e:
					raise JailDefError("Invalid filter definition %r: %s" % (flt, e))
				if addOpts:
					filterOpt = _merge_dicts(filterOpt, addOpts)
				self.__filter = FilterReader(
					filterName, self.__name, filterOpt,
					share_config=self.share_config, basedir=basedir)
				ret = self.__filter.read()
				if not ret:
					raise JailDefError("Unable to read the filter %r" % filterName)
				# set backend-related options (logtype):
				self.__filter.applyAutoOptions(self.__opts.get('backend', ''))
				# merge options from filter as 'known/...' (all options unfiltered):
				self.__filter.getOptions(self.__opts, all=True)
				ConfigReader.merge_section(self, self.__name, self.__filter.getCombined(), 'known/')
			else:
				self.__filter = None
				logSys.warning("No filter set for jail %s" % self.__name)

			# Read second all options (so variables like %(known/param) can be interpolated):
			self.__opts = ConfigReader.getOptions(self, self.__name, self._configOpts)
			if not self.__opts: # pragma: no cover
				raise JailDefError("Read jail options failed")

			# cumulate filter options again (ignore given in jail):
			if self.__filter:
				self.__filter.getOptions(self.__opts)

			# Read action
			for act in splitWithOptions(self.__opts["action"]):
				try:
					act = act.strip()
					if not act: # skip empty actions
						continue
					# join with previous line if needed (consider possible new-line):
					try:
						actName, actOpt = extractOptions(act)
					except ValueError as e:
						raise JailDefError("Invalid action definition %r: %s" % (act, e))
					if actName.endswith(".py"):
						# a python action file: register it directly via addaction
						self.__actions.append([
							"set",
							self.__name,
							"addaction",
							actOpt.pop("actname", os.path.splitext(actName)[0]),
							os.path.join(
								basedir, "action.d", actName),
							json.dumps(actOpt),
						])
					else:
						# a declarative action definition from action.d:
						action = ActionReader(
							actName, self.__name, actOpt,
							share_config=self.share_config, basedir=basedir)
						ret = action.read()
						if ret:
							action.getOptions(self.__opts)
							self.__actions.append(action)
						else:
							raise JailDefError("Unable to read action %r" % actName)
				except JailDefError:
					raise
				except Exception as e:
					logSys.debug("Caught exception: %s", e, exc_info=True)
					raise ValueError("Error in action definition %r: %r" % (act, e))
			if not len(self.__actions):
				logSys.warning("No actions were defined for %s" % self.__name)

		except JailDefError as e:
			e = str(e)
			logSys.error(e)
			if not self.__opts:
				self.__opts = dict()
			# remember the error so convert() emits a config-error entry:
			self.__opts['config-error'] = e
			return False
		return True

	@property
	def filter(self):
		# the FilterReader of this jail (None when no filter configured)
		return self.__filter

	def getCombined(self):
		"""Return jail options merged with the combined filter options."""
		if not self.__filter:
			return self.__opts
		return _merge_dicts(self.__opts, self.__filter.getCombined())

	def convert(self, allow_no_files=False, systemd_if_nologs=True):
		"""Convert read before __opts to the commands stream

		Parameters
		----------
		allow_no_files : bool
			Either to allow log files to be missing entirely. Primarily is
			used for testing
		systemd_if_nologs : bool
			Allow switching an 'auto' backend to systemd when no log files
			were found and the jail has a journalmatch.
		"""

		stream = []
		stream2 = []
		e = self.__opts.get('config-error')
		if e:
			stream.extend([['config-error', "Jail '%s' skipped, because of wrong configuration: %s" % (self.__name, e)]])
			return stream
		# fill jail with filter options, using filter (only not overridden in jail):
		if self.__filter:
			stream.extend(self.__filter.convert())
		# and using options from jail:
		FilterReader._fillStream(stream, self.__opts, self.__name)
		backend = self.__opts.get('backend', 'auto')
		for opt, value in self.__opts.items():
			if opt == "logpath":
				if backend.startswith("systemd"): continue
				found_files = 0
				for path in value.split("\n"):
					# optional trailing word selects tail/head monitoring mode:
					path = path.rsplit(" ", 1)
					path, tail = path if len(path) > 1 else (path[0], "head")
					pathList = JailReader._glob(path)
					if len(pathList) == 0:
						logSys.notice("No file(s) found for glob %s" % path)
					for p in pathList:
						found_files += 1
						# logpath after all log-related data (backend, date-pattern, etc)
						stream2.append(
							["set", self.__name, "addlogpath", p, tail])
				if not found_files:
					msg = "Have not found any log file for '%s' jail." % self.__name
					skip_if_nologs = self.__opts.get('skip_if_nologs', False)
					# if auto and we can switch to systemd backend (only possible if jail have journalmatch):
					if backend.startswith("auto") and systemd_if_nologs and (
						self.__opts.get('systemd_if_nologs', True) and
						self.__opts.get('journalmatch', None) is not None
					):
						# switch backend to systemd:
						backend = 'systemd'
						msg += " Jail will monitor systemd journal."
						skip_if_nologs = False
					elif not allow_no_files and not skip_if_nologs:
						raise ValueError(msg)
					logSys.warning(msg)
					if skip_if_nologs:
						# mark as runtime error and emit a skip entry instead of commands:
						self.__opts['runtime-error'] = msg
						msg = "Jail '%s' skipped, because of missing log files." % (self.__name,)
						logSys.warning(msg)
						stream = [['config-error', msg]]
						return stream
			elif opt == "ignoreip":
				stream.append(["set", self.__name, "addignoreip"] + splitwords(value))
			elif opt not in JailReader._ignoreOpts:
				stream.append(["set", self.__name, opt, value])
		# consider options order (after other options):
		if stream2: stream += stream2
		for action in self.__actions:
			if isinstance(action, (ConfigReaderUnshared, ConfigReader)):
				stream.extend(action.convert())
			else:
				# pre-built raw command (python action file entry):
				stream.append(action)
		# the jail must be added (with final backend) before all set commands:
		stream.insert(0, ["add", self.__name, backend])
		return stream
|
||||
|
||||
class JailDefError(Exception):
	"""Raised for an invalid jail definition (bad filter/action specification)."""
|
||||
114
fail2ban-master/fail2ban/client/jailsreader.py
Normal file
114
fail2ban-master/fail2ban/client/jailsreader.py
Normal file
@@ -0,0 +1,114 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from .configreader import ConfigReader
|
||||
from .jailreader import JailReader
|
||||
from ..helpers import getLogger
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class JailsReader(ConfigReader):
	"""Reader over all jail sections: collects enabled jails via JailReader
	and converts them to one combined server command stream.
	"""

	def __init__(self, force_enable=False, **kwargs):
		"""
		Parameters
		----------
		force_enable : bool, optional
		  Passed to JailReader to force enable the jails.
		  It is for internal use
		"""
		ConfigReader.__init__(self, **kwargs)
		self.__jails = list()
		self.__force_enable = force_enable

	@property
	def jails(self):
		# list of JailReader instances collected by getOptions()
		return self.__jails

	def read(self):
		"""Reset the collected jails and (re-)read the jail configuration."""
		self.__jails = list()
		return ConfigReader.read(self, "jail")

	def getOptions(self, section=None, ignoreWrong=True):
		"""Reads configuration for jail(s) and adds enabled jails to __jails

		Returns True when at least one jail parsed successfully (with
		ignoreWrong) resp. when no jail failed (without ignoreWrong).
		"""
		opts = []
		self.__opts = ConfigReader.getOptions(self, "Definition", opts)

		if section is None:
			sections = self.sections()
		else:
			sections = [ section ]

		# Get the options of all jails.
		parse_status = 0
		for sec in sections:
			if sec == 'INCLUDES':
				continue
			# use the cfg_share for filter/action caching and the same config for all
			# jails (use_config=...), therefore don't read it here:
			jail = JailReader(sec, force_enable=self.__force_enable,
				share_config=self.share_config, use_config=self._cfg)
			ret = jail.getOptions()
			if ret:
				if jail.isEnabled():
					# at least one jail was successful:
					parse_status |= 1
					# We only add enabled jails
					self.__jails.append(jail)
			else:
				logSys.error("Errors in jail %r.%s", sec, " Skipping..." if ignoreWrong else "")
				# keep the broken jail so convert() can emit its config-error:
				self.__jails.append(jail)
				# at least one jail was invalid:
				parse_status |= 2
		return ((ignoreWrong and parse_status & 1) or not (parse_status & 2))

	def convert(self, allow_no_files=False, systemd_if_nologs=True):
		"""Convert read before __opts and jails to the commands stream

		Parameters
		----------
		allow_no_files : bool
		  Either to allow log files to be missing entirely. Primarily is
		  used for testing
		systemd_if_nologs : bool
		  Passed through to JailReader.convert (backend auto-switch).
		"""

		stream = list()
		# Convert jails
		for jail in self.__jails:
			stream.extend(jail.convert(allow_no_files, systemd_if_nologs))
		# Start jails
		for jail in self.__jails:
			if not jail.options.get('config-error') and not jail.options.get('runtime-error'):
				stream.append(["start", jail.getName()])
			else:
				# just delete rtm-errors (to check next time if cached)
				jail.options.pop('runtime-error', None)

		return stream
|
||||
|
||||
310
fail2ban-master/fail2ban/compat/asynchat.py
Normal file
310
fail2ban-master/fail2ban/compat/asynchat.py
Normal file
@@ -0,0 +1,310 @@
|
||||
# -*- Mode: Python; tab-width: 4 -*-
|
||||
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
|
||||
# Author: Sam Rushing <rushing@nightmare.com>
|
||||
|
||||
# ======================================================================
|
||||
# Copyright 1996 by Sam Rushing
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and
|
||||
# its documentation for any purpose and without fee is hereby
|
||||
# granted, provided that the above copyright notice appear in all
|
||||
# copies and that both that copyright notice and this permission
|
||||
# notice appear in supporting documentation, and that the name of Sam
|
||||
# Rushing not be used in advertising or publicity pertaining to
|
||||
# distribution of the software without specific, written prior
|
||||
# permission.
|
||||
#
|
||||
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
||||
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
|
||||
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
||||
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
||||
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
||||
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
# ======================================================================
|
||||
|
||||
r"""A class supporting chat-style (command/response) protocols.
|
||||
|
||||
This class adds support for 'chat' style protocols - where one side
|
||||
sends a 'command', and the other sends a response (examples would be
|
||||
the common internet protocols - smtp, nntp, ftp, etc..).
|
||||
|
||||
The handle_read() method looks at the input stream for the current
|
||||
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
|
||||
for multi-line output), calling self.found_terminator() on its
|
||||
receipt.
|
||||
|
||||
for example:
|
||||
Say you build an async nntp client using this class. At the start
|
||||
of the connection, you'll have self.terminator set to '\r\n', in
|
||||
order to process the single-line greeting. Just before issuing a
|
||||
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
|
||||
command will be accumulated (using your own 'collect_incoming_data'
|
||||
method) up to the terminator, and then control will be returned to
|
||||
you - by calling your self.found_terminator() method.
|
||||
"""
|
||||
try:
|
||||
import asyncore
|
||||
except ImportError:
|
||||
from . import asyncore
|
||||
from collections import deque
|
||||
|
||||
|
||||
class async_chat(asyncore.dispatcher):
|
||||
"""This is an abstract class. You must derive from this class, and add
|
||||
the two methods collect_incoming_data() and found_terminator()"""
|
||||
|
||||
# these are overridable defaults
|
||||
|
||||
ac_in_buffer_size = 65536
|
||||
ac_out_buffer_size = 65536
|
||||
|
||||
# we don't want to enable the use of encoding by default, because that is a
|
||||
# sign of an application bug that we don't want to pass silently
|
||||
|
||||
use_encoding = 0
|
||||
encoding = 'latin-1'
|
||||
|
||||
	def __init__(self, sock=None, map=None):
		"""Initialize buffers and delegate socket setup to asyncore.dispatcher.

		sock: optional existing socket; map: optional asyncore channel map.
		"""
		# for string terminator matching
		self.ac_in_buffer = b''

		# we use a list here rather than io.BytesIO for a few reasons...
		# del lst[:] is faster than bio.truncate(0)
		# lst = [] is faster than bio.truncate(0)
		self.incoming = []

		# we toss the use of the "simple producer" and replace it with
		# a pure deque, which the original fifo was a wrapping of
		self.producer_fifo = deque()
		asyncore.dispatcher.__init__(self, sock, map)
|
||||
|
||||
def collect_incoming_data(self, data):
|
||||
raise NotImplementedError("must be implemented in subclass")
|
||||
|
||||
	def _collect_incoming_data(self, data):
		# Default collector helper: buffer the chunk until _get_data()
		# is called to retrieve and reset the accumulated bytes.
		self.incoming.append(data)
|
||||
|
||||
def _get_data(self):
|
||||
d = b''.join(self.incoming)
|
||||
del self.incoming[:]
|
||||
return d
|
||||
|
||||
def found_terminator(self):
|
||||
raise NotImplementedError("must be implemented in subclass")
|
||||
|
||||
def set_terminator(self, term):
|
||||
"""Set the input delimiter.
|
||||
|
||||
Can be a fixed string of any length, an integer, or None.
|
||||
"""
|
||||
if isinstance(term, str) and self.use_encoding:
|
||||
term = bytes(term, self.encoding)
|
||||
elif isinstance(term, int) and term < 0:
|
||||
raise ValueError('the number of received bytes must be positive')
|
||||
self.terminator = term
|
||||
|
||||
	def get_terminator(self):
		# Return the current input delimiter (see set_terminator).
		return self.terminator
|
||||
|
||||
# grab some more data from the socket,
|
||||
# throw it to the collector method,
|
||||
# check for the terminator,
|
||||
# if found, transition to the next state.
|
||||
|
||||
def handle_read(self):
    """Read from the socket, feed data to collect_incoming_data(), and
    call found_terminator() each time the configured terminator is hit.

    The while loop is necessary because a single recv() may deliver
    several data+terminator combinations.
    """
    try:
        data = self.recv(self.ac_in_buffer_size)
    except BlockingIOError:
        # nothing available right now; try again on the next read event
        return
    except OSError:
        self.handle_error()
        return

    if isinstance(data, str) and self.use_encoding:
        # BUG FIX: the original called bytes(str, self.encoding), passing
        # the builtin `str` type instead of the received data.
        data = bytes(data, self.encoding)
    self.ac_in_buffer = self.ac_in_buffer + data

    # Continue to search for self.terminator in self.ac_in_buffer,
    # while calling self.collect_incoming_data.
    while self.ac_in_buffer:
        lb = len(self.ac_in_buffer)
        terminator = self.get_terminator()
        if not terminator:
            # no terminator, collect it all
            self.collect_incoming_data(self.ac_in_buffer)
            self.ac_in_buffer = b''
        elif isinstance(terminator, int):
            # numeric terminator: consume exactly that many bytes
            n = terminator
            if lb < n:
                # not enough yet; hand over what we have and shrink the count
                self.collect_incoming_data(self.ac_in_buffer)
                self.ac_in_buffer = b''
                self.terminator = self.terminator - lb
            else:
                self.collect_incoming_data(self.ac_in_buffer[:n])
                self.ac_in_buffer = self.ac_in_buffer[n:]
                self.terminator = 0
                self.found_terminator()
        else:
            # 3 cases:
            # 1) end of buffer matches terminator exactly:
            #    collect data, transition
            # 2) end of buffer matches some prefix:
            #    collect data to the prefix
            # 3) end of buffer does not match any prefix:
            #    collect data
            terminator_len = len(terminator)
            index = self.ac_in_buffer.find(terminator)
            if index != -1:
                # we found the terminator
                if index > 0:
                    # don't bother reporting the empty string
                    # (source of subtle bugs)
                    self.collect_incoming_data(self.ac_in_buffer[:index])
                self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                # This does the Right Thing if the terminator
                # is changed here.
                self.found_terminator()
            else:
                # check for a prefix of the terminator
                index = find_prefix_at_end(self.ac_in_buffer, terminator)
                if index:
                    if index != lb:
                        # we found a prefix, collect up to the prefix
                        self.collect_incoming_data(self.ac_in_buffer[:-index])
                        self.ac_in_buffer = self.ac_in_buffer[-index:]
                    break
                else:
                    # no prefix, collect it all
                    self.collect_incoming_data(self.ac_in_buffer)
                    self.ac_in_buffer = b''
|
||||
|
||||
def handle_write(self):
    # Socket became writable: try to flush the producer fifo.
    self.initiate_send()
|
||||
|
||||
def handle_close(self):
    # Peer closed (or fatal condition): tear the channel down.
    self.close()
|
||||
|
||||
def push(self, data):
    """Queue bytes-like data for output, split into ac_out_buffer_size
    chunks, and kick off sending.

    Raises TypeError if data is not bytes-ish.
    """
    if not isinstance(data, (bytes, bytearray, memoryview)):
        # BUG FIX: the original passed (message, type) as two TypeError
        # arguments, so the %r placeholder was never interpolated; use
        # %-formatting so the offending type appears in the message.
        raise TypeError('data argument must be byte-ish (%r)' % type(data))
    sabs = self.ac_out_buffer_size
    if len(data) > sabs:
        # pre-chunk large payloads so initiate_send works in bounded slices
        for i in range(0, len(data), sabs):
            self.producer_fifo.append(data[i:i+sabs])
    else:
        self.producer_fifo.append(data)
    self.initiate_send()
|
||||
|
||||
def push_with_producer(self, producer):
    # Queue a producer object (anything with a more() method) for output.
    self.producer_fifo.append(producer)
    self.initiate_send()
|
||||
|
||||
def readable(self):
    "predicate for inclusion in the readable for select()"
    # cannot use the old predicate, it violates the claim of the
    # set_terminator method.

    # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
    # NOTE: returns the legacy truthy 1 (kept for compatibility), so this
    # channel is always polled for reading.
    return 1
|
||||
|
||||
def writable(self):
    "predicate for inclusion in the writable for select()"
    # truthy while output is pending or while a connect is still in flight;
    # note this returns the fifo itself (truthy when non-empty), not a bool
    return self.producer_fifo or (not self.connected)
|
||||
|
||||
def close_when_done(self):
    "automatically close this channel once the outgoing queue is empty"
    # a None sentinel in the fifo tells initiate_send() to close
    self.producer_fifo.append(None)
|
||||
|
||||
def initiate_send(self):
    """Drain the producer fifo to the socket until it would block.

    Fifo entries may be bytes-like objects or "producer" objects exposing
    more(); a None entry (pushed by close_when_done) closes the channel.
    """
    while self.producer_fifo and self.connected:
        first = self.producer_fifo[0]
        # handle empty string/buffer or None entry
        if not first:
            del self.producer_fifo[0]
            if first is None:
                # close_when_done() sentinel reached
                self.handle_close()
                return

        # handle classic producer behavior
        obs = self.ac_out_buffer_size
        try:
            data = first[:obs]
        except TypeError:
            # not sliceable -> treat as a producer object; fetch next chunk
            data = first.more()
            if data:
                self.producer_fifo.appendleft(data)
            else:
                del self.producer_fifo[0]
            continue

        if isinstance(data, str) and self.use_encoding:
            data = bytes(data, self.encoding)

        # send the data
        try:
            num_sent = self.send(data)
        except OSError:
            self.handle_error()
            return

        if num_sent:
            if num_sent < len(data) or obs < len(first):
                # partial send: keep the unsent tail at the fifo head
                self.producer_fifo[0] = first[num_sent:]
            else:
                del self.producer_fifo[0]
        # we tried to send some actual data
        return
|
||||
|
||||
def discard_buffers(self):
    """Throw away all buffered input and output immediately.

    Emergencies only!
    """
    # input side
    self.ac_in_buffer = b''
    self.incoming.clear()
    # output side
    self.producer_fifo.clear()
|
||||
|
||||
|
||||
class simple_producer:
    """Producer that hands out a payload in fixed-size chunks via more()."""

    def __init__(self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more(self):
        """Return the next chunk (at most buffer_size); b'' when exhausted."""
        if len(self.data) <= self.buffer_size:
            # final chunk: hand over everything that is left
            chunk = self.data
            self.data = b''
        else:
            chunk = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
        return chunk
|
||||
|
||||
|
||||
# Given 'haystack', see if any prefix of 'needle' is at its end. This
|
||||
# assumes an exact match has already been checked. Return the number of
|
||||
# characters matched.
|
||||
# for example:
|
||||
# f_p_a_e("qwerty\r", "\r\n") => 1
|
||||
# f_p_a_e("qwertydkjf", "\r\n") => 0
|
||||
# f_p_a_e("qwerty\r\n", "\r\n") => <undefined>
|
||||
|
||||
# this could maybe be made faster with a computed regex?
|
||||
# [answer: no; circa Python-2.0, Jan 2001]
|
||||
# new python: 28961/s
|
||||
# old python: 18307/s
|
||||
# re: 12820/s
|
||||
# regex: 14035/s
|
||||
|
||||
def find_prefix_at_end(haystack, needle):
    """Return the length of the longest proper prefix of `needle` found at
    the end of `haystack` (0 when none matches).

    Assumes an exact occurrence of `needle` has already been ruled out.
    """
    for size in range(len(needle) - 1, 0, -1):
        if haystack.endswith(needle[:size]):
            return size
    return 0
|
||||
642
fail2ban-master/fail2ban/compat/asyncore.py
Normal file
642
fail2ban-master/fail2ban/compat/asyncore.py
Normal file
@@ -0,0 +1,642 @@
|
||||
# -*- Mode: Python -*-
|
||||
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
|
||||
# Author: Sam Rushing <rushing@nightmare.com>
|
||||
|
||||
# ======================================================================
|
||||
# Copyright 1996 by Sam Rushing
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and
|
||||
# its documentation for any purpose and without fee is hereby
|
||||
# granted, provided that the above copyright notice appear in all
|
||||
# copies and that both that copyright notice and this permission
|
||||
# notice appear in supporting documentation, and that the name of Sam
|
||||
# Rushing not be used in advertising or publicity pertaining to
|
||||
# distribution of the software without specific, written prior
|
||||
# permission.
|
||||
#
|
||||
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
||||
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
|
||||
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
||||
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
||||
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
||||
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
# ======================================================================
|
||||
|
||||
"""Basic infrastructure for asynchronous socket service clients and servers.
|
||||
|
||||
There are only two ways to have a program on a single processor do "more
|
||||
than one thing at a time". Multi-threaded programming is the simplest and
|
||||
most popular way to do it, but there is another very different technique,
|
||||
that lets you have nearly all the advantages of multi-threading, without
|
||||
actually using multiple threads. it's really only practical if your program
|
||||
is largely I/O bound. If your program is CPU bound, then pre-emptive
|
||||
scheduled threads are probably what you really need. Network servers are
|
||||
rarely CPU-bound, however.
|
||||
|
||||
If your operating system supports the select() system call in its I/O
|
||||
library (and nearly all do), then you can use it to juggle multiple
|
||||
communication channels at once; doing other work while your I/O is taking
|
||||
place in the "background." Although this strategy can seem strange and
|
||||
complex, especially at first, it is in many ways easier to understand and
|
||||
control than multi-threaded programming. The module documented here solves
|
||||
many of the difficult problems for you, making the task of building
|
||||
sophisticated high-performance network servers and clients a snap.
|
||||
"""
|
||||
|
||||
import select
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
import os
|
||||
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
|
||||
ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
|
||||
errorcode
|
||||
|
||||
_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
|
||||
EBADF})
|
||||
|
||||
try:
|
||||
socket_map
|
||||
except NameError:
|
||||
socket_map = {}
|
||||
|
||||
def _strerror(err):
    """Best-effort textual description of an errno value."""
    try:
        return os.strerror(err)
    except (ValueError, OverflowError, NameError):
        pass
    # os.strerror could not handle it; fall back to the symbolic name table
    if err in errorcode:
        return errorcode[err]
    return "Unknown error %s" % err
|
||||
|
||||
class ExitNow(Exception):
    # Raised inside a handler to abort the asyncore loop cleanly.
    pass

# exceptions that must propagate to the caller rather than being routed
# to handle_error()
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
|
||||
|
||||
def read(obj):
    # Dispatch a read event to obj; any unexpected error goes to
    # obj.handle_error() instead of unwinding the whole poll loop.
    try:
        obj.handle_read_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()
|
||||
|
||||
def write(obj):
    # Dispatch a write event to obj; see read() for the error policy.
    try:
        obj.handle_write_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()
|
||||
|
||||
def _exception(obj):
    # Dispatch an exceptional-condition (OOB/error) event to obj.
    try:
        obj.handle_expt_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()
|
||||
|
||||
def readwrite(obj, flags):
    # Dispatch poll()-style event flags to the matching handler(s) on obj.
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_expt_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except OSError as e:
        # disconnect-class errnos mean the peer went away: close quietly
        if e.errno not in _DISCONNECTED:
            obj.handle_error()
        else:
            obj.handle_close()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()
|
||||
|
||||
def poll(timeout=0.0, map=None):
    """Run one select()-based polling pass over the channels in map."""
    if map is None:
        map = socket_map
    if map:
        r = []; w = []; e = []
        for fd, obj in list(map.items()):
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                r.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)
        if [] == r == w == e:
            # nothing to watch: just burn the timeout to avoid a busy loop
            time.sleep(timeout)
            return

        r, w, e = select.select(r, w, e, timeout)

        # map.get(): a handler may have removed a channel mid-pass
        for fd in r:
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)

        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)

        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            _exception(obj)
|
||||
|
||||
def poll2(timeout=0.0, map=None):
    """Run one polling pass using select.poll() instead of select()."""
    # Use the poll() support added to the select module in Python 2.0
    if map is None:
        map = socket_map
    if timeout is not None:
        # timeout is in milliseconds
        timeout = int(timeout*1000)
    pollster = select.poll()
    if map:
        for fd, obj in list(map.items()):
            flags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
            # accepting sockets should not be writable
            if obj.writable() and not obj.accepting:
                flags |= select.POLLOUT
            if flags:
                pollster.register(fd, flags)

        r = pollster.poll(timeout)
        for fd, flags in r:
            obj = map.get(fd)
            if obj is None:
                continue
            readwrite(obj, flags)

poll3 = poll2  # Alias for backward compatibility
|
||||
|
||||
def loop(timeout=30.0, use_poll=False, map=None, count=None):
    """Drive the event loop: poll the channel map until it is empty, or
    for at most `count` passes when count is given."""
    if map is None:
        map = socket_map

    # pick the poll()-based pass when requested and available
    poll_fun = poll2 if (use_poll and hasattr(select, 'poll')) else poll

    if count is None:
        while map:
            poll_fun(timeout, map)
    else:
        while map and count > 0:
            poll_fun(timeout, map)
            count -= 1
|
||||
|
||||
class dispatcher:
    """Thin wrapper around a non-blocking socket that plugs into the
    asyncore event loop via the module-level channel map.

    Subclasses override the handle_*() hooks; the readable()/writable()
    predicates decide which events this channel is polled for.
    """

    debug = False
    connected = False
    accepting = False
    connecting = False
    closing = False
    addr = None
    ignore_log_types = frozenset({'warning'})

    def __init__(self, sock=None, map=None):
        if map is None:
            self._map = socket_map
        else:
            self._map = map

        self._fileno = None

        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(False)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except OSError as err:
                if err.errno in (ENOTCONN, EINVAL):
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self.del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        # e.g. <pkg.cls connected 1.2.3.4:80 at 0x...>
        status = [self.__class__.__module__+"."+self.__class__.__qualname__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                # addr is not a (host, port) pair (e.g. a unix path)
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    def add_channel(self, map=None):
        # register this channel in the polled map
        #self.log_info('adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def del_channel(self, map=None):
        # unregister this channel from the polled map
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            #self.log_info('closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM):
        # create a fresh non-blocking socket and adopt it
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(False)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        # adopt an existing socket and register its fd
        self.socket = sock
        self._fileno = sock.fileno()
        self.add_channel(map)

    def set_reuse_addr(self):
        # try to re-use a server port if possible
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
                )
        except OSError:
            pass

    # ==================================================
    # predicates for select()
    # these are used as filters for the lists of sockets
    # to pass to select().
    # ==================================================

    def readable(self):
        return True

    def writable(self):
        return True

    # ==================================================
    # socket object methods.
    # ==================================================

    def listen(self, num):
        self.accepting = True
        # historical WinSock limit on the listen backlog
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, addr):
        self.addr = addr
        return self.socket.bind(addr)

    def connect(self, address):
        self.connected = False
        self.connecting = True
        err = self.socket.connect_ex(address)
        # in-progress errnos mean the non-blocking connect is pending
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
        or err == EINVAL and os.name == 'nt':
            self.addr = address
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise OSError(err, errorcode[err])

    def accept(self):
        # XXX can return either an address pair or None
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            return None
        except OSError as why:
            if why.errno in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                return None
            else:
                raise
        else:
            return conn, addr

    def send(self, data):
        # returns the number of bytes sent; 0 when it would block or the
        # peer disconnected (in which case the channel is closed)
        try:
            result = self.socket.send(data)
            return result
        except OSError as why:
            if why.errno == EWOULDBLOCK:
                return 0
            elif why.errno in _DISCONNECTED:
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        try:
            data = self.socket.recv(buffer_size)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return b''
            else:
                return data
        except OSError as why:
            # winsock sometimes raises ENOTCONN
            if why.errno in _DISCONNECTED:
                self.handle_close()
                return b''
            else:
                raise

    def close(self):
        # reset state, unregister and close the underlying socket
        self.connected = False
        self.accepting = False
        self.connecting = False
        self.del_channel()
        if self.socket is not None:
            try:
                self.socket.close()
            except OSError as why:
                if why.errno not in (ENOTCONN, EBADF):
                    raise

    # log and log_info may be overridden to provide more sophisticated
    # logging and warning methods. In general, log is for 'hit' logging
    # and 'log_info' is for informational, warning and error logging.

    def log(self, message):
        sys.stderr.write('log: %s\n' % str(message))

    def log_info(self, message, type='info'):
        if type not in self.ignore_log_types:
            print('%s: %s' % (type, message))

    def handle_read_event(self):
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            if self.connecting:
                self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        # a pending non-blocking connect has completed (or failed)
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise OSError(err, _strerror(err))
        self.handle_connect()
        self.connected = True
        self.connecting = False

    def handle_write_event(self):
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return

        if not self.connected:
            if self.connecting:
                self.handle_connect_event()
        self.handle_write()

    def handle_expt_event(self):
        # handle_expt_event() is called if there might be an error on the
        # socket, or if there is OOB data
        # check for the error condition first
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            # we can get here when select.select() says that there is an
            # exceptional condition on the socket
            # since there is an error, we'll go ahead and close the socket
            # like we would in a subclassed handle_read() that received no
            # data
            self.handle_close()
        else:
            self.handle_expt()

    def handle_error(self):
        # last-resort handler: log the traceback and close the channel
        nil, t, v, tbinfo = compact_traceback()

        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr,
                t,
                v,
                tbinfo
                ),
            'error'
            )
        self.handle_close()

    def handle_expt(self):
        self.log_info('unhandled incoming priority event', 'warning')

    def handle_read(self):
        self.log_info('unhandled read event', 'warning')

    def handle_write(self):
        self.log_info('unhandled write event', 'warning')

    def handle_connect(self):
        self.log_info('unhandled connect event', 'warning')

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            self.handle_accepted(*pair)

    def handle_accepted(self, sock, addr):
        # default: refuse the connection; subclasses override to serve it
        sock.close()
        self.log_info('unhandled accepted event', 'warning')

    def handle_close(self):
        self.log_info('unhandled close event', 'warning')
        self.close()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# adds simple buffered output capability, useful for simple clients.
|
||||
# [for more sophisticated usage use asynchat.async_chat]
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class dispatcher_with_send(dispatcher):
    """dispatcher with a simple buffered-output layer: send() appends to
    an internal buffer which is flushed as the socket becomes writable."""

    def __init__(self, sock=None, map=None):
        dispatcher.__init__(self, sock, map)
        self.out_buffer = b''

    def initiate_send(self):
        # push up to 64 KiB of the buffer out; keep the unsent remainder
        num_sent = 0
        num_sent = dispatcher.send(self, self.out_buffer[:65536])
        self.out_buffer = self.out_buffer[num_sent:]

    def handle_write(self):
        self.initiate_send()

    def writable(self):
        # poll for writability while connecting or while output is pending
        return (not self.connected) or len(self.out_buffer)

    def send(self, data):
        if self.debug:
            self.log_info('sending %s' % repr(data))
        self.out_buffer = self.out_buffer + data
        self.initiate_send()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# used for debugging.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def compact_traceback():
    """Summarise the active exception.

    Returns ((file, function, line), exc_type, exc_value, info) where
    `info` is a compact '[file|func|line] ...' rendering of every frame.
    Must be called while an exception is being handled.
    """
    exc_type, exc_value, tb = sys.exc_info()
    if not tb:  # Must have a traceback
        raise AssertionError("traceback does not exist")
    frames = []
    while tb is not None:
        code = tb.tb_frame.f_code
        frames.append((code.co_filename, code.co_name, str(tb.tb_lineno)))
        tb = tb.tb_next

    # just to be safe: drop the traceback reference to avoid frame cycles
    del tb

    file, function, line = frames[-1]
    info = ' '.join('[%s|%s|%s]' % entry for entry in frames)
    return (file, function, line), exc_type, exc_value, info
|
||||
|
||||
def close_all(map=None, ignore_all=False):
    """Close every channel in `map` (default: the global socket_map) and
    empty the map. With ignore_all, swallow close errors other than the
    always-reraised control-flow exceptions."""
    if map is None:
        map = socket_map
    for channel in list(map.values()):
        try:
            channel.close()
        except OSError as exc:
            # an already-closed fd is never an error
            if exc.errno == EBADF:
                pass
            elif not ignore_all:
                raise
        except _reraised_exceptions:
            raise
        except:
            if not ignore_all:
                raise
    map.clear()
|
||||
|
||||
# Asynchronous File I/O:
|
||||
#
|
||||
# After a little research (reading man pages on various unixen, and
|
||||
# digging through the linux kernel), I've determined that select()
|
||||
# isn't meant for doing asynchronous file i/o.
|
||||
# Heartening, though - reading linux/mm/filemap.c shows that linux
|
||||
# supports asynchronous read-ahead. So _MOST_ of the time, the data
|
||||
# will be sitting in memory for us already when we go to read it.
|
||||
#
|
||||
# What other OS's (besides NT) support async file i/o? [VMS?]
|
||||
#
|
||||
# Regardless, this is useful for pipes, and stdin/stdout...
|
||||
|
||||
if os.name == 'posix':
    class file_wrapper:
        # Here we override just enough to make a file
        # look like a socket for the purposes of asyncore.
        # The passed fd is automatically os.dup()'d

        def __init__(self, fd):
            # duplicate so our close() never affects the caller's fd
            self.fd = os.dup(fd)

        def __del__(self):
            if self.fd >= 0:
                warnings.warn("unclosed file %r" % self, ResourceWarning,
                              source=self)
            self.close()

        def recv(self, *args):
            return os.read(self.fd, *args)

        def send(self, *args):
            return os.write(self.fd, *args)

        def getsockopt(self, level, optname, buflen=None):
            # only the SO_ERROR probe used by asyncore is supported
            if (level == socket.SOL_SOCKET and
                optname == socket.SO_ERROR and
                not buflen):
                return 0
            raise NotImplementedError("Only asyncore specific behaviour "
                                      "implemented.")

        read = recv
        write = send

        def close(self):
            # idempotent: fd is set to -1 after the first close
            if self.fd < 0:
                return
            fd = self.fd
            self.fd = -1
            os.close(fd)

        def fileno(self):
            return self.fd

    class file_dispatcher(dispatcher):
        # dispatcher over a plain file descriptor (pipe, tty, ...)

        def __init__(self, fd, map=None):
            dispatcher.__init__(self, None, map)
            self.connected = True
            try:
                fd = fd.fileno()
            except AttributeError:
                pass
            self.set_file(fd)
            # set it to non-blocking mode
            os.set_blocking(fd, False)

        def set_file(self, fd):
            self.socket = file_wrapper(fd)
            self._fileno = self.socket.fileno()
            self.add_channel()
|
||||
38
fail2ban-master/fail2ban/exceptions.py
Normal file
38
fail2ban-master/fail2ban/exceptions.py
Normal file
@@ -0,0 +1,38 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
"""Fail2Ban exceptions used by both client and server
|
||||
|
||||
"""
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
|
||||
#
|
||||
# Jails
|
||||
#
|
||||
class DuplicateJailException(Exception):
    """Raised when a jail with the same name is already registered."""
    pass
|
||||
|
||||
|
||||
class UnknownJailException(KeyError):
    """Raised when looking up a jail name that is not registered."""
    pass
|
||||
|
||||
|
||||
|
||||
532
fail2ban-master/fail2ban/helpers.py
Normal file
532
fail2ban-master/fail2ban/helpers.py
Normal file
@@ -0,0 +1,532 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier, Arturo 'Buanzo' Busleiman, Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import gc
|
||||
import locale
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from threading import Lock
|
||||
|
||||
from .server.mytime import MyTime
|
||||
import importlib
|
||||
|
||||
try:
|
||||
import ctypes
|
||||
_libcap = ctypes.CDLL('libcap.so.2')
|
||||
except:
|
||||
_libcap = None
|
||||
|
||||
|
||||
# some modules (like pyinotify, see #3487) may have dependency to asyncore, so ensure we've a path
|
||||
# to compat folder, otherwise python 3.12+ could miss them:
|
||||
def __extend_compat_path():
    # Append <this-package>/compat to sys.path so the bundled asyncore/
    # asynchat stand-ins are importable on Python 3.12+ where the stdlib
    # modules were removed.
    cp = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'compat')
    if cp not in sys.path:
        sys.path.append(cp)
# run once at import time
__extend_compat_path()
|
||||
|
||||
# encoding used for all implicit str<->bytes conversions in fail2ban
PREFER_ENC = locale.getpreferredencoding()
# correct preferred encoding if lang not set in environment:
if PREFER_ENC.startswith('ANSI_'): # pragma: no cover
    # prefer stdout's encoding when it carries a usable (non-ANSI_) codec
    if sys.stdout and sys.stdout.encoding is not None and not sys.stdout.encoding.startswith('ANSI_'):
        PREFER_ENC = sys.stdout.encoding
    # no locale configured at all -> assume UTF-8
    elif all((os.getenv(v) in (None, "") for v in ('LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG'))):
        PREFER_ENC = 'UTF-8';
|
||||
|
||||
# todo: rewrite explicit (and implicit) str-conversions via encode/decode with IO-encoding (sys.stdout.encoding),
|
||||
# e. g. inside tags-replacement by command-actions, etc.
|
||||
|
||||
#
|
||||
# Following "uni_decode", "uni_string" functions unified python independent any
|
||||
# to string converting.
|
||||
#
|
||||
# Typical example resp. work-case for understanding the coding/decoding issues:
|
||||
#
|
||||
# [isinstance('', str), isinstance(b'', str), isinstance(u'', str)]
|
||||
# [True, True, False]; # -- python2
|
||||
# [True, False, True]; # -- python3
|
||||
#
|
||||
def uni_decode(x, enc=PREFER_ENC, errors='strict'):
    """Decode bytes to str with `enc`; non-bytes values pass through.

    With errors='strict' a failed decode is retried with 'replace';
    any other errors mode propagates the codec exception.
    """
    if not isinstance(x, bytes):
        return x
    try:
        return x.decode(enc, errors)
    except (UnicodeDecodeError, UnicodeEncodeError): # pragma: no cover - unsure if reachable
        if errors != 'strict':
            raise
        return x.decode(enc, 'replace')
|
||||
def uni_string(x):
    """Convert any value to str; bytes are decoded (lossily) with the
    preferred encoding, everything else goes through str()."""
    if isinstance(x, bytes):
        return x.decode(PREFER_ENC, 'replace')
    return str(x)
|
||||
def uni_bytes(x):
    """Encode a str to UTF-8 bytes."""
    return x.encode('UTF-8')
|
||||
|
||||
def _as_bool(val):
    """Interpret `val` as a boolean; strings are true only for
    '1'/'on'/'true'/'yes' (case-insensitive), other types use bool()."""
    if isinstance(val, str):
        return val.lower() in ('1', 'on', 'true', 'yes')
    return bool(val)
|
||||
|
||||
|
||||
def formatExceptionInfo():
    """ Consistently format exception information """
    # must be called while an exception is being handled
    exc_class, exc_value = sys.exc_info()[:2]
    return (exc_class.__name__, uni_string(exc_value))
|
||||
|
||||
|
||||
#
|
||||
# Following "traceback" functions are adopted from PyMVPA distributed
|
||||
# under MIT/Expat and copyright by PyMVPA developers (i.e. me and
|
||||
# Michael). Hereby I re-license derivative work on these pieces under GPL
|
||||
# to stay in line with the main Fail2Ban license
|
||||
#
|
||||
def mbasename(s):
	"""Return the basename of *s* with any '.py' suffix stripped.

	Overly common basenames ('base', '__init__') are prefixed with the
	parent directory name to keep them distinguishable.
	"""
	name = os.path.basename(s)
	if name.endswith('.py'):
		name = name[:-3]
	if name in ('base', '__init__'):
		parent = os.path.basename(os.path.dirname(s))
		name = parent + '.' + name
	return name
||||
|
||||
class TraceBack(object):
	"""Customized traceback to be included in debug messages
	"""

	def __init__(self, compress=False):
		"""Initialize TrackBack metric

		Parameters
		----------
		compress : bool
		  if True then prefix common with previous invocation gets
		  replaced with ...
		"""
		# last rendered traceback string, kept for prefix compression
		self.__prev = ""
		self.__compress = compress

	def __call__(self):
		"""Render the current call stack as a compact 'module:line>module:line' chain."""
		# drop the last two frames (this __call__ and the logging caller)
		ftb = traceback.extract_stack(limit=100)[:-2]
		# per frame: [short module name, directory, line number]
		entries = [
			[mbasename(x[0]), os.path.dirname(x[0]), str(x[1])] for x in ftb]
		# filter out unittest and logging internals
		entries = [ [e[0], e[2]] for e in entries
			if not (e[0] in ['unittest', 'logging.__init__']
				or e[1].endswith('/unittest'))]

		# lets make it more concise: fold consecutive frames of the same
		# module into one entry with comma-separated line numbers
		entries_out = [entries[0]]
		for entry in entries[1:]:
			if entry[0] == entries_out[-1][0]:
				entries_out[-1][1] += ',%s' % entry[1]
			else:
				entries_out.append(entry)
		sftb = '>'.join(['%s:%s' % (mbasename(x[0]),
			x[1]) for x in entries_out])
		if self.__compress:
			# lets remove part which is common with previous invocation
			prev_next = sftb
			common_prefix = os.path.commonprefix((self.__prev, sftb))
			# cut the common prefix back to the last complete '>'-separated entry
			common_prefix2 = re.sub('>[^>]*$', '', common_prefix)

			if common_prefix2 != "":
				sftb = '...' + sftb[len(common_prefix2):]
			self.__prev = prev_next

		return sftb
||||
|
||||
class FormatterWithTraceBack(logging.Formatter):
	"""Formatter expanding %(tb)s and %(tbc)s in *fmt* with a traceback.

	%(tbc)s enables prefix-compressed tracebacks (common prefix with the
	previous record replaced by '...').

	TODO: might need locking in case of compressed tracebacks
	"""
	def __init__(self, fmt, *args, **kwargs):
		logging.Formatter.__init__(self, fmt=fmt, *args, **kwargs)
		# compress only when the compressed placeholder is actually used
		self._tb = TraceBack(compress=('%(tbc)s' in fmt))

	def format(self, record):
		# attach the rendered traceback under both placeholder names
		record.tbc = record.tb = self._tb()
		return logging.Formatter.format(self, record)
||||
|
||||
# When True, a broken output stream (closed/redirected pipe) terminates the process.
logging.exitOnIOError = False
def __stopOnIOError(logSys=None, logHndlr=None): # pragma: no cover
	"""Disable further logging after an unrecoverable stream I/O error.

	Detaches the first handler of *logSys*, neuters close() of the affected
	handler and flush() of all StreamHandlers (the stream is gone), and if
	logging.exitOnIOError is set, exits the process cleanly.
	"""
	if logSys and len(logSys.handlers):
		logSys.removeHandler(logSys.handlers[0])
	if logHndlr:
		# prevent a later close() of the broken handler from raising again
		logHndlr.close = lambda: None
	logging.StreamHandler.flush = lambda self: None
	#sys.excepthook = lambda *args: None
	if logging.exitOnIOError:
		try:
			sys.stderr.close()
		except:
			pass
		sys.exit(0)
|
||||
# keep the original Logger._log so the safe wrapper below can delegate to it
__origLog = logging.Logger._log
def __safeLog(self, level, msg, args, **kwargs):
	"""Safe log inject to avoid possible errors by unsafe log-handlers,
	concat, str. conversion, representation fails, etc.

	Used to intrude exception-safe _log-method instead of _log-method
	of Logger class to be always safe by logging and to get more-info about.

	See testSafeLogging test-case for more information. At least the errors
	covered in phase 3 seems to affected in all known pypy/python versions
	until now.
	"""
	try:
		# if isEnabledFor(level) already called...
		__origLog(self, level, msg, args, **kwargs)
	except (BrokenPipeError, IOError) as e: # pragma: no cover
		if e.errno == 32: # closed / broken pipe
			__stopOnIOError(self)
		raise
	except Exception as e: # pragma: no cover - unreachable if log-handler safe in this python-version
		try:
			# NOTE: the tuple below is fully evaluated BEFORE the for-loop
			# rebinds `args`, so the second entry still sees the original
			# `args` parameter despite the shadowing loop variable.
			for args in (
				("logging failed: %r on %s", (e, uni_string(msg))),
				(" args: %r", ([uni_string(a) for a in args],))
			):
				try:
					__origLog(self, level, *args)
				except: # pragma: no cover
					pass
		except: # pragma: no cover
			pass
logging.Logger._log = __safeLog
|
||||
# keep the original flush so the safe wrapper below can delegate to it
__origLogFlush = logging.StreamHandler.flush
def __safeLogFlush(self):
	"""Safe flush inject stopping endless logging on closed streams (redirected pipe).
	"""
	try:
		__origLogFlush(self)
	except (BrokenPipeError, IOError) as e: # pragma: no cover
		if e.errno == 32: # closed / broken pipe
			# no logger available here -- disable via the handler only
			__stopOnIOError(None, self)
		raise
logging.StreamHandler.flush = __safeLogFlush
||||
|
||||
def getLogger(name):
	"""Get logging.Logger instance with Fail2Ban logger name convention

	A dotted module path is collapsed to 'fail2ban.<last-component>';
	a plain name is used as-is.
	"""
	if "." in name:
		return logging.getLogger("fail2ban.%s" % name.rpartition(".")[-1])
	return logging.getLogger(name)
|
||||
def str2LogLevel(value):
	"""Convert a log-level name or numeric value to its integer level.

	Accepts an int, a digit-string, or a level name known to the logging
	module (case-insensitive). Raises ValueError for anything else.
	"""
	try:
		if isinstance(value, int) or value.isdigit():
			return int(value)
		return getattr(logging, value.upper())
	except AttributeError:
		raise ValueError("Invalid log level %r" % value)
|
||||
def getVerbosityFormat(verbosity, fmt=' %(message)s', addtime=True, padding=True):
	"""Custom log format for the verbose runs

	Builds the prefix of *fmt* according to *verbosity*; *addtime* prepends
	a timestamp and padding=False strips the fixed-width field padding.
	"""
	if verbosity > 1: # pragma: no cover
		parts = []
		if addtime:
			parts.append(' %(asctime)-15s')
		if verbosity > 2:
			parts.append(' +%(relativeCreated)5d %(thread)X %(name)-25.25s %(levelname)-5.5s')
		else:
			parts.append(' %(thread)X %(levelname)-5.5s')
		if verbosity > 3:
			parts.append(' | %(module)15.15s-%(levelno)-2d: %(funcName)-20.20s |')
		fmt = ''.join(parts) + fmt
	else: # default (not verbose):
		prefix = "%(asctime)s " if addtime else ""
		fmt = prefix + "%(name)-24s[%(process)d]: %(levelname)-7s" + fmt
	# remove padding if not needed:
	if not padding:
		fmt = re.sub(r'(?<=\))-?\d+(?:\.\d+)?s', lambda m: 's', fmt)
	return fmt
|
||||
|
||||
def excepthook(exctype, value, traceback):
	"""Except hook used to log unhandled exceptions to Fail2Ban log

	Intended to be installed as sys.excepthook: logs the active exception
	(with traceback via exc_info=True) through the "fail2ban" logger, then
	delegates to the original interpreter hook for default handling.
	"""
	getLogger("fail2ban").critical(
		"Unhandled exception in Fail2Ban:", exc_info=True)
	return sys.__excepthook__(exctype, value, traceback)
|
||||
# A '#' or ';' at line start or preceded by whitespace opens a comment
# reaching to the end of that line (multi-line aware via (?m)).
RE_REM_COMMENTS = re.compile(r'(?m)(?:^|\s)[\#;].*')
def removeComments(s):
	"""Strip '#' and ';' comments from *s*.

	Handles both whole-line comments and trailing comments after content,
	e.g. 'no comment # comment ...' -> 'no comment'.
	"""
	return RE_REM_COMMENTS.sub('', s)
|
||||
# one or more commas and/or whitespace characters (incl. new lines)
RE_SPLT_WORDS = re.compile(r'[\s,]+')
def splitwords(s, ignoreComments=False):
	"""Split *s* into words on commas, spaces and new lines.

	Returns an empty list for empty/None input; empty entries are dropped.
	With ignoreComments=True, '#'/';' comments are removed beforehand.
	"""
	if not s:
		return []
	if ignoreComments:
		s = removeComments(s)
	return [w for w in (v.strip() for v in RE_SPLT_WORDS.split(s)) if w]
|
||||
def _merge_dicts(x, y):
|
||||
"""Helper to merge dicts.
|
||||
"""
|
||||
if y:
|
||||
return {**x, **y}
|
||||
return x
|
||||
|
||||
def _merge_copy_dicts(x, y):
|
||||
"""Helper to merge dicts to guarantee a copy result (r is never x).
|
||||
"""
|
||||
return {**x, **y}
|
||||
|
||||
#
|
||||
# Following function used for parse options from parameter (e.g. `name[p1=0, p2="..."][p3='...']`).
|
||||
#
|
||||
|
||||
# regex, to extract list of options:
OPTION_CRE = re.compile(r"^([^\[]+)(?:\[(.*)\])?\s*$", re.DOTALL)
# regex, matching option name (inclusive conditional option, like n?family=inet6):
OPTION_NAME_CRE = r'[\w\-_\.]+(?:\?[\w\-_\.]+=[\w\-_\.]+)?'
# regex, to iterate over single option in option list, syntax:
# `action = act[p1="...", p2='...', p3=...]`, where the p3=... not contains `,` or ']'
# since v0.10 separator extended with `]\s*[` for support of multiple option groups, syntax
# `action = act[p1=...][p2=...]`
OPTION_EXTRACT_CRE = re.compile(
	r'\s*('+OPTION_NAME_CRE+r')=(?:"([^"]*)"|\'([^\']*)\'|([^,\]]*))(?:,|\]\s*\[|$|(?P<wrngA>.+))|,?\s*$|(?P<wrngB>.+)', re.DOTALL)
# split by new-line considering possible new-lines within options [...]:
OPTION_SPLIT_CRE = re.compile(
	r'(?:[^\[\s]+(?:\s*\[\s*(?:'+OPTION_NAME_CRE+r'=(?:"[^"]*"|\'[^\']*\'|[^,\]]*)\s*(?:,|\]\s*\[)?\s*)*\])?\s*|\S+)(?=\n\s*|\s+|$)', re.DOTALL)

def extractOptions(option):
	"""Parse `name[k1=v1, k2="v2"][k3='v3']` into a (name, options-dict) pair.

	Values may be double-quoted, single-quoted or bare; keys and bare values
	are stripped of surrounding whitespace. Raises ValueError on malformed
	syntax (including trailing garbage after an option).
	"""
	parsed = OPTION_CRE.match(option)
	if parsed is None:
		raise ValueError("unexpected option syntax")
	name, optstr = parsed.groups()
	opts = dict()
	if optstr:
		for om in OPTION_EXTRACT_CRE.finditer(optstr):
			# garbage following a well-formed option:
			if om.group("wrngA"):
				raise ValueError("unexpected syntax at %d after option %r: %s" % (
					om.start("wrngA"), om.group(1), om.group("wrngA")[0:25]))
			# no option could be matched at all:
			if om.group("wrngB"):
				raise ValueError("expected option, wrong syntax at %d: %s" % (
					om.start("wrngB"), om.group("wrngB")[0:25]))
			key = om.group(1)
			# empty trailing match (e.g. after last separator):
			if not key:
				continue
			# exactly one of groups 2..4 holds the value (quoted "", '' or bare):
			val = next(v for v in om.group(2, 3, 4) if v is not None)
			opts[key.strip()] = val.strip()
	return name, opts
|
||||
def splitWithOptions(option):
	"""Split a multi-word/multi-line option string into single definitions,
	keeping any attached `[...]` option groups with their word."""
	return OPTION_SPLIT_CRE.findall(option)
|
||||
#
|
||||
# Following facilities used for safe recursive interpolation of
|
||||
# tags (<tag>) in tagged options.
|
||||
#
|
||||
|
||||
# max tag replacement count (considering tag X in tag Y repeat):
MAX_TAG_REPLACE_COUNT = 25

# compiled RE for tag name (replacement name)
TAG_CRE = re.compile(r'<([^ <>]+)>')

def substituteRecursiveTags(inptags, conditional='',
	ignore=(), addrepl=None
):
	"""Sort out tag definitions within other tags.
	Since v.0.9.2 supports embedded interpolation (see test cases for examples).

	so:        becomes:
	a = 3      a = 3
	b = <a>_3  b = 3_3

	Parameters
	----------
	inptags : dict
		Dictionary of tags(keys) and their values.
	conditional : str
		Conditional suffix; a `<tag>?<conditional>` entry takes precedence
		over the plain `<tag>` entry when resolving a tag.
	ignore : iterable
		Tag names excluded from substitution (as key and inside values).
	addrepl : callable or None
		Optional fallback resolver, called as addrepl(tagname) for tags
		not found in *inptags*.

	Returns
	-------
	dict
		Dictionary of tags(keys) and their values, with tags
		within the values recursively replaced.
	"""
	#logSys = getLogger("fail2ban")
	tre_search = TAG_CRE.search
	tags = inptags
	# init:
	ignore = set(ignore)
	done = set()
	# calling-map objects expose getRawItem; their callable items are resolved
	# lazily by get() and must not be interpolated here
	noRecRepl = hasattr(tags, "getRawItem")
	# repeat substitution while embedded-recursive (repFlag is True)
	repCounts = {}
	while True:
		repFlag = False
		# substitute each value:
		for tag in tags.keys():
			# ignore escaped or already done (or in ignore list):
			if tag in ignore or tag in done: continue
			# ignore replacing callable items from calling map - should be converted on demand only (by get):
			if noRecRepl and callable(tags.getRawItem(tag)): continue
			value = orgval = uni_string(tags[tag])
			# search and replace all tags within value, that can be interpolated using other tags:
			m = tre_search(value)
			rplc = repCounts.get(tag, {})
			#logSys.log(5, 'TAG: %s, value: %s' % (tag, value))
			while m:
				# found replacement tag:
				rtag = m.group(1)
				# don't replace tags that should be currently ignored (pre-replacement):
				if rtag in ignore:
					m = tre_search(value, m.end())
					continue
				#logSys.log(5, 'found: %s' % rtag)
				# per-tag replacement counter guards against A-in-B-in-A cycles:
				if rtag == tag or rplc.get(rtag, 1) > MAX_TAG_REPLACE_COUNT:
					# recursive definitions are bad
					#logSys.log(5, 'recursion fail tag: %s value: %s' % (tag, value) )
					raise ValueError(
						"properties contain self referencing definitions "
						"and cannot be resolved, fail tag: %s, found: %s in %s, value: %s" %
						(tag, rtag, rplc, value))
				repl = None
				if conditional:
					repl = tags.get(rtag + '?' + conditional)
				if repl is None:
					repl = tags.get(rtag)
				# try to find tag using additional replacement (callable):
				if repl is None and addrepl is not None:
					repl = addrepl(rtag)
				if repl is None:
					# Missing tags - just continue on searching after end of match
					# Missing tags are ok - cInfo can contain aInfo elements like <HOST> and valid shell
					# constructs like <STDIN>.
					m = tre_search(value, m.end())
					continue
				# if calling map - be sure we've string:
				if not isinstance(repl, str): repl = uni_string(repl)
				value = value.replace('<%s>' % rtag, repl)
				#logSys.log(5, 'value now: %s' % value)
				# increment reference count:
				rplc[rtag] = rplc.get(rtag, 0) + 1
				# the next match for replace:
				m = tre_search(value, m.start())
			#logSys.log(5, 'TAG: %s, newvalue: %s' % (tag, value))
			# was substituted?
			if orgval != value:
				# check still contains any tag - should be repeated (possible embedded-recursive substitution):
				if tre_search(value):
					repCounts[tag] = rplc
					repFlag = True
				# copy return tags dict to prevent modifying of inptags:
				if id(tags) == id(inptags):
					tags = inptags.copy()
				tags[tag] = value
			# no more sub tags (and no possible composite), add this tag to done set (just to be faster):
			if '<' not in value: done.add(tag)
		# stop interpolation, if no replacements anymore:
		if not repFlag:
			break
	return tags
|
||||
|
||||
if _libcap:
	def prctl_set_th_name(name):
		"""Helper to set real thread name (used for identification and diagnostic purposes).

		Side effect: name can be silently truncated to 15 bytes (16 bytes with NTS zero)
		"""
		try:
			name = name.encode()
			_libcap.prctl(15, name) # PR_SET_NAME = 15
		except: # pragma: no cover
			# best effort only -- thread naming is purely diagnostic
			pass
else: # pragma: no cover
	def prctl_set_th_name(name):
		# no-op fallback when the native library is not available
		pass
||||
|
||||
|
||||
class BgService(object):
	"""Background servicing

	Prevents memory leak on some platforms/python versions,
	using forced GC in periodical intervals.
	"""

	# class-level singleton state, shared across all "instances"
	_mutex = Lock()
	_instance = None
	def __new__(cls):
		# classic singleton: always hand out the same instance
		if not cls._instance:
			cls._instance = \
				super(BgService, cls).__new__(cls)
		return cls._instance

	def __init__(self):
		# earliest time of the next GC run (epoch); start far in the past
		self.__serviceTime = -0x7fffffff
		# minimal interval between forced GC runs (seconds)
		self.__periodTime = 30
		# number of service() calls between GC considerations
		self.__threshold = 100;
		self.__count = self.__threshold;
		if hasattr(gc, 'set_threshold'):
			# disable generation-count-triggered collection; we collect explicitly
			gc.set_threshold(0)
		# don't disable auto garbage, because of non-reference-counting python's (like pypy),
		# otherwise it may leak there on objects like unix-socket, etc.
		#gc.disable()

	def service(self, force=False, wait=False):
		"""Run a GC cycle if due; return True when a collection was performed.

		force : bool
			skip the call-count and period checks
		wait : bool
			block on the mutex instead of returning immediately when busy
		"""
		self.__count -= 1
		# avoid locking if next service time don't reached
		if not force and (self.__count > 0 or MyTime.time() < self.__serviceTime):
			return False
		# return immediately if mutex already locked (other thread in servicing):
		if not BgService._mutex.acquire(wait):
			return False
		try:
			# check again in lock:
			if MyTime.time() < self.__serviceTime:
				return False
			gc.collect()
			self.__serviceTime = MyTime.time() + self.__periodTime
			self.__count = self.__threshold
			return True
		finally:
			BgService._mutex.release()
		# NOTE(review): unreachable -- the try block above always returns;
		# kept as written.
		return False
||||
208
fail2ban-master/fail2ban/protocol.py
Normal file
208
fail2ban-master/fail2ban/protocol.py
Normal file
@@ -0,0 +1,208 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import textwrap
|
||||
|
||||
def output(s):
	"""Default output handler for printing protocol.
	Used to ease mocking in the test cases.
	"""
	# single seam through which all protocol printing flows
	print(s)
|
||||
##
|
||||
# Describes the protocol used to communicate with the server.
|
||||
|
||||
class dotdict(dict):
	"""Dict subclass allowing attribute-style read access to its keys
	(e.g. CSPROTO.END instead of CSPROTO["END"]).
	"""
	def __getattr__(self, name):
		# Translate a missing key into AttributeError: __getattr__ is part of
		# the attribute protocol, and raising KeyError here leaks out of
		# hasattr() and breaks copy/pickle attribute probing.
		try:
			return self[name]
		except KeyError:
			raise AttributeError(name)

# Special byte tokens of the client/server socket protocol:
#   EMPTY - empty payload, END - terminates a command, CLOSE - closes the connection.
CSPROTO = dotdict({
	"EMPTY": b"",
	"END": b"<F2B_END_COMMAND>",
	"CLOSE": b"<F2B_CLOSE_COMMAND>"
})
||||
|
||||
protocol = [
|
||||
['', "BASIC", ""],
|
||||
["start", "starts the server and the jails"],
|
||||
["restart", "restarts the server"],
|
||||
["restart [--unban] [--if-exists] <JAIL>", "restarts the jail <JAIL> (alias for 'reload --restart ... <JAIL>')"],
|
||||
["reload [--restart] [--unban] [--all]", "reloads the configuration without restarting of the server, the option '--restart' activates completely restarting of affected jails, thereby can unban IP addresses (if option '--unban' specified)"],
|
||||
["reload [--restart] [--unban] [--if-exists] <JAIL>", "reloads the jail <JAIL>, or restarts it (if option '--restart' specified)"],
|
||||
["stop", "stops all jails and terminate the server"],
|
||||
["unban --all", "unbans all IP addresses (in all jails and database)"],
|
||||
["unban <IP> ... <IP>", "unbans <IP> (in all jails and database)"],
|
||||
["banned", "return jails with banned IPs as dictionary"],
|
||||
["banned <IP> ... <IP>]", "return list(s) of jails where given IP(s) are banned"],
|
||||
["status", "gets the current status of the server"],
|
||||
["status --all [FLAVOR]", "gets the current status of all jails, with optional output style [FLAVOR]. Flavors: 'basic' (default), 'cymru', 'short', 'stats'"],
|
||||
["stat[istic]s", "gets the current statistics of all jails as table"],
|
||||
["ping", "tests if the server is alive"],
|
||||
["echo", "for internal usage, returns back and outputs a given string"],
|
||||
["version", "return the server version"],
|
||||
['', "LOGGING", ""],
|
||||
["set loglevel <LEVEL>", "sets logging level to <LEVEL>. Levels: CRITICAL, ERROR, WARNING, NOTICE, INFO, "
|
||||
"DEBUG, TRACEDEBUG, HEAVYDEBUG or corresponding numeric value (50-5)"],
|
||||
["get loglevel", "gets the logging level"],
|
||||
["set logtarget <TARGET>", "sets logging target to <TARGET>. Can be STDOUT, STDERR, SYSLOG, SYSTEMD-JOURNAL or a file"],
|
||||
["get logtarget", "gets logging target"],
|
||||
["set syslogsocket auto|<SOCKET>", "sets the syslog socket path to auto or <SOCKET>. Only used if logtarget is SYSLOG"],
|
||||
["get syslogsocket", "gets syslog socket path"],
|
||||
["flushlogs", "flushes the logtarget if a file and reopens it. For log rotation."],
|
||||
['', "DATABASE", ""],
|
||||
["set dbfile <FILE>", "set the location of fail2ban persistent datastore. Set to \"None\" to disable"],
|
||||
["get dbfile", "get the location of fail2ban persistent datastore"],
|
||||
["set dbmaxmatches <INT>", "sets the max number of matches stored in database per ticket"],
|
||||
["get dbmaxmatches", "gets the max number of matches stored in database per ticket"],
|
||||
["set dbpurgeage <SECONDS>", "sets the max age in <SECONDS> that history of bans will be kept"],
|
||||
["get dbpurgeage", "gets the max age in seconds that history of bans will be kept"],
|
||||
['', "JAIL CONTROL", ""],
|
||||
["add <JAIL> <BACKEND>", "creates <JAIL> using <BACKEND>"],
|
||||
["start <JAIL>", "starts the jail <JAIL>"],
|
||||
["stop <JAIL>", "stops the jail <JAIL>. The jail is removed"],
|
||||
["status <JAIL> [FLAVOR]", "gets the current status of <JAIL>, with optional output style [FLAVOR]. Flavors: 'basic' (default), 'cymru', 'short', 'stats'"],
|
||||
['', "JAIL CONFIGURATION", ""],
|
||||
["set <JAIL> idle on|off", "sets the idle state of <JAIL>"],
|
||||
["set <JAIL> ignoreself true|false", "allows the ignoring of own IP addresses"],
|
||||
["set <JAIL> addignoreip <IP>", "adds <IP> to the ignore list of <JAIL>"],
|
||||
["set <JAIL> delignoreip <IP>", "removes <IP> from the ignore list of <JAIL>"],
|
||||
["set <JAIL> ignorecommand <VALUE>", "sets ignorecommand of <JAIL>"],
|
||||
["set <JAIL> ignorecache <VALUE>", "sets ignorecache of <JAIL>"],
|
||||
["set <JAIL> addlogpath <FILE> ['tail']", "adds <FILE> to the monitoring list of <JAIL>, optionally starting at the 'tail' of the file (default 'head')."],
|
||||
["set <JAIL> dellogpath <FILE>", "removes <FILE> from the monitoring list of <JAIL>"],
|
||||
["set <JAIL> logencoding <ENCODING>", "sets the <ENCODING> of the log files for <JAIL>"],
|
||||
["set <JAIL> addjournalmatch <MATCH>", "adds <MATCH> to the journal filter of <JAIL>"],
|
||||
["set <JAIL> deljournalmatch <MATCH>", "removes <MATCH> from the journal filter of <JAIL>"],
|
||||
["set <JAIL> addfailregex <REGEX>", "adds the regular expression <REGEX> which must match failures for <JAIL>"],
|
||||
["set <JAIL> delfailregex <INDEX>", "removes the regular expression at <INDEX> for failregex"],
|
||||
["set <JAIL> addignoreregex <REGEX>", "adds the regular expression <REGEX> which should match pattern to exclude for <JAIL>"],
|
||||
["set <JAIL> delignoreregex <INDEX>", "removes the regular expression at <INDEX> for ignoreregex"],
|
||||
["set <JAIL> findtime <TIME>", "sets the number of seconds <TIME> for which the filter will look back for <JAIL>"],
|
||||
["set <JAIL> bantime <TIME>", "sets the number of seconds <TIME> a host will be banned for <JAIL>"],
|
||||
["set <JAIL> datepattern <PATTERN>", "sets the <PATTERN> used to match date/times for <JAIL>"],
|
||||
["set <JAIL> usedns <VALUE>", "sets the usedns mode for <JAIL>"],
|
||||
["set <JAIL> attempt <IP> [<failure1> ... <failureN>]", "manually notify about <IP> failure"],
|
||||
["set <JAIL> banip <IP> ... <IP>", "manually Ban <IP> for <JAIL>"],
|
||||
["set <JAIL> unbanip [--report-absent] <IP> ... <IP>", "manually Unban <IP> in <JAIL>"],
|
||||
["set <JAIL> maxretry <RETRY>", "sets the number of failures <RETRY> before banning the host for <JAIL>"],
|
||||
["set <JAIL> maxmatches <INT>", "sets the max number of matches stored in memory per ticket in <JAIL>"],
|
||||
["set <JAIL> maxlines <LINES>", "sets the number of <LINES> to buffer for regex search for <JAIL>"],
|
||||
["set <JAIL> addaction <ACT>[ <PYTHONFILE> <JSONKWARGS>]", "adds a new action named <ACT> for <JAIL>. Optionally for a Python based action, a <PYTHONFILE> and <JSONKWARGS> can be specified, else will be a Command Action"],
|
||||
["set <JAIL> delaction <ACT>", "removes the action <ACT> from <JAIL>"],
|
||||
["", "COMMAND ACTION CONFIGURATION", ""],
|
||||
["set <JAIL> action <ACT> actionstart <CMD>", "sets the start command <CMD> of the action <ACT> for <JAIL>"],
|
||||
["set <JAIL> action <ACT> actionstop <CMD>", "sets the stop command <CMD> of the action <ACT> for <JAIL>"],
|
||||
["set <JAIL> action <ACT> actioncheck <CMD>", "sets the check command <CMD> of the action <ACT> for <JAIL>"],
|
||||
["set <JAIL> action <ACT> actionban <CMD>", "sets the ban command <CMD> of the action <ACT> for <JAIL>"],
|
||||
["set <JAIL> action <ACT> actionunban <CMD>", "sets the unban command <CMD> of the action <ACT> for <JAIL>"],
|
||||
["set <JAIL> action <ACT> timeout <TIMEOUT>", "sets <TIMEOUT> as the command timeout in seconds for the action <ACT> for <JAIL>"],
|
||||
["", "GENERAL ACTION CONFIGURATION", ""],
|
||||
["set <JAIL> action <ACT> <PROPERTY> <VALUE>", "sets the <VALUE> of <PROPERTY> for the action <ACT> for <JAIL>"],
|
||||
["set <JAIL> action <ACT> <METHOD>[ <JSONKWARGS>]", "calls the <METHOD> with <JSONKWARGS> for the action <ACT> for <JAIL>"],
|
||||
['', "JAIL INFORMATION", ""],
|
||||
["get <JAIL> banned", "return banned IPs of <JAIL>"],
|
||||
["get <JAIL> banned <IP> ... <IP>]", "return 1 if IP is banned in <JAIL> otherwise 0, or a list of 1/0 for multiple IPs"],
|
||||
["get <JAIL> logpath", "gets the list of the monitored files for <JAIL>"],
|
||||
["get <JAIL> logencoding", "gets the encoding of the log files for <JAIL>"],
|
||||
["get <JAIL> journalmatch", "gets the journal filter match for <JAIL>"],
|
||||
["get <JAIL> ignoreself", "gets the current value of the ignoring the own IP addresses"],
|
||||
["get <JAIL> ignoreip", "gets the list of ignored IP addresses for <JAIL>"],
|
||||
["get <JAIL> ignorecommand", "gets ignorecommand of <JAIL>"],
|
||||
["get <JAIL> failregex", "gets the list of regular expressions which matches the failures for <JAIL>"],
|
||||
["get <JAIL> ignoreregex", "gets the list of regular expressions which matches patterns to ignore for <JAIL>"],
|
||||
["get <JAIL> findtime", "gets the time for which the filter will look back for failures for <JAIL>"],
|
||||
["get <JAIL> bantime", "gets the time a host is banned for <JAIL>"],
|
||||
["get <JAIL> datepattern", "gets the pattern used to match date/times for <JAIL>"],
|
||||
["get <JAIL> usedns", "gets the usedns setting for <JAIL>"],
|
||||
["get <JAIL> banip [<SEP>|--with-time]", "gets the list of of banned IP addresses for <JAIL>. Optionally the separator character ('<SEP>', default is space) or the option '--with-time' (printing the times of ban) may be specified. The IPs are ordered by end of ban."],
|
||||
["get <JAIL> maxretry", "gets the number of failures allowed for <JAIL>"],
|
||||
["get <JAIL> maxmatches", "gets the max number of matches stored in memory per ticket in <JAIL>"],
|
||||
["get <JAIL> maxlines", "gets the number of lines to buffer for <JAIL>"],
|
||||
["get <JAIL> actions", "gets a list of actions for <JAIL>"],
|
||||
["", "COMMAND ACTION INFORMATION",""],
|
||||
["get <JAIL> action <ACT> actionstart", "gets the start command for the action <ACT> for <JAIL>"],
|
||||
["get <JAIL> action <ACT> actionstop", "gets the stop command for the action <ACT> for <JAIL>"],
|
||||
["get <JAIL> action <ACT> actioncheck", "gets the check command for the action <ACT> for <JAIL>"],
|
||||
["get <JAIL> action <ACT> actionban", "gets the ban command for the action <ACT> for <JAIL>"],
|
||||
["get <JAIL> action <ACT> actionunban", "gets the unban command for the action <ACT> for <JAIL>"],
|
||||
["get <JAIL> action <ACT> timeout", "gets the command timeout in seconds for the action <ACT> for <JAIL>"],
|
||||
["", "GENERAL ACTION INFORMATION", ""],
|
||||
["get <JAIL> actionproperties <ACT>", "gets a list of properties for the action <ACT> for <JAIL>"],
|
||||
["get <JAIL> actionmethods <ACT>", "gets a list of methods for the action <ACT> for <JAIL>"],
|
||||
["get <JAIL> action <ACT> <PROPERTY>", "gets the value of <PROPERTY> for the action <ACT> for <JAIL>"],
|
||||
]
|
||||
|
||||
|
||||
##
|
||||
# Prints the protocol in a "man" format. This is used for the
|
||||
# "-h" output of fail2ban-client.
|
||||
|
||||
def printFormatted():
	"""Print the protocol table in a man-page-like indented format
	(used for the "-h" output of fail2ban-client).

	Heading entries (empty command) are separated by blank lines; long
	descriptions are wrapped to WIDTH characters and aligned at the
	INDENT+MARGIN column.
	"""
	INDENT = 4
	MARGIN = 41
	WIDTH = 34
	firstHeading = False
	for m in protocol:
		if m[0] == '' and firstHeading:
			output("")
		firstHeading = True
		first = True
		# Bug fix: operate on a local copy of the description instead of
		# mutating the shared `protocol` table (`m[1] = ...` used to prepend
		# the alignment padding again on every repeated call).
		desc = m[1]
		if len(m[0]) >= MARGIN:
			# command overflows its column: push description onto the next wrap line
			desc = ' ' * WIDTH + desc
		for n in textwrap.wrap(desc, WIDTH, drop_whitespace=False):
			if first:
				line = ' ' * INDENT + m[0] + ' ' * (MARGIN - len(m[0])) + n.strip()
				first = False
			else:
				line = ' ' * (INDENT + MARGIN) + n.strip()
			output(line)
|
||||
|
||||
##
|
||||
# Prints the protocol in a "mediawiki" format.
|
||||
|
||||
def printWiki():
	"""Print the protocol as a MediaWiki table (one table per section)."""
	firstHeading = False
	for m in protocol:
		if m[0] != '':
			# regular command row
			output("|-")
			output("| <span style=\"white-space:nowrap;\"><tt>" + m[0] + "</tt></span> || || " + m[1])
			continue
		# section heading: close the previous table (if any) and open a new one
		if firstHeading:
			output("|}")
		__printWikiHeader(m[1], m[2])
		firstHeading = True
	output("|}")
|
||||
|
||||
def __printWikiHeader(section, desc):
	"""Emit the MediaWiki section heading and table opener for *section*."""
	for line in (
		"",
		"=== " + section + " ===",
		"",
		desc,
		"",
		"{|",
		"| '''Command''' || || '''Description'''",
	):
		output(line)
||||
25
fail2ban-master/fail2ban/server/__init__.py
Normal file
25
fail2ban-master/fail2ban/server/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
1042
fail2ban-master/fail2ban/server/action.py
Normal file
1042
fail2ban-master/fail2ban/server/action.py
Normal file
File diff suppressed because it is too large
Load Diff
745
fail2ban-master/fail2ban/server/actions.py
Normal file
745
fail2ban-master/fail2ban/server/actions.py
Normal file
@@ -0,0 +1,745 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
try:
|
||||
from collections.abc import Mapping
|
||||
except ImportError:
|
||||
from collections import Mapping
|
||||
from collections import OrderedDict
|
||||
|
||||
from .banmanager import BanManager, BanTicket
|
||||
from .ipdns import IPAddr
|
||||
from .jailthread import JailThread
|
||||
from .action import ActionBase, CommandAction, CallingMap
|
||||
from .mytime import MyTime
|
||||
from .observer import Observers
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Actions(JailThread, Mapping):
|
||||
"""Handles jail actions.
|
||||
|
||||
This class handles the actions of the jail. Creation, deletion or to
|
||||
actions must be done through this class. This class is based on the
|
||||
Mapping type, and the `add` method must be used to add new actions.
|
||||
This class also starts and stops the actions, and fetches bans from
|
||||
the jail executing these bans via the actions.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
jail: Jail
|
||||
The jail of which the actions belongs to.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
daemon
|
||||
ident
|
||||
name
|
||||
status
|
||||
active : bool
|
||||
Control the state of the thread.
|
||||
idle : bool
|
||||
Control the idle state of the thread.
|
||||
sleeptime : int
|
||||
The time the thread sleeps for in the loop.
|
||||
"""
|
||||
|
||||
def __init__(self, jail):
	"""Initialize the actions handler thread for the given jail.

	Parameters
	----------
	jail : Jail
		The jail this actions handler belongs to.
	"""
	JailThread.__init__(self, name="f2b/a."+jail.name)
	## The jail which contains this action.
	self._jail = jail
	# ordered mapping name -> action; order matters for stop/reban sequences
	self._actions = OrderedDict()
	## The ban manager.
	self.banManager = BanManager()
	# ban epoch, increased on action restarts; tickets with an older epoch get rebanned
	self.banEpoch = 0
	# timestamp of the last consistency check, to throttle repeated checks
	self.__lastConsistencyCheckTM = 0
	## Precedence of ban (over unban), so max number of tickets banned (to call an unban check):
	self.banPrecedence = 10
	## Max count of outdated tickets to unban per each __checkUnBan operation:
	self.unbanMaxCount = self.banPrecedence * 2
||||
@staticmethod
def _load_python_module(pythonModule):
	"""Load a custom action module and validate that it exposes a usable
	`Action` class derived from `ActionBase`.

	Raises RuntimeError when the module is missing the class or the class
	does not implement the required interface.
	"""
	mod = Utils.load_python_module(pythonModule)
	cls = getattr(mod, "Action", None)
	if cls is None: # pragma: no cover
		raise RuntimeError(
			"%s module does not have 'Action' class" % pythonModule)
	if not issubclass(cls, ActionBase): # pragma: no cover
		raise RuntimeError(
			"%s module %s does not implement required methods" % (
				pythonModule, cls.__name__))
	return mod
||||
def add(self, name, pythonModule=None, initOpts=None, reload=False):
	"""Adds a new action.

	Add a new action if not already present, defaulting to standard
	`CommandAction`, or specified Python module.

	Parameters
	----------
	name : str
		The name of the action.
	pythonModule : str, optional
		Path to Python file which must contain `Action` class.
		Default None, which means `CommandAction` is used.
	initOpts : dict, optional
		Options for Python Action, used as keyword arguments for
		initialisation. Default None.
	reload : bool, optional
		If True and the action already exists, schedule it for reload
		instead of raising ValueError. Default False.

	Raises
	------
	ValueError
		If action name already exists.
	RuntimeError
		If external Python module does not have `Action` class
		or does not implement necessary methods as per `ActionBase`
		abstract class.
	"""
	# Check is action name already exists
	if name in self._actions:
		if not reload:
			raise ValueError("Action %s already exists" % name)
		# don't create new action if reload supported:
		action = self._actions[name]
		if hasattr(action, 'reload'):
			# don't execute reload right now, reload after all parameters are actualized
			if hasattr(action, 'clearAllParams'):
				action.clearAllParams()
			# remember the options; reload(begin=False) applies them later
			self._reload_actions[name] = initOpts
			return
	## Create new action:
	if pythonModule is None:
		action = CommandAction(self._jail, name)
	else:
		customActionModule = self._load_python_module(pythonModule)
		action = customActionModule.Action(self._jail, name, **initOpts)
	self._actions[name] = action
||||
def reload(self, begin=True):
	""" Begin or end of reloading resp. refreshing of all parameters

	With begin=True a fresh collection of touched actions is started;
	with begin=False every touched action is reloaded with its new
	options, and actions untouched during the reload are flushed,
	stopped and removed.
	"""
	if begin:
		self._reload_actions = dict()
	else:
		if hasattr(self, '_reload_actions'):
			# reload actions after all parameters set via stream:
			for name, initOpts in self._reload_actions.items():
				if name in self._actions:
					self._actions[name].reload(**(initOpts if initOpts else {}))
			# remove obsolete actions (untouched by reload process):
			delacts = OrderedDict((name, action) for name, action in self._actions.items()
				if name not in self._reload_actions)
			if len(delacts):
				# unban all tickets using removed actions only:
				self.__flushBan(db=False, actions=delacts, stop=True)
				# stop and remove it:
				self.stopActions(actions=delacts)
			delattr(self, '_reload_actions')
||||
def __getitem__(self, name):
	"""Return the action registered under *name* (Mapping protocol)."""
	if name not in self._actions:
		raise KeyError("Invalid Action name: %s" % name)
	return self._actions[name]
||||
def __delitem__(self, name):
	"""Remove the action registered under *name* (Mapping protocol)."""
	if name not in self._actions:
		raise KeyError("Invalid Action name: %s" % name)
	del self._actions[name]
||||
def __iter__(self):
	# Mapping protocol: iterate over registered action names.
	return iter(self._actions)

def __len__(self):
	# Mapping protocol: number of registered actions.
	return len(self._actions)

def __eq__(self, other): # Required for Threading
	# intentionally never equal - each Actions thread is unique
	return False

def __hash__(self): # Required for Threading
	# identity-based hash, consistent with __eq__ above
	return id(self)
||||
##
# Set the ban time.
#
# @param value the time

def setBanTime(self, value):
	"""Set the default ban time of the ban manager.

	Parameters
	----------
	value : int or str
		Ban time in seconds or as an abbreviated time string
		(parsed via `MyTime.str2seconds`).
	"""
	value = MyTime.str2seconds(value)
	self.banManager.setBanTime(value)
	# lazy %-style args instead of eager interpolation: the string is only
	# formatted when INFO logging is actually enabled
	logSys.info(" banTime: %s", value)
||||
##
# Get the ban time.
#
# @return the time

def getBanTime(self):
	# delegate to the ban manager (value in seconds)
	return self.banManager.getBanTime()
||||
def getBanned(self, ids):
	"""Return the whole ban list when *ids* is empty, a single 0/1 flag
	for one ID, or a list of 0/1 flags for several IDs."""
	banned = self.banManager.getBanList()
	if not ids:
		return banned
	if len(ids) == 1:
		return int(ids[0] in banned)
	return [int(ip in banned) for ip in ids]
||||
def getBanList(self, withTime=False):
	"""Returns the list of banned IP addresses.

	Parameters
	----------
	withTime : bool
		If True, ban time information is included with each entry.

	Returns
	-------
	list
		The list of banned IP addresses.
	"""
	return self.banManager.getBanList(ordered=True, withTime=withTime)
|
||||
def addBannedIP(self, ip):
	"""Ban an IP or list of IPs."""
	now = MyTime.time()
	if isinstance(ip, list):
		# multiple IPs - one ticket per address, all stamped with the same time
		tickets = (BanTicket(i, now) for i in ip)
	else:
		# single IP
		tickets = (BanTicket(ip, now),)
	return self.__checkBan(tickets)
||||
def removeBannedIP(self, ip=None, db=True, ifexists=False):
	"""Removes banned IP calling actions' unban method

	Remove a banned IP now, rather than waiting for it to expire,
	even if set to never expire.

	Parameters
	----------
	ip : list, str, IPAddr or None
		The IP address (or multiple IPs as list) to unban or all IPs if None
	db : bool
		If True, also delete the ban from the persistent database.
	ifexists : bool
		If True, missing entries are silently counted as 0 instead of
		raising ValueError.

	Raises
	------
	ValueError
		If `ip` is not banned
	"""
	# Unban all?
	if ip is None:
		return self.__flushBan(db)
	# Multiple IPs:
	if isinstance(ip, (list, tuple)):
		missed = []
		cnt = 0
		# recurse per address; collect the ones that were not banned
		for i in ip:
			try:
				cnt += self.removeBannedIP(i, db, ifexists)
			except ValueError:
				if not ifexists:
					missed.append(i)
		if missed:
			raise ValueError("not banned: %r" % missed)
		return cnt
	# Single IP:
	# Always delete ip from database (also if currently not banned)
	if db and self._jail.database is not None:
		self._jail.database.delBan(self._jail, ip)
	# Find the ticket with the IP.
	ticket = self.banManager.getTicketByID(ip)
	if ticket is not None:
		# Unban the IP.
		self.__unBan(ticket)
	else:
		# Multiple IPs by subnet or dns:
		if not isinstance(ip, IPAddr):
			ipa = IPAddr(ip)
			if not ipa.isSingle: # subnet (mask/cidr) or raw (may be dns/hostname):
				# expand to every currently banned address within the subnet
				ips = list(filter(ipa.contains, self.banManager.getBanList()))
				if ips:
					return self.removeBannedIP(ips, db, ifexists)
		# not found:
		msg = "%s is not banned" % ip
		logSys.log(logging.MSG, msg)
		if ifexists:
			return 0
		raise ValueError(msg)
	return 1
|
||||
|
||||
def stopActions(self, actions=None):
	"""Stops the actions in reverse sequence (optionally filtered)

	Each stopped action is removed from the registry; stop failures are
	logged and do not prevent the remaining actions from being stopped.
	"""
	if actions is None:
		actions = self._actions
	# iterate a reversed *copy*, because the loop deletes from self._actions
	for name, action in reversed(list(actions.items())):
		try:
			action.stop()
		except Exception as e:
			logSys.error("Failed to stop jail '%s' action '%s': %s",
				self._jail.name, name, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
		del self._actions[name]
		logSys.debug("%s: action %s terminated", self._jail.name, name)
|
||||
|
||||
def run(self):
	"""Main loop for Threading.

	This function is the main loop of the thread. It checks the jail
	queue and executes commands when an IP address is banned.

	Returns
	-------
	bool
		True when the thread exits nicely.
	"""
	cnt = 0
	# start all actions; a failing start is logged but does not abort the thread
	for name, action in self._actions.items():
		try:
			action.start()
		except Exception as e:
			logSys.error("Failed to start jail '%s' action '%s': %s",
				self._jail.name, name, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
	while self.active:
		try:
			if self.idle:
				# paused: block until leaving idle mode (or deactivation)
				logSys.debug("Actions: enter idle mode")
				Utils.wait_for(lambda: not self.active or not self.idle,
					lambda: False, self.sleeptime)
				logSys.debug("Actions: leave idle mode")
				continue
			# wait for ban (stop if gets inactive, pending ban or unban):
			bancnt = 0
			# wake up no later than the next scheduled unban time
			wt = min(self.sleeptime, self.banManager._nextUnbanTime - MyTime.time())
			logSys.log(5, "Actions: wait for pending tickets %s (default %s)", wt, self.sleeptime)
			if Utils.wait_for(lambda: not self.active or self._jail.hasFailTickets, wt):
				bancnt = self.__checkBan()
				cnt += bancnt
			# unban if nothing is banned not later than banned tickets >= banPrecedence
			if not bancnt or cnt >= self.banPrecedence:
				if self.active:
					# let shrink the ban list faster
					bancnt *= 2
					logSys.log(5, "Actions: check-unban %s, bancnt %s, max: %s", bancnt if bancnt and bancnt < self.unbanMaxCount else self.unbanMaxCount, bancnt, self.unbanMaxCount)
					self.__checkUnBan(bancnt if bancnt and bancnt < self.unbanMaxCount else self.unbanMaxCount)
				cnt = 0
		except Exception as e: # pragma: no cover
			logSys.error("[%s] unhandled error in actions thread: %s",
				self._jail.name, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
	# shutdown: repeal all current bans, then stop every action
	self.__flushBan(stop=True)
	self.stopActions()
	return True
|
||||
class ActionInfo(CallingMap):
	"""Lazy tag-substitution map handed to actions on ban/unban.

	Each entry of `AI_DICT` is resolved on first access against the
	wrapped ticket and jail (via `CallingMap` semantics).
	"""

	# items included in repr() output
	CM_REPR_ITEMS = ("fid", "raw-ticket")

	# NOTE: the lambdas below are defined in class scope, so the
	# double-underscore attributes are name-mangled to _ActionInfo__ticket /
	# _ActionInfo__jail - consistent with the assignments in __init__.
	AI_DICT = {
		"ip": lambda self: self.__ticket.getIP(),
		"family": lambda self: self['ip'].familyStr,
		"ip-rev": lambda self: self['ip'].getPTR(''),
		"ip-host": lambda self: self['ip'].getHost(),
		"fid": lambda self: self.__ticket.getID(),
		"failures": lambda self: self.__ticket.getAttempt(),
		"time": lambda self: self.__ticket.getTime(),
		"bantime": lambda self: self._getBanTime(),
		"bancount": lambda self: self.__ticket.getBanCount(),
		"matches": lambda self: "\n".join(self.__ticket.getMatches()),
		# to bypass actions, that should not be executed for restored tickets
		"restored": lambda self: (1 if self.__ticket.restored else 0),
		# extra-interpolation - all match-tags (captured from the filter):
		"F-*": lambda self, tag=None: self.__ticket.getData(tag),
		# merged info:
		"ipmatches": lambda self: "\n".join(self._mi4ip(True).getMatches()),
		"ipjailmatches": lambda self: "\n".join(self._mi4ip().getMatches()),
		"ipfailures": lambda self: self._mi4ip(True).getAttempt(),
		"ipjailfailures": lambda self: self._mi4ip().getAttempt(),
		# raw ticket info:
		"raw-ticket": lambda self: repr(self.__ticket),
		# jail info:
		"jail.banned": lambda self: self.__jail.actions.banManager.size(),
		"jail.banned_total": lambda self: self.__jail.actions.banManager.getBanTotal(),
		"jail.found": lambda self: self.__jail.filter.failManager.size(),
		"jail.found_total": lambda self: self.__jail.filter.failManager.getFailTotal()
	}

	__slots__ = CallingMap.__slots__ + ('__ticket', '__jail', '__mi4ip')

	def __init__(self, ticket, jail=None, immutable=True, data=AI_DICT):
		self.__ticket = ticket
		self.__jail = jail
		self.storage = dict()
		self.immutable = immutable
		self.data = data

	def copy(self): # pragma: no cover
		# shallow copy sharing the same ticket/jail but an own data map
		return self.__class__(self.__ticket, self.__jail, self.immutable, self.data.copy())

	def _getBanTime(self):
		# ticket-specific ban time wins; fall back to the jail-wide default
		btime = self.__ticket.getBanTime()
		if btime is None: btime = self.__jail.actions.getBanTime()
		return int(btime)

	def _mi4ip(self, overalljails=False):
		"""Gets bans merged once, a helper for lambda(s), prevents stop of executing action by any exception inside.

		This function never returns None for ainfo lambdas - always a ticket (merged or single one)
		and prevents any errors through merging (to guarantee ban actions will be executed).
		[TODO] move merging to observer - here we could wait for merge and read already merged info from a database

		Parameters
		----------
		overalljails : bool
			switch to get a merged bans :
			False - (default) bans merged for current jail only
			True - bans merged for all jails of current ip address

		Returns
		-------
		BanTicket
			merged or self ticket only
		"""
		# NOTE(review): hasattr is given the unmangled string '__mi4ip' while
		# the attribute is stored under the mangled name _ActionInfo__mi4ip,
		# so this test looks always False and the cache dict is recreated per
		# call - confirm whether cross-call memoization was intended here.
		if not hasattr(self, '__mi4ip'):
			self.__mi4ip = {}
		mi = self.__mi4ip
		idx = 'all' if overalljails else 'jail'
		if idx in mi:
			return mi[idx] if mi[idx] is not None else self.__ticket
		try:
			jail = self.__jail
			ip = self['ip']
			mi[idx] = None
			if not jail.database: # pragma: no cover
				return self.__ticket
			if overalljails:
				mi[idx] = jail.database.getBansMerged(ip=ip)
			else:
				mi[idx] = jail.database.getBansMerged(ip=ip, jail=jail)
		except Exception as e:
			logSys.error(
				"Failed to get %s bans merged, jail '%s': %s",
				idx, jail.name, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
		return mi[idx] if mi[idx] is not None else self.__ticket
|
||||
|
||||
def _getActionInfo(self, ticket):
	"""Build the tag-substitution map (ActionInfo) for the given ticket."""
	# fall back to an empty ticket so interpolation always has a target
	return Actions.ActionInfo(ticket or BanTicket("", MyTime.time()), self._jail)
|
||||
def __getFailTickets(self, count=100):
	"""Generator to get maximal count failure tickets from fail-manager."""
	for _ in range(count):
		ticket = self._jail.getFailTicket()
		if not ticket:
			# queue drained - stop early
			break
		yield ticket
|
||||
def __checkBan(self, tickets=None):
	"""Check for IP address to ban.

	If tickets are not specified look in the jail queue for FailTicket. If a ticket is available,
	it executes the "ban" command and adds a ticket to the BanManager.

	Returns
	-------
	bool
		True if an IP address get banned.
	"""
	cnt = 0
	if not tickets:
		tickets = self.__getFailTickets(self.banPrecedence)
	# lazily built map of actions needing a reban (epoch increased)
	rebanacts = None
	for ticket in tickets:

		bTicket = BanTicket.wrap(ticket)
		btime = ticket.getBanTime(self.banManager.getBanTime())
		ip = bTicket.getID()
		aInfo = self._getActionInfo(bTicket)
		reason = {}
		if self.banManager.addBanTicket(bTicket, reason=reason):
			cnt += 1
			# report ticket to observer, to check time should be increased and hereafter observer writes ban to database (asynchronous)
			if Observers.Main is not None and not bTicket.restored:
				Observers.Main.add('banFound', bTicket, self._jail, btime)
			logSys.notice("[%s] %sBan %s", self._jail.name, ('' if not bTicket.restored else 'Restore '), ip)
			# do actions :
			for name, action in self._actions.items():
				try:
					if bTicket.restored and getattr(action, 'norestored', False):
						continue
					if not aInfo.immutable: aInfo.reset()
					action.ban(aInfo)
				except Exception as e:
					logSys.error(
						"Failed to execute ban jail '%s' action '%s' "
						"info '%r': %s",
						self._jail.name, name, aInfo, e,
						exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# after all actions are processed set banned flag:
			bTicket.banned = True
			if self.banEpoch: # be sure tickets always have the same ban epoch (default 0):
				bTicket.banEpoch = self.banEpoch
		else:
			if reason.get('expired', 0):
				logSys.info('[%s] Ignore %s, expired bantime', self._jail.name, ip)
				continue
			bTicket = reason.get('ticket', bTicket)
			# if already banned (otherwise still process some action)
			if bTicket.banned:
				# compare time of failure occurrence with time ticket was really banned:
				diftm = ticket.getTime() - bTicket.getTime()
				# log already banned with following level:
				#   DEBUG   - before 3 seconds - certain interval for it, because of possible latency by recognizing in backends, etc.
				#   NOTICE  - before 60 seconds - may still occur if action is slow, or very high load in backend,
				#   WARNING - after 60 seconds - very long time, something may be wrong
				ll = logging.DEBUG if diftm < 3 \
					else logging.NOTICE if diftm < 60 \
					else logging.WARNING
				logSys.log(ll, "[%s] %s already banned", self._jail.name, ip)
				# if long time after ban - do consistency check (something is wrong here):
				if bTicket.banEpoch == self.banEpoch and diftm > 3:
					# avoid too often checks:
					if not rebanacts and MyTime.time() > self.__lastConsistencyCheckTM + 3:
						self.__lastConsistencyCheckTM = MyTime.time()
						for action in self._actions.values():
							if hasattr(action, 'consistencyCheck'):
								action.consistencyCheck()
				# check epoch in order to reban it:
				if bTicket.banEpoch < self.banEpoch:
					if not rebanacts: rebanacts = dict(
						(name, action) for name, action in self._actions.items()
						if action.banEpoch > bTicket.banEpoch)
					cnt += self.__reBan(bTicket, actions=rebanacts)
			else: # pragma: no cover - unexpected: ticket is not banned for some reasons - reban using all actions:
				cnt += self.__reBan(bTicket)
	# add ban to database moved to observer (should previously check not already banned
	# and increase ticket time if "bantime.increment" set)
	if cnt:
		logSys.debug("Banned %s / %s, %s ticket(s) in %r", cnt,
			self.banManager.getBanTotal(), self.banManager.size(), self._jail.name)
	return cnt
|
||||
def __reBan(self, ticket, actions=None, log=True):
	"""Repeat bans for the ticket.

	Executes the actions in order to reban the host given in the
	ticket.

	Parameters
	----------
	ticket : Ticket
		Ticket to reban
	actions : dict, optional
		Subset of actions to execute (default: all registered actions).
	log : bool
		If True, log the reban on NOTICE level.

	Returns
	-------
	int
		1 when all rebans succeeded, 0 when any action failed.
	"""
	actions = actions or self._actions
	ip = ticket.getID()
	aInfo = self._getActionInfo(ticket)
	if log:
		logSys.notice("[%s] Reban %s%s", self._jail.name, ip, (', action %r' % list(actions.keys())[0] if len(actions) == 1 else ''))
	for name, action in actions.items():
		try:
			logSys.debug("[%s] action %r: reban %s", self._jail.name, name, ip)
			if not aInfo.immutable: aInfo.reset()
			action.reban(aInfo)
		except Exception as e:
			logSys.error(
				"Failed to execute reban jail '%s' action '%s' "
				"info '%r': %s",
				self._jail.name, name, aInfo, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# abort - the ticket stays un-rebanned and keeps its old epoch
			return 0
	# after all actions are processed set banned flag:
	ticket.banned = True
	if self.banEpoch: # be sure tickets always have the same ban epoch (default 0):
		ticket.banEpoch = self.banEpoch
	return 1
|
||||
def _prolongBan(self, ticket):
	"""Extend an active ban via all actions that support prolongation."""
	# prevent to prolong ticket that was removed in-between,
	# if it in ban list - ban time already prolonged (and it stays there):
	if not self.banManager._inBanList(ticket): return
	# do actions :
	aInfo = None
	for name, action in self._actions.items():
		try:
			if ticket.restored and getattr(action, 'norestored', False):
				continue
			if not action._prolongable:
				continue
			# build the substitution map lazily, only if some action needs it
			if aInfo is None:
				aInfo = self._getActionInfo(ticket)
			if not aInfo.immutable: aInfo.reset()
			action.prolong(aInfo)
		except Exception as e:
			logSys.error(
				"Failed to execute ban jail '%s' action '%s' "
				"info '%r': %s",
				self._jail.name, name, aInfo, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
def __checkUnBan(self, maxCount=None):
	"""Check for IP address to unban.

	Unban IP addresses which are outdated.
	"""
	expired = self.banManager.unBanList(MyTime.time(), maxCount)
	cnt = len(expired)
	for ticket in expired:
		self.__unBan(ticket)
	if cnt:
		logSys.debug("Unbanned %s, %s ticket(s) in %r",
			cnt, self.banManager.size(), self._jail.name)
	return cnt
|
||||
def __flushBan(self, db=False, actions=None, stop=False):
	"""Flush the ban list.

	Unban all IP address which are still in the banning list.

	If actions specified, don't flush list - just execute unban for
	given actions (reload, obsolete resp. removed actions).
	"""
	log = "Unban" if not stop else "Repeal Ban"
	if actions is None:
		logSys.debug(" Flush ban list")
		lst = self.banManager.flushBanList()
	else:
		log = None # don't log "[jail] Unban ..." if removing actions only.
		lst = iter(self.banManager)
	cnt = 0
	# first we'll execute flush for actions supporting this operation:
	unbactions = {}
	for name, action in (actions if actions is not None else self._actions).items():
		try:
			if hasattr(action, 'flush') and (not isinstance(action, CommandAction) or action.actionflush):
				logSys.notice("[%s] Flush ticket(s) with %s", self._jail.name, name)
				# flush succeeded - no per-ticket unban needed for this action
				if action.flush():
					continue
		except Exception as e:
			logSys.error("Failed to flush bans in jail '%s' action '%s': %s",
				self._jail.name, name, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			logSys.info("No flush occurred, do consistency check")
			if hasattr(action, 'consistencyCheck'):
				def _beforeRepair():
					# repair callback: veto the repair on stop unless explicitly enabled
					if stop and not getattr(action, 'actionrepair_on_unban', None): # don't need repair on stop
						logSys.error("Invariant check failed. Flush is impossible.")
						return False
					return True
				action.consistencyCheck(_beforeRepair)
				continue
		# fallback to single unbans:
		logSys.debug(" Unban tickets each individually")
		unbactions[name] = action
	actions = unbactions
	# flush the database also:
	if db and self._jail.database is not None:
		logSys.debug(" Flush jail in database")
		self._jail.database.delBan(self._jail)
	# unban each ticket with non-flusheable actions:
	for ticket in lst:
		# unban ip:
		self.__unBan(ticket, actions=actions, log=log)
		cnt += 1
	logSys.debug(" Unbanned %s, %s ticket(s) in %r",
		cnt, self.banManager.size(), self._jail.name)
	return cnt
|
||||
def __unBan(self, ticket, actions=None, log="Unban"):
	"""Unbans host corresponding to the ticket.

	Executes the actions in order to unban the host given in the
	ticket.

	Parameters
	----------
	ticket : FailTicket
		Ticket of failures of which to unban
	actions : dict, optional
		Subset of actions to execute (default: all registered actions).
	log : str or None
		Log prefix for the NOTICE message; None suppresses it.
	"""
	unbactions = self._actions if actions is None else actions
	ip = ticket.getID()
	aInfo = self._getActionInfo(ticket)
	if log:
		logSys.notice("[%s] %s %s", self._jail.name, log, ip)
	for actName, act in unbactions.items():
		try:
			logSys.debug("[%s] action %r: unban %s", self._jail.name, actName, ip)
			if not aInfo.immutable:
				aInfo.reset()
			act.unban(aInfo)
		except Exception as e:
			# a failing unban must not prevent the remaining actions
			logSys.error(
				"Failed to execute unban jail '%s' action '%s' "
				"info '%r': %s",
				self._jail.name, actName, aInfo, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
def status(self, flavor="basic"):
	"""Status of current and total ban counts and current banned IP list.

	Parameters
	----------
	flavor : str
		One of "short", "basic", "stats", "cymru"; unknown flavors are
		logged and fall back to the basic output.

	Returns
	-------
	list or tuple
		Key/value pairs of ban statistics (tuple of counts for "stats").
	"""
	# TODO: Allow this list to be printed as 'status' output
	supported_flavors = ["short", "basic", "stats", "cymru"]
	if flavor is None or flavor not in supported_flavors:
		# lazy %-style args instead of eager interpolation: only formatted
		# when WARNING logging is actually enabled
		logSys.warning("Unsupported extended jail status flavor %r. Supported: %s", flavor, supported_flavors)
	if flavor == "stats":
		return (self.banManager.size(), self.banManager.getBanTotal())
	# Always print this information (basic)
	if flavor != "short":
		banned = self.banManager.getBanList()
		cnt = len(banned)
	else:
		cnt = self.banManager.size()
	ret = [("Currently banned", cnt),
		   ("Total banned", self.banManager.getBanTotal())]
	if flavor != "short":
		ret += [("Banned IP list", banned)]
	if flavor == "cymru":
		cymru_info = self.banManager.getBanListExtendedCymruInfo()
		ret += \
			[("Banned ASN list", self.banManager.geBanListExtendedASN(cymru_info)),
			 ("Banned Country list", self.banManager.geBanListExtendedCountry(cymru_info)),
			 ("Banned RIR list", self.banManager.geBanListExtendedRIR(cymru_info))]
	return ret
|
||||
348
fail2ban-master/fail2ban/server/asyncserver.py
Normal file
348
fail2ban-master/fail2ban/server/asyncserver.py
Normal file
@@ -0,0 +1,348 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from pickle import dumps, loads, HIGHEST_PROTOCOL
|
||||
import errno
|
||||
import fcntl
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
from .utils import Utils
|
||||
from ..protocol import CSPROTO
|
||||
from ..helpers import logging, getLogger, formatExceptionInfo
|
||||
|
||||
# load asyncore and asynchat after helper to ensure we've a path to compat folder:
|
||||
import asynchat
|
||||
if asynchat.asyncore:
|
||||
asyncore = asynchat.asyncore
|
||||
else: # pragma: no cover - normally unreachable
|
||||
import asyncore
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
# Request handler class.
#
# This class extends asynchat in order to provide a request handler for
# incoming query.
class RequestHandler(asynchat.async_chat):

	def __init__(self, conn, transmitter):
		# conn: accepted socket; transmitter: dispatcher for decoded commands
		asynchat.async_chat.__init__(self, conn)
		self.__conn = conn
		self.__transmitter = transmitter
		# accumulates incoming chunks until the terminator arrives
		self.__buffer = []
		# Sets the terminator.
		self.set_terminator(CSPROTO.END)

	def __close(self):
		# shut down and close the underlying connection exactly once
		if self.__conn:
			conn = self.__conn
			self.__conn = None
			try:
				conn.shutdown(socket.SHUT_RDWR)
				conn.close()
			except socket.error: # pragma: no cover - normally unreachable
				pass

	def handle_close(self):
		self.__close()
		asynchat.async_chat.handle_close(self)

	def collect_incoming_data(self, data):
		#logSys.debug("Received raw data: " + str(data))
		self.__buffer.append(data)

	# exception identifies deserialization errors (exception by load in pickle):
	class LoadError(Exception):
		pass

	##
	# Handles a new request.
	#
	# This method is called once we have a complete request.

	def found_terminator(self):
		try:
			# Pop whole buffer
			message = self.__buffer
			self.__buffer = []
			# Joins the buffer items.
			message = CSPROTO.EMPTY.join(message)
			# Closes the channel if close was received
			if message == CSPROTO.CLOSE:
				self.close_when_done()
				return
			# Deserialize
			try:
				message = loads(message)
			except Exception as e:
				logSys.error('PROTO-error: load message failed: %s', e,
					exc_info=logSys.getEffectiveLevel()<logging.DEBUG)
				raise RequestHandler.LoadError(e)
			# Gives the message to the transmitter.
			if self.__transmitter:
				message = self.__transmitter.proceed(message)
			else:
				# no transmitter attached - answer with shutdown marker
				message = ['SHUTDOWN']
			# Serializes the response.
			message = dumps(message, HIGHEST_PROTOCOL)
			# Sends the response to the client.
			self.push(message + CSPROTO.END)
		except Exception as e:
			if not isinstance(e, RequestHandler.LoadError): # pragma: no cover - normally unreachable
				logSys.error("Caught unhandled exception: %r", e,
					exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# Sends the response to the client.
			message = dumps("ERROR: %s" % e, HIGHEST_PROTOCOL)
			self.push(message + CSPROTO.END)

	##
	# Handles an communication errors in request.
	#
	def handle_error(self):
		try:
			e1, e2 = formatExceptionInfo()
			logSys.error("Unexpected communication error: %s" % str(e2))
			logSys.error(traceback.format_exc().splitlines())
			# Sends the response to the client.
			message = dumps("ERROR: %s" % e2, HIGHEST_PROTOCOL)
			self.push(message + CSPROTO.END)
		except Exception as e: # pragma: no cover - normally unreachable
			pass
		self.close_when_done()
|
||||
|
||||
|
||||
def loop(active, timeout=None, use_poll=False, err_count=None):
	"""Custom event loop implementation

	Uses poll instead of loop to respect `active` flag,
	to avoid loop timeout mistake: different in poll and poll2 (sec vs ms),
	and to prevent sporadic errors like EBADF 'Bad file descriptor' etc. (see gh-161)

	Parameters
	----------
	active : callable
		Returns True as long as the loop should keep polling.
	timeout : float, optional
		Poll timeout in seconds; defaults to Utils.DEFAULT_SLEEP_TIME.
	use_poll : bool or callable, optional
		A callable is used directly as poll function; otherwise a truthy
		value selects asyncore.poll2 (poll(2) based) when available.
	err_count : dict, optional
		Shared error-counter dict, mutated in place ('listen' is reset here).
	"""
	if not err_count: err_count={}
	# (re)start with a clean listener error counter:
	err_count['listen'] = 0
	if timeout is None:
		timeout = Utils.DEFAULT_SLEEP_TIME
	poll = asyncore.poll
	if callable(use_poll):
		poll = use_poll
	elif use_poll and asyncore.poll2 and hasattr(asyncore.select, 'poll'): # pragma: no cover
		logSys.debug('Server listener (select) uses poll')
		# poll2 expected a timeout in milliseconds (but poll and loop in seconds):
		# NOTE(review): asyncore.poll2 itself multiplies its timeout by 1000
		# (seconds -> ms) - confirm this division is still the intended unit
		# conversion here.
		timeout = float(timeout) / 1000
		poll = asyncore.poll2
	# Poll as long as active:
	while active():
		try:
			poll(timeout)
			# successful iteration - decay the error counter again:
			if err_count['listen']:
				err_count['listen'] -= 1
		except Exception as e:
			if not active():
				break
			err_count['listen'] += 1
			if err_count['listen'] < 20:
				# errno.ENOTCONN - 'Socket is not connected'
				# errno.EBADF - 'Bad file descriptor'
				if e.args[0] in (errno.ENOTCONN, errno.EBADF): # pragma: no cover (too sporadic)
					logSys.info('Server connection was closed: %s', str(e))
				else:
					logSys.error('Server connection was closed: %s', str(e))
			elif err_count['listen'] == 20:
				# log full stack once, then stop the noise:
				logSys.exception(e)
				logSys.error('Too many errors - stop logging connection errors')
			elif err_count['listen'] > 100: # pragma: no cover - normally unreachable
				if (
					e.args[0] == errno.EMFILE # [Errno 24] Too many open files
					or sum(err_count.values()) > 1000
				):
					logSys.critical("Too many errors - critical count reached %r", err_count)
					break
|
||||
|
||||
|
||||
##
|
||||
# Asynchronous server class.
|
||||
#
|
||||
# This class extends asyncore and dispatches connection requests to
|
||||
# RequestHandler.
|
||||
|
||||
class AsyncServer(asyncore.dispatcher):
	"""Asynchronous server listening on the fail2ban control socket.

	Extends asyncore.dispatcher and dispatches each accepted connection
	to a new RequestHandler instance.
	"""

	def __init__(self, transmitter):
		asyncore.dispatcher.__init__(self)
		# object that executes decoded client commands:
		self.__transmitter = transmitter
		# socket file path (default; overwritten in start()):
		self.__sock = "/var/run/fail2ban/fail2ban.sock"
		# True once start() initialized/bound the socket (file owned by us):
		self.__init = False
		# True while the event loop is running:
		self.__active = False
		# error counters, shared with loop() ('listen') and handle_accept ('accept'):
		self.__errCount = {'accept': 0, 'listen': 0}
		# optional callback invoked when the server is ready (before looping):
		self.onstart = None

	##
	# Returns False as we only read the socket first.

	def writable(self):
		return False

	##
	# Accepts a new client connection and hands it over to a RequestHandler.

	def handle_accept(self):
		try:
			conn, addr = self.accept()
		except Exception as e: # pragma: no cover
			self.__errCount['accept'] += 1
			if self.__errCount['accept'] < 20:
				# first errors logged verbosely (traceback for the very first one):
				logSys.warning("Accept socket error: %s", e,
					exc_info=(self.__errCount['accept'] <= 1))
			elif self.__errCount['accept'] == 20:
				logSys.error("Too many acceptor errors - stop logging errors")
			elif self.__errCount['accept'] > 100:
				# fatal situation (e. g. out of file descriptors) - stop server:
				if (
					(isinstance(e, socket.error) and e.args[0] == errno.EMFILE) # [Errno 24] Too many open files
					or sum(self.__errCount.values()) > 1000
				):
					logSys.critical("Too many errors - critical count reached %r", self.__errCount)
					self.stop()
			return
		# successful accept - decay the error counter again:
		if self.__errCount['accept']:
			self.__errCount['accept'] -= 1;
		AsyncServer.__markCloseOnExec(conn)
		# Creates an instance of the handler class to handle the
		# request/response on the incoming connection.
		RequestHandler(conn, self.__transmitter)

	##
	# Starts the communication server.
	#
	# @param sock: socket file.
	# @param force: remove the socket file if exists.
	# @param timeout: poll timeout in seconds (see loop()).
	# @param use_poll: truthy or callable - poll variant (see loop()).

	def start(self, sock, force, timeout=None, use_poll=False):
		# remember the thread running the event loop (checked in close()):
		self.__worker = threading.current_thread()
		self.__sock = sock
		# Remove socket
		if os.path.exists(sock):
			logSys.error("Fail2ban seems to be already running")
			if force:
				logSys.warning("Forcing execution of the server")
				self._remove_sock()
			else:
				raise AsyncServerException("Server already running")
		# Creates the socket.
		self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
		self.set_reuse_addr()
		try:
			self.bind(sock)
		except Exception: # pragma: no cover
			raise AsyncServerException("Unable to bind socket %s" % self.__sock)
		AsyncServer.__markCloseOnExec(self.socket)
		self.listen(1)
		# Sets the init flag.
		self.__init = self.__loop = self.__active = True
		# Execute on start event (server ready):
		if self.onstart:
			self.onstart()
		# Event loop as long as active:
		loop(lambda: self.__loop, timeout=timeout, use_poll=use_poll, err_count=self.__errCount)
		self.__active = False
		# Cleanup all
		self.stop()

	##
	# Stops the event loop, shuts the socket down and removes the socket file.

	def close(self):
		stopflg = False
		if self.__active:
			# signal the event loop in start() to terminate:
			self.__loop = False
			# shutdown socket here:
			if self.socket:
				try:
					self.socket.shutdown(socket.SHUT_RDWR)
				except socket.error: # pragma: no cover - normally unreachable
					pass
			# close connection:
			asyncore.dispatcher.close(self)
			# If not the loop thread (stops self in handler), wait (a little bit)
			# for the server leaves loop, before remove socket
			if threading.current_thread() != self.__worker:
				Utils.wait_for(lambda: not self.__active, 1)
			stopflg = True
		# Remove socket (file) only if it was created:
		if self.__init and os.path.exists(self.__sock):
			self._remove_sock()
			logSys.debug("Removed socket file " + self.__sock)
		if stopflg:
			logSys.debug("Socket shutdown")
		self.__active = False

	##
	# Stops the communication server.

	def stop_communication(self):
		if self.__transmitter:
			logSys.debug("Stop communication, shutdown")
			self.__transmitter = None

	##
	# Stops the server.

	def stop(self):
		self.stop_communication()
		self.close()

	# better remains a method (not a property) since used as a callable for wait_for
	def isActive(self):
		return self.__active

	##
	# Safe remove (in multithreaded mode):

	def _remove_sock(self):
		try:
			os.remove(self.__sock)
		except OSError as e: # pragma: no cover
			# already removed (e. g. by another thread) - not an error:
			if e.errno != errno.ENOENT:
				raise

	##
	# Marks socket as close-on-exec to avoid leaking file descriptors when
	# running actions involving command execution.
	#
	# @param sock: socket file.

	@staticmethod
	def __markCloseOnExec(sock):
		fd = sock.fileno()
		flags = fcntl.fcntl(fd, fcntl.F_GETFD)
		fcntl.fcntl(fd, fcntl.F_SETFD, flags|fcntl.FD_CLOEXEC)
|
||||
|
||||
|
||||
##
|
||||
# AsyncServerException is used to wrap communication exceptions.
|
||||
|
||||
class AsyncServerException(Exception):
	"""Wraps communication errors of AsyncServer (e. g. bind failures or
	an already running server instance)."""
	pass
|
||||
386
fail2ban-master/fail2ban/server/banmanager.py
Normal file
386
fail2ban-master/fail2ban/server/banmanager.py
Normal file
@@ -0,0 +1,386 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from threading import Lock
|
||||
|
||||
from .ticket import BanTicket
|
||||
from .mytime import MyTime
|
||||
from ..helpers import getLogger, logging
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
|
||||
# Banning Manager.
|
||||
#
|
||||
# Manage the banned IP addresses. Convert FailTicket to BanTicket.
|
||||
# This class is mainly used by the Action class.
|
||||
|
||||
class BanManager:
	"""Banning manager: bookkeeping of currently banned tickets.

	Manages the banned IP addresses (converted FailTicket -> BanTicket).
	All mutating operations are protected by a mutex, so the manager can
	be used from several threads.  This class is mainly used by the
	Action class.
	"""

	def __init__(self):
		## Mutex used to protect the ban list.
		self.__lock = Lock()
		## The ban list (ticket ID -> BanTicket).
		self.__banList = dict()
		## The amount of time an IP address gets banned.
		self.__banTime = 600
		## Total number of banned IP address
		self.__banTotal = 0
		## The time for next unban process (for performance and load reasons):
		self._nextUnbanTime = BanTicket.MAX_TIME

	def setBanTime(self, value):
		"""Set the amount of time (seconds) an address gets banned."""
		self.__banTime = int(value)

	def getBanTime(self):
		"""Return the configured ban time in seconds."""
		return self.__banTime

	def setBanTotal(self, value):
		"""Set the total number of banned addresses (e. g. restored from db)."""
		self.__banTotal = value

	def getBanTotal(self):
		"""Return the total number of banned addresses."""
		return self.__banTotal

	def getBanList(self, ordered=False, withTime=False):
		"""Return a copy of the ban list (ticket IDs).

		If *ordered* the IDs are sorted by end-of-ban time; *withTime*
		additionally annotates every entry with ban begin/length/end.
		"""
		if not ordered:
			return list(self.__banList.keys())
		with self.__lock:
			lst = []
			for ticket in self.__banList.values():
				eob = ticket.getEndOfBanTime(self.__banTime)
				lst.append((ticket,eob))
			lst.sort(key=lambda t: t[1])
			t2s = MyTime.time2str
			if withTime:
				return ['%s \t%s + %d = %s' % (
						t[0].getID(),
						t2s(t[0].getTime()), t[0].getBanTime(self.__banTime), t2s(t[1])
					) for t in lst]
			return [t[0].getID() for t in lst]

	def __iter__(self):
		"""Iterate over a snapshot of banned tickets (used in reload, so idle)."""
		# ensure iterator is safe - traverse over the list in snapshot created within lock (GIL):
		return iter(list(self.__banList.values()))

	@staticmethod
	def handleBlankResult(value):
		"""Return *value* normalized: "unknown" if it is None or empty."""
		if value is None or len(value) == 0:
			return "unknown"
		else:
			return value

	def getBanListExtendedCymruInfo(self, timeout=10):
		"""Query Team Cymru DNS zones for ASN/country/RIR of all banned IPs.

		Returns {"asn": [...], "country": [...], "rir": [...]} with one entry
		per banned IP; an "error" key is added on lookup failures.
		"""
		return_dict = {"asn": [], "country": [], "rir": []}
		if not hasattr(self, 'dnsResolver'):
			global dns
			try:
				import dns.exception
				import dns.resolver
				resolver = dns.resolver.Resolver()
				resolver.lifetime = timeout
				resolver.timeout = timeout / 2
				self.dnsResolver = resolver
			except ImportError as e: # pragma: no cover
				logSys.error("dnspython package is required but could not be imported")
				return_dict["error"] = repr(e)
				return_dict["asn"].append("error")
				return_dict["country"].append("error")
				return_dict["rir"].append("error")
				return return_dict
		# get ips in lock:
		with self.__lock:
			banIPs = [banData.getIP() for banData in list(self.__banList.values())]
		# get cymru info:
		try:
			for ip in banIPs:
				# Reference: https://www.team-cymru.com/IP-ASN-mapping.html#dns
				question = ip.getPTR(
					"origin.asn.cymru.com" if ip.isIPv4
					else "origin6.asn.cymru.com"
				)
				try:
					resolver = self.dnsResolver
					answers = resolver.query(question, "TXT")
					if not answers:
						raise ValueError("No data retrieved")
					asns = set()
					countries = set()
					rirs = set()
					for rdata in answers:
						asn, net, country, rir, changed =\
							[answer.strip("'\" ") for answer in rdata.to_text().split("|")]
						# normalize blank fields exactly once (the former code
						# applied handleBlankResult twice - redundant, since it
						# is idempotent):
						asns.add(self.handleBlankResult(asn))
						countries.add(self.handleBlankResult(country))
						rirs.add(self.handleBlankResult(rir))
					return_dict["asn"].append(', '.join(sorted(asns)))
					return_dict["country"].append(', '.join(sorted(countries)))
					return_dict["rir"].append(', '.join(sorted(rirs)))
				except dns.resolver.NXDOMAIN:
					return_dict["asn"].append("nxdomain")
					return_dict["country"].append("nxdomain")
					return_dict["rir"].append("nxdomain")
				except (dns.exception.DNSException, dns.resolver.NoNameservers, dns.exception.Timeout) as dnse: # pragma: no cover
					logSys.error("DNSException %r querying Cymru for %s TXT", dnse, question)
					if logSys.level <= logging.DEBUG:
						logSys.exception(dnse)
					return_dict["error"] = repr(dnse)
					break
				except Exception as e: # pragma: no cover
					logSys.error("Unhandled Exception %r querying Cymru for %s TXT", e, question)
					if logSys.level <= logging.DEBUG:
						logSys.exception(e)
					return_dict["error"] = repr(e)
					break
		except Exception as e: # pragma: no cover
			logSys.error("Failure looking up extended Cymru info: %s", e)
			if logSys.level <= logging.DEBUG:
				logSys.exception(e)
			return_dict["error"] = repr(e)
		return return_dict

	def geBanListExtendedASN(self, cymru_info):
		"""Return the list of banned ASNs from *cymru_info*
		(as returned by getBanListExtendedCymruInfo)."""
		try:
			return list(cymru_info["asn"])
		except Exception as e:
			logSys.error("Failed to lookup ASN")
			logSys.exception(e)
			return []

	def geBanListExtendedCountry(self, cymru_info):
		"""Return the list of banned countries from *cymru_info*
		(as returned by getBanListExtendedCymruInfo)."""
		try:
			return list(cymru_info["country"])
		except Exception as e:
			logSys.error("Failed to lookup Country")
			logSys.exception(e)
			return []

	def geBanListExtendedRIR(self, cymru_info):
		"""Return the list of banned RIRs from *cymru_info*
		(as returned by getBanListExtendedCymruInfo)."""
		try:
			return list(cymru_info["rir"])
		except Exception as e:
			logSys.error("Failed to lookup RIR")
			logSys.exception(e)
			return []

	def addBanTicket(self, ticket, reason=None):
		"""Add a BanTicket instance into the ban list.

		Parameters
		----------
		ticket : BanTicket
			The ticket to ban.
		reason : dict, optional
			Output dict, filled with the reason if the ticket was not
			added ('expired', 'ticket' and/or 'prolong' keys).

		Returns
		-------
		bool
			True if the ID was not yet in the ban list (ticket added).
		"""
		# the default used to be a shared mutable dict ({}), which was mutated
		# below and therefore accumulated state across calls - use None:
		if reason is None:
			reason = {}
		eob = ticket.getEndOfBanTime(self.__banTime)
		if eob < MyTime.time():
			reason['expired'] = 1
			return False
		with self.__lock:
			# check already banned
			fid = ticket.getID()
			oldticket = self.__banList.get(fid)
			if oldticket:
				reason['ticket'] = oldticket
				# if new time for end of ban is larger than already banned end-time:
				if eob > oldticket.getEndOfBanTime(self.__banTime):
					# we have longest ban - set new (increment) ban time
					reason['prolong'] = 1
					btm = ticket.getBanTime(self.__banTime)
					# if not permanent:
					if btm != -1:
						diftm = ticket.getTime() - oldticket.getTime()
						if diftm > 0:
							btm += diftm
					oldticket.setBanTime(btm)
				return False
			# not yet banned - add new one:
			self.__banList[fid] = ticket
			self.__banTotal += 1
			ticket.incrBanCount()
			# correct next unban time:
			if self._nextUnbanTime > eob:
				self._nextUnbanTime = eob
			return True

	def size(self):
		"""Return the number of currently banned IDs."""
		return len(self.__banList)

	def _inBanList(self, ticket):
		"""Return True if a ticket with the same ID is already banned."""
		return ticket.getID() in self.__banList

	def unBanList(self, time, maxCount=0x7fffffff):
		"""Return (and remove) the list of BanTickets to unban at *time*,
		limited to *maxCount* entries; corrects the next-unban check time."""
		with self.__lock:
			# Check next unban time:
			nextUnbanTime = self._nextUnbanTime
			if nextUnbanTime > time:
				return list()

			# Gets the list of ticket to remove (thereby correct next unban time).
			unBanList = {}
			nextUnbanTime = BanTicket.MAX_TIME
			for fid,ticket in self.__banList.items():
				# current time greater as end of ban - timed out:
				eob = ticket.getEndOfBanTime(self.__banTime)
				if time > eob:
					unBanList[fid] = ticket
					if len(unBanList) >= maxCount: # stop search cycle, so reset back the next check time
						nextUnbanTime = self._nextUnbanTime
						break
				elif nextUnbanTime > eob:
					nextUnbanTime = eob

			self._nextUnbanTime = nextUnbanTime
			# Removes tickets.
			if len(unBanList):
				if len(unBanList) / 2.0 <= len(self.__banList) / 3.0:
					# few as 2/3 should be removed - remove particular items:
					for fid in unBanList.keys():
						del self.__banList[fid]
				else:
					# create new dictionary without items to be deleted:
					self.__banList = dict((fid,ticket) for fid,ticket in self.__banList.items() \
						if fid not in unBanList)

			# return list of tickets:
			return list(unBanList.values())

	def flushBanList(self):
		"""Flush the ban list: return all banned tickets and reset the list."""
		with self.__lock:
			uBList = list(self.__banList.values())
			self.__banList = dict()
			return uBList

	def getTicketByID(self, fid):
		"""Return (and remove) the ticket for the given ID (mostly an
		IP-address), or None if no such ticket is banned."""
		with self.__lock:
			try:
				# Return the ticket after removing (popping)
				# it from the ban list.
				return self.__banList.pop(fid)
			except KeyError:
				pass
		return None # if none found
|
||||
890
fail2ban-master/fail2ban/server/database.py
Normal file
890
fail2ban-master/fail2ban/server/database.py
Normal file
@@ -0,0 +1,890 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Steven Hiscocks"
|
||||
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
|
||||
__license__ = "GPL"
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sqlite3
|
||||
import sys
|
||||
import time
|
||||
from functools import wraps
|
||||
from threading import RLock
|
||||
|
||||
from .mytime import MyTime
|
||||
from .ticket import FailTicket
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger, uni_string, PREFER_ENC
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
def _json_default(x):
	"""Fallback serializer for objects unknown to the json adapter.

	Sets become lists; anything else is rendered via uni_string, so the
	adapter never raises on exotic types.
	"""
	value = list(x) if isinstance(x, set) else x
	return uni_string(value)
|
||||
|
||||
def _json_dumps_safe(x):
	"""Serialize *x* to JSON bytes without ever raising.

	On failure the error is logged and the literal '{}' is returned, so
	the sqlite adapter layer stays exception-safe.
	"""
	try:
		encoded = json.dumps(x, ensure_ascii=False, default=_json_default)
		return encoded.encode(PREFER_ENC, 'replace')
	except Exception as e:
		# adapter handler should be exception-safe
		logSys.error('json dumps failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
		return '{}'
|
||||
|
||||
def _json_loads_safe(x):
	"""Deserialize JSON bytes into a Python object without ever raising.

	On failure the error is logged and an empty dict is returned, so the
	sqlite converter layer stays exception-safe.
	"""
	try:
		return json.loads(x.decode(PREFER_ENC, 'replace'))
	except Exception as e:
		# converter handler should be exception-safe
		logSys.error('json loads failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
		return {}
|
||||
|
||||
# Store Python dicts as JSON text in the database, and transparently convert
# columns declared as "JSON" back into Python objects when reading:
sqlite3.register_adapter(dict, _json_dumps_safe)
sqlite3.register_converter("JSON", _json_loads_safe)
|
||||
|
||||
|
||||
def commitandrollback(f):
	"""Decorator for database methods.

	Serializes access via the instance lock, wraps the call in a
	transaction (automatic commit, rollback on exception) and passes a
	fresh cursor as first argument after *self*; the cursor is always
	closed afterwards.
	"""
	@wraps(f)
	def wrapper(self, *args, **kwargs):
		# Threading lock; auto commit and rollback on exception:
		with self._lock, self._db:
			cursor = self._db.cursor()
			try:
				return f(self, cursor, *args, **kwargs)
			finally:
				cursor.close()
	return wrapper
|
||||
|
||||
|
||||
class Fail2BanDb(object):
|
||||
"""Fail2Ban database for storing persistent data.
|
||||
|
||||
This allows after Fail2Ban is restarted to reinstated bans and
|
||||
to continue monitoring logs from the same point.
|
||||
|
||||
This will either create a new Fail2Ban database, connect to an
|
||||
existing, and if applicable upgrade the schema in the process.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
filename : str
|
||||
File name for SQLite3 database, which will be created if
|
||||
doesn't already exist.
|
||||
purgeAge : int
|
||||
Purge age in seconds, used to remove old bans from
|
||||
database during purge.
|
||||
|
||||
Raises
|
||||
------
|
||||
sqlite3.OperationalError
|
||||
Error connecting/creating a SQLite3 database.
|
||||
RuntimeError
|
||||
If existing database fails to update to new schema.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
filename
|
||||
purgeage
|
||||
"""
|
||||
__version__ = 4
|
||||
# Note all SCRIPTS strings must end in ';' for py26 compatibility
|
||||
_CREATE_SCRIPTS = (
|
||||
('fail2banDb', "CREATE TABLE IF NOT EXISTS fail2banDb(version INTEGER);")
|
||||
,('jails', "CREATE TABLE IF NOT EXISTS jails(" \
|
||||
"name TEXT NOT NULL UNIQUE, " \
|
||||
"enabled INTEGER NOT NULL DEFAULT 1" \
|
||||
");" \
|
||||
"CREATE INDEX IF NOT EXISTS jails_name ON jails(name);")
|
||||
,('logs', "CREATE TABLE IF NOT EXISTS logs(" \
|
||||
"jail TEXT NOT NULL, " \
|
||||
"path TEXT, " \
|
||||
"firstlinemd5 TEXT, " \
|
||||
"lastfilepos INTEGER DEFAULT 0, " \
|
||||
"FOREIGN KEY(jail) REFERENCES jails(name) ON DELETE CASCADE, " \
|
||||
"UNIQUE(jail, path)," \
|
||||
"UNIQUE(jail, path, firstlinemd5)" \
|
||||
");" \
|
||||
"CREATE INDEX IF NOT EXISTS logs_path ON logs(path);" \
|
||||
"CREATE INDEX IF NOT EXISTS logs_jail_path ON logs(jail, path);")
|
||||
#TODO: systemd journal features \
|
||||
#"journalmatch TEXT, " \
|
||||
#"journlcursor TEXT, " \
|
||||
#"lastfiletime INTEGER DEFAULT 0, " # is this easily available
|
||||
,('bans', "CREATE TABLE IF NOT EXISTS bans(" \
|
||||
"jail TEXT NOT NULL, " \
|
||||
"ip TEXT, " \
|
||||
"timeofban INTEGER NOT NULL, " \
|
||||
"bantime INTEGER NOT NULL, " \
|
||||
"bancount INTEGER NOT NULL default 1, " \
|
||||
"data JSON, " \
|
||||
"FOREIGN KEY(jail) REFERENCES jails(name) " \
|
||||
");" \
|
||||
"CREATE INDEX IF NOT EXISTS bans_jail_timeofban_ip ON bans(jail, timeofban);" \
|
||||
"CREATE INDEX IF NOT EXISTS bans_jail_ip ON bans(jail, ip);" \
|
||||
"CREATE INDEX IF NOT EXISTS bans_ip ON bans(ip);")
|
||||
,('bips', "CREATE TABLE IF NOT EXISTS bips(" \
|
||||
"ip TEXT NOT NULL, " \
|
||||
"jail TEXT NOT NULL, " \
|
||||
"timeofban INTEGER NOT NULL, " \
|
||||
"bantime INTEGER NOT NULL, " \
|
||||
"bancount INTEGER NOT NULL default 1, " \
|
||||
"data JSON, " \
|
||||
"PRIMARY KEY(ip, jail), " \
|
||||
"FOREIGN KEY(jail) REFERENCES jails(name) " \
|
||||
");" \
|
||||
"CREATE INDEX IF NOT EXISTS bips_timeofban ON bips(timeofban);" \
|
||||
"CREATE INDEX IF NOT EXISTS bips_ip ON bips(ip);")
|
||||
)
|
||||
_CREATE_TABS = dict(_CREATE_SCRIPTS)
|
||||
|
||||
|
||||
def __init__(self, filename, purgeAge=24*60*60, outDatedFactor=3):
	"""Initialize members and connect to (or create) the database.

	Parameters
	----------
	filename : str
		Path of the SQLite3 database file.
	purgeAge : int
		Purge age in seconds (defaults to one day).
	outDatedFactor : int
		Factor applied to purgeAge to classify entries as outdated.
	"""
	# maximum count of matches stored in database per ticket:
	self.maxMatches = 10
	# reentrant lock guarding all database access:
	self._lock = RLock()
	self._dbFilename = filename
	self._purgeAge = purgeAge
	self._outDatedFactor = outDatedFactor;
	self._connectDB()
|
||||
|
||||
def _connectDB(self, checkIntegrity=False):
	"""Connect to the SQLite database, tune pragmas, check/upgrade schema.

	Creates the schema for a new database, upgrades an outdated version
	and falls back to repairDB() if the database cannot be opened.

	Parameters
	----------
	checkIntegrity : bool
		Additionally create missing tables/indices and run
		PRAGMA integrity_check (used after a repair).
	"""
	filename = self._dbFilename
	try:
		self._db = sqlite3.connect(
			filename, check_same_thread=False,
			detect_types=sqlite3.PARSE_DECLTYPES)
		# # to allow use multi-byte utf-8
		# self._db.text_factory = str

		# reset per-connection cache on (re-)connect:
		self._bansMergedCache = {}

		logSys.info(
			"Connected to fail2ban persistent database '%s'", filename)
	except sqlite3.OperationalError as e:
		logSys.error(
			"Error connecting to fail2ban persistent database '%s': %s",
			filename, e.args[0])
		raise

	# differentiate pypy: switch journal mode later (save it during the upgrade),
	# to prevent errors like "database table is locked":
	try:
		import __pypy__
		pypy = True
	except ImportError:
		pypy = False

	cur = self._db.cursor()
	try:
		cur.execute("PRAGMA foreign_keys = ON")
		# speedup: write data through OS without syncing (no wait):
		cur.execute("PRAGMA synchronous = OFF")
		# speedup: transaction log in memory, alternate using OFF (disable, rollback will be impossible):
		if not pypy:
			cur.execute("PRAGMA journal_mode = MEMORY")
		# speedup: temporary tables and indices are kept in memory:
		cur.execute("PRAGMA temp_store = MEMORY")

		cur.execute("SELECT version FROM fail2banDb LIMIT 1")
	except sqlite3.OperationalError:
		# fail2banDb table missing - this is a fresh database:
		logSys.warning("New database created. Version '%r'",
			self.createDb())
	except sqlite3.Error as e:
		logSys.error(
			"Error opening fail2ban persistent database '%s': %s",
			filename, e.args[0])
		# if not a file - raise an error:
		if not os.path.isfile(filename):
			raise
		# try to repair it:
		cur.close()
		cur = None
		self.repairDB()
	else:
		version = cur.fetchone()[0]
		if version != Fail2BanDb.__version__:
			newversion = self.updateDb(version)
			if newversion == Fail2BanDb.__version__:
				logSys.warning( "Database updated from '%r' to '%r'",
					version, newversion)
			else: # pragma: no cover
				logSys.error( "Database update failed to achieve version '%r'"
					": updated from '%r' to '%r'",
					Fail2BanDb.__version__, version, newversion)
				raise RuntimeError('Failed to fully update')
	finally:
		if checkIntegrity:
			logSys.debug(" Create missing tables/indices ...")
			self._createDb(cur, incremental=True)
			logSys.debug(" -> ok")
			logSys.debug(" Check integrity ...")
			cur.execute("PRAGMA integrity_check")
			for s in cur.fetchall():
				logSys.debug(" -> %s", ' '.join(s))
			self._db.commit()
		if cur:
			# pypy: set journal mode after possible upgrade db:
			if pypy:
				cur.execute("PRAGMA journal_mode = MEMORY")
			cur.close()
||||
|
||||
def close(self):
	"""Close the connection to the underlying SQLite database."""
	logSys.debug("Close connection to database ...")
	self._db.close()
	logSys.info("Connection to database closed.")
|
||||
|
||||
@property
def _dbBackupFilename(self):
	"""Timestamped backup file name for the database.

	Computed once and cached, so repeated repair/upgrade steps reuse the
	same backup file within this process.
	"""
	try:
		return self.__dbBackupFilename
	except AttributeError:
		# first access - derive the name from the current (GMT) time:
		self.__dbBackupFilename = self._dbFilename + '.' + time.strftime('%Y%m%d-%H%M%S', MyTime.gmtime())
		return self.__dbBackupFilename
|
||||
|
||||
def repairDB(self):
	"""Try to repair a corrupt database.

	Moves the current file to a timestamped backup, attempts a
	dump/restore via the external sqlite3 command line tool and
	reconnects; if that fails the database is recreated from scratch.
	"""
	class RepairException(Exception):
		pass
	# avoid endless recursion if reconnect failed again for some reasons:
	_repairDB = self.repairDB
	self.repairDB = None
	try:
		# backup
		logSys.info("Trying to repair database %s", self._dbFilename)
		if not os.path.isfile(self._dbBackupFilename):
			shutil.move(self._dbFilename, self._dbBackupFilename)
			logSys.info(" Database backup created: %s", self._dbBackupFilename)
		elif os.path.isfile(self._dbFilename):
			os.remove(self._dbFilename)
		# first try to repair using dump/restore in order
		Utils.executeCmd((r"""f2b_db=$0; f2b_dbbk=$1; sqlite3 "$f2b_dbbk" ".dump" | sqlite3 "$f2b_db" """,
			self._dbFilename, self._dbBackupFilename))
		dbFileSize = os.stat(self._dbFilename).st_size
		if dbFileSize:
			logSys.info(" Repair seems to be successful, restored %d byte(s).", dbFileSize)
			# succeeded - try to reconnect:
			self._connectDB(checkIntegrity=True)
		else:
			logSys.info(" Repair seems to be failed, restored %d byte(s).", dbFileSize)
			raise RepairException('Recreate ...')
	except Exception as e:
		# if still failed, just recreate database as fallback:
		logSys.error(" Error repairing of fail2ban database '%s': %s",
			self._dbFilename, e.args[0],
			exc_info=(not isinstance(e, RepairException) and logSys.getEffectiveLevel() <= 10))
		os.remove(self._dbFilename)
		self._connectDB(checkIntegrity=True)
	finally:
		# restore the method (recursion guard released):
		self.repairDB = _repairDB
|
||||
|
||||
@property
def filename(self):
	"""File name (path) of the SQLite3 database file.
	"""
	return self._dbFilename
|
||||
|
||||
@property
def purgeage(self):
	"""Purge age in seconds.
	"""
	return self._purgeAge

@purgeage.setter
def purgeage(self, value):
	# accepts fail2ban time abbreviations (e. g. "1d") via str2seconds:
	self._purgeAge = MyTime.str2seconds(value)
|
||||
|
||||
def _createDb(self, cur, incremental=False):
	"""Create the database schema and return the stored schema version.

	Called during initialisation; the creation scripts are written to be
	re-runnable ("if not exists"), so this is safe on an existing database.
	"""
	# run every creation script (all schema objects, if not exists):
	for _name, script in Fail2BanDb._CREATE_SCRIPTS:
		cur.executescript(script)
	# stamp the current schema version, unless one is recorded already:
	cur.execute("INSERT INTO fail2banDb(version)"
		" SELECT ? WHERE NOT EXISTS (SELECT 1 FROM fail2banDb LIMIT 1)",
		(Fail2BanDb.__version__, ))
	cur.execute("SELECT version FROM fail2banDb LIMIT 1")
	return cur.fetchone()[0]
|
||||
|
||||
@commitandrollback
def createDb(self, cur, incremental=False):
	"""Transactional wrapper around `_createDb`; returns the db version."""
	return self._createDb(cur, incremental)
|
||||
|
||||
def _tableExists(self, cur, table):
|
||||
cur.execute("select 1 where exists ("
|
||||
"select 1 from sqlite_master WHERE type='table' AND name=?)", (table,))
|
||||
res = cur.fetchone()
|
||||
return res is not None and res[0]
|
||||
|
||||
@commitandrollback
def updateDb(self, cur, version):
	"""Update an existing database, called during initialisation.

	A timestamped backup is also created prior to attempting the update.

	Each migration step below rebuilds a table via a temporary copy,
	because SQLite cannot alter column definitions in place.  On any
	failure the database is handed to `repairDB` as a fallback.
	"""
	if version > Fail2BanDb.__version__:
		raise NotImplementedError(
			"Attempt to travel to future version of database ...how did you get here??")
	try:
		logSys.info("Upgrade database: %s from version '%r'", self._dbBackupFilename, version)
		# keep an untouched copy of the pre-upgrade database:
		if not os.path.isfile(self._dbBackupFilename):
			shutil.copyfile(self.filename, self._dbBackupFilename)
			logSys.info(" Database backup created: %s", self._dbBackupFilename)

		# v1 -> v2: recreate "logs" with the current column layout:
		if version < 2 and self._tableExists(cur, "logs"):
			cur.executescript("BEGIN TRANSACTION;"
				"CREATE TEMPORARY TABLE logs_temp AS SELECT * FROM logs;"
				"DROP TABLE logs;"
				"%s;"
				"INSERT INTO logs SELECT * from logs_temp;"
				"DROP TABLE logs_temp;"
				"UPDATE fail2banDb SET version = 2;"
				"COMMIT;" % Fail2BanDb._CREATE_TABS['logs'])

		# v2 -> v3: add bantime/bancount columns to "bans":
		if version < 3 and self._tableExists(cur, "bans"):
			# set ban-time to -2 (note it means rather unknown, as persistent, will be fixed by restore):
			cur.executescript("BEGIN TRANSACTION;"
				"CREATE TEMPORARY TABLE bans_temp AS SELECT jail, ip, timeofban, -2 as bantime, 1 as bancount, data FROM bans;"
				"DROP TABLE bans;"
				"%s;\n"
				"INSERT INTO bans SELECT * from bans_temp;"
				"DROP TABLE bans_temp;"
				"COMMIT;" % Fail2BanDb._CREATE_TABS['bans'])
		# v3 -> v4: introduce "bips" (bad IPs) and seed it from "bans":
		if version < 4 and not self._tableExists(cur, "bips"):
			cur.executescript("BEGIN TRANSACTION;"
				"%s;\n"
				"UPDATE fail2banDb SET version = 4;"
				"COMMIT;" % Fail2BanDb._CREATE_TABS['bips'])
			if self._tableExists(cur, "bans"):
				# ordered by time so the newest ban per IP wins the REPLACE:
				cur.execute(
					"INSERT OR REPLACE INTO bips(ip, jail, timeofban, bantime, bancount, data)"
					" SELECT ip, jail, timeofban, bantime, bancount, data FROM bans order by timeofban")

		cur.execute("SELECT version FROM fail2banDb LIMIT 1")
		return cur.fetchone()[0]
	except Exception as e:
		# if still failed, just recreate database as fallback:
		logSys.error("Failed to upgrade database '%s': %s",
			self._dbFilename, e.args[0],
			exc_info=logSys.getEffectiveLevel() <= 10)
		self.repairDB()
|
||||
|
||||
@commitandrollback
def addJail(self, cur, jail):
	"""Register *jail* in the database, or re-enable it if already known.

	Parameters
	----------
	jail : Jail
		Jail to be added to the database.
	"""
	name = (jail.name,)
	cur.execute(
		"INSERT OR IGNORE INTO jails(name, enabled) VALUES(?, 1)", name)
	# insert was ignored -> the jail already exists; ensure it is enabled:
	if cur.rowcount <= 0:
		cur.execute(
			"UPDATE jails SET enabled = 1 WHERE name = ? AND enabled != 1", name)
|
||||
|
||||
@commitandrollback
def delJail(self, cur, jail):
	"""Mark *jail* as disabled in the database.

	Parameters
	----------
	jail : Jail
		Jail to be removed from the database.
	"""
	# the row itself (and its bans) is removed later by purge:
	cur.execute(
		"UPDATE jails SET enabled=0 WHERE name=?", (jail.name, ))
|
||||
|
||||
@commitandrollback
def delAllJails(self, cur):
	"""Mark every jail as disabled; rows are removed later by purge."""
	cur.execute("UPDATE jails SET enabled=0")
|
||||
|
||||
@commitandrollback
def getJailNames(self, cur, enabled=None):
	"""Get name of jails in database.

	Currently only used for testing purposes.

	Parameters
	----------
	enabled : bool or None
		If `None` return all jails, otherwise only those whose enabled
		flag matches `int(enabled)`.

	Returns
	-------
	set
		Set of jail names.
	"""
	if enabled is None:
		cur.execute("SELECT name FROM jails")
	else:
		# bind the value instead of interpolating it into the SQL text:
		cur.execute("SELECT name FROM jails WHERE enabled=?", (int(enabled),))
	# NOTE: fetchmany() without a size yields only `cur.arraysize` rows
	# (default 1), silently truncating the result; iterate the cursor
	# to collect every row:
	return set(row[0] for row in cur)
|
||||
|
||||
@commitandrollback
def addLog(self, cur, jail, container):
	"""Register *container*'s log file for *jail* in the database.

	Parameters
	----------
	jail : Jail
		Jail that log is being monitored by.
	container : FileContainer
		File container of the log file being added.

	Returns
	-------
	int
		Last stored position within the log file if it was already
		known to the database, otherwise `None`.
	"""
	return self._addLog(
		cur, jail, container.getFileName(), container.getPos(), container.getHash())
|
||||
|
||||
def _addLog(self, cur, jail, name, pos=0, md5=None):
	"""Look up (and if unknown, store) a log entry; return last position.

	Returns the stored last position for (jail, name) — or `None` when
	the log is unknown or its first-line hash differs from *md5*
	(e.g. the file was rotated, so the old position is invalid).
	"""
	lastLinePos = None
	cur.execute(
		"SELECT firstlinemd5, lastfilepos FROM logs "
		"WHERE jail=? AND path=?",
		(jail.name, name))
	try:
		firstLineMD5, lastLinePos = cur.fetchone()
	except TypeError:
		# fetchone() returned None - log not in database yet:
		firstLineMD5 = None

	# unknown log with a real position/hash supplied -> store it now:
	if firstLineMD5 is None and (pos or md5 is not None):
		cur.execute(
			"INSERT OR REPLACE INTO logs(jail, path, firstlinemd5, lastfilepos) "
			"VALUES(?, ?, ?, ?)", (jail.name, name, md5, pos))
	# hash mismatch means the file content changed - position unusable:
	if md5 is not None and md5 != firstLineMD5:
		lastLinePos = None
	return lastLinePos
|
||||
|
||||
@commitandrollback
def getLogPaths(self, cur, jail=None):
	"""Gets all the log paths from the database.

	Currently only for testing purposes.

	Parameters
	----------
	jail : Jail
		If specified, will only return logs belonging to the jail.

	Returns
	-------
	set
		Set of log paths.
	"""
	query = "SELECT path FROM logs"
	queryArgs = []
	if jail is not None:
		query += " WHERE jail=?"
		queryArgs.append(jail.name)
	cur.execute(query, queryArgs)
	# NOTE: fetchmany() without a size yields only `cur.arraysize` rows
	# (default 1), silently truncating the result; iterate the cursor
	# to collect every row:
	return set(row[0] for row in cur)
|
||||
|
||||
@commitandrollback
def updateLog(self, cur, jail, container):
	"""Persist current hash and read position of *container*'s log file.

	Parameters
	----------
	jail : Jail
		Jail of which the log file belongs to.
	container : FileContainer
		File container of the log file being updated.
	"""
	self._updateLog(
		cur, jail, container.getFileName(), container.getPos(), container.getHash())
|
||||
|
||||
def _updateLog(self, cur, jail, name, pos, md5):
|
||||
cur.execute(
|
||||
"UPDATE logs SET firstlinemd5=?, lastfilepos=? "
|
||||
"WHERE jail=? AND path=?", (md5, pos, jail.name, name))
|
||||
# be sure it is set (if not available):
|
||||
if not cur.rowcount:
|
||||
cur.execute(
|
||||
"INSERT OR REPLACE INTO logs(jail, path, firstlinemd5, lastfilepos) "
|
||||
"VALUES(?, ?, ?, ?)", (jail.name, name, md5, pos))
|
||||
|
||||
@commitandrollback
def getJournalPos(self, cur, jail, name, time=0, iso=None):
	"""Get journal position from database.

	Parameters
	----------
	jail : Jail
		Jail of which the journal belongs to.
	name, time, iso :
		Journal name (typically systemd-journal) and last known time.

	Returns
	-------
	int (or float)
		Last position (as time) if it was already present in database;
		else `None`.
	"""
	# reuses the log-file bookkeeping: there is no file hash for a journal,
	# so the time is stored as position and its iso form in the hash column:
	return self._addLog(cur, jail, name, time, iso)
|
||||
|
||||
@commitandrollback
def updateJournal(self, cur, jail, name, time, iso):
	"""Updates last position (as time) of journal.

	Parameters
	----------
	jail : Jail
		Jail of which the journal belongs to.
	name, time, iso :
		Journal name (typically systemd-journal) and last known time.
	"""
	# no file hash for a journal - time goes into the position column,
	# its iso representation into the hash column:
	self._updateLog(cur, jail, name, time, iso)
|
||||
|
||||
@commitandrollback
def addBan(self, cur, jail, ticket):
	"""Add a ban to the database.

	Inserts a history row into ``bans`` and upserts the per-IP summary
	row in ``bips``; also invalidates the merged-bans cache entries
	that could include this IP.

	Parameters
	----------
	jail : Jail
		Jail in which the ban has occurred.
	ticket : BanTicket
		Ticket of the ban to be added.
	"""
	ip = str(ticket.getID())
	# drop stale cache entries for this IP (jail-specific and all-jails):
	try:
		del self._bansMergedCache[(ip, jail)]
	except KeyError:
		pass
	try:
		del self._bansMergedCache[(ip, None)]
	except KeyError:
		pass
	#TODO: Implement data parts once arbitrary match keys completed
	data = ticket.getData()
	matches = data.get('matches')
	if self.maxMatches:
		# keep only the newest maxMatches entries (copy to avoid
		# mutating the ticket's own data dict):
		if matches and len(matches) > self.maxMatches:
			data = data.copy()
			data['matches'] = matches[-self.maxMatches:]
	elif matches:
		# match storing disabled entirely - strip them before persisting:
		data = data.copy()
		del data['matches']
	cur.execute(
		"INSERT INTO bans(jail, ip, timeofban, bantime, bancount, data) VALUES(?, ?, ?, ?, ?, ?)",
		(jail.name, ip, int(round(ticket.getTime())), ticket.getBanTime(jail.actions.getBanTime()), ticket.getBanCount(),
			data))
	cur.execute(
		"INSERT OR REPLACE INTO bips(ip, jail, timeofban, bantime, bancount, data) VALUES(?, ?, ?, ?, ?, ?)",
		(ip, jail.name, int(round(ticket.getTime())), ticket.getBanTime(jail.actions.getBanTime()), ticket.getBanCount(),
			data))
|
||||
|
||||
@commitandrollback
def delBan(self, cur, jail, *args):
	"""Delete a single or multiple tickets from the database.

	Parameters
	----------
	jail : Jail
		Jail in which the ticket(s) should be removed.
	args : list of IP
		IPs to be removed, if not given all tickets of jail will be removed.
	"""
	query1 = "DELETE FROM bips WHERE jail = ?"
	query2 = "DELETE FROM bans WHERE jail = ?"
	if not args:
		# no IPs supplied - drop everything belonging to the jail:
		cur.execute(query1, (jail.name,))
		cur.execute(query2, (jail.name,))
		return
	query1 += " AND ip = ?"
	query2 += " AND ip = ?"
	for ip in args:
		queryArgs = (jail.name, str(ip))
		cur.execute(query1, queryArgs)
		cur.execute(query2, queryArgs)
|
||||
|
||||
@commitandrollback
def _getBans(self, cur, jail=None, bantime=None, ip=None):
	"""Select raw (ip, timeofban, data) rows from the bans table."""
	conds = []
	queryArgs = []
	if jail is not None:
		conds.append("jail=?")
		queryArgs.append(jail.name)
	if bantime is not None and bantime >= 0:
		# restrict to bans that would still be in effect now:
		conds.append("timeofban > ?")
		queryArgs.append(MyTime.time() - bantime)
	if ip is not None:
		conds.append("ip=?")
		queryArgs.append(str(ip))
	query = " AND ".join(
		["SELECT ip, timeofban, data FROM bans WHERE 1"] + conds)
	query += " ORDER BY ip, timeofban desc"
	# repack iterator as long as in lock:
	return list(cur.execute(query, queryArgs))
|
||||
|
||||
def getBans(self, **kwargs):
	"""Get bans from the database.

	Parameters
	----------
	jail : Jail
		Jail that the ban belongs to. Default `None`; all jails.
	bantime : int
		Ban time in seconds, such that bans returned would still be
		valid now. Negative values are equivalent to `None`.
		Default `None`; no limit.
	ip : str
		IP Address to filter bans by. Default `None`; all IPs.

	Returns
	-------
	list
		List of `Ticket`s for bans stored in database.
	"""
	tickets = []
	for banip, bannedAt, banData in self._getBans(**kwargs):
		#TODO: Implement data parts once arbitrary match keys completed
		ticket = FailTicket(banip, bannedAt)
		ticket.setData(banData)
		tickets.append(ticket)
	return tickets
|
||||
|
||||
def getBansMerged(self, ip=None, jail=None, bantime=None):
	"""Get bans from the database, merged into single ticket.

	This is the same as `getBans`, but bans merged into single
	ticket.

	Parameters
	----------
	jail : Jail
		Jail that the ban belongs to. Default `None`; all jails.
	bantime : int
		Ban time in seconds, such that bans returned would still be
		valid now. Negative values are equivalent to `None`.
		Default `None`; no limit.
	ip : str
		IP Address to filter bans by. Default `None`; all IPs.

	Returns
	-------
	list or Ticket
		Single ticket representing bans stored in database per IP
		in a list. When `ip` argument passed, a single `Ticket` is
		returned.
	"""
	with self._lock:
		# only unbounded queries are cacheable (bantime restricts by "now"):
		cacheKey = None
		if bantime is None or bantime < 0:
			cacheKey = (ip, jail)
			if cacheKey in self._bansMergedCache:
				return self._bansMergedCache[cacheKey]

		tickets = []
		ticket = None

		results = list(self._getBans(ip=ip, jail=jail, bantime=bantime))
		if results:
			# rows arrive grouped by IP (ordered by ip, newest first);
			# accumulate per-IP state and flush on every IP change:
			prev_banip = results[0][0]
			matches = []
			failures = 0
			tickdata = {}
			for banip, timeofban, data in results:
				#TODO: Implement data parts once arbitrary match keys completed
				if banip != prev_banip:
					# IP changed - emit the merged ticket of the previous IP:
					ticket = FailTicket(prev_banip, prev_timeofban, matches)
					ticket.setAttempt(failures)
					tickets.append(ticket)
					# Reset variables
					prev_banip = banip
					matches = []
					failures = 0
					tickdata = {}
				m = data.get('matches', [])
				# pre-insert "maxadd" entries (because tickets are ordered desc by time)
				maxadd = self.maxMatches - len(matches)
				if maxadd > 0:
					if len(m) <= maxadd:
						matches = m + matches
					else:
						matches = m[-maxadd:] + matches
				failures += data.get('failures', 1)
				data['failures'] = failures
				data['matches'] = matches
				tickdata.update(data)
				prev_timeofban = timeofban
			# flush the last accumulated IP:
			ticket = FailTicket(banip, prev_timeofban, data=tickdata)
			tickets.append(ticket)

		if cacheKey:
			self._bansMergedCache[cacheKey] = tickets if ip is None else ticket
		return tickets if ip is None else ticket
|
||||
|
||||
@commitandrollback
def getBan(self, cur, ip, jail=None, forbantime=None, overalljails=None, fromtime=None):
	"""Return ban summary rows (bancount, timeofban, bantime) for *ip*.

	When *overalljails* is truthy the counts/times are aggregated over
	all jails; otherwise rows may be restricted to *jail*.  Optional
	*forbantime*/*fromtime* restrict how far back bans are considered.
	Returns the rows as a list (at most one row in the grouped case).
	"""
	ip = str(ip)
	if not overalljails:
		query = "SELECT bancount, timeofban, bantime FROM bips"
	else:
		# aggregate over every jail the IP appears in:
		query = "SELECT sum(bancount), max(timeofban), sum(bantime) FROM bips"
	query += " WHERE ip = ?"
	queryArgs = [ip]
	if not overalljails and jail is not None:
		query += " AND jail=?"
		queryArgs.append(jail.name)
	if forbantime is not None:
		query += " AND timeofban > ?"
		queryArgs.append(MyTime.time() - forbantime)
	if fromtime is not None:
		query += " AND timeofban > ?"
		queryArgs.append(fromtime)
	if overalljails or jail is None:
		query += " GROUP BY ip ORDER BY timeofban DESC LIMIT 1"
	# repack iterator as long as in lock:
	return list(cur.execute(query, queryArgs))
|
||||
|
||||
def _getCurrentBans(self, cur, jail = None, ip = None, forbantime=None, fromtime=None):
	"""Build and execute the query for bans still active at *fromtime*.

	Returns the live cursor iterator (callers must consume it while
	holding the database lock - see getCurrentBans).
	"""
	queryArgs = []
	if jail is not None:
		query = "SELECT ip, timeofban, bantime, bancount, data FROM bips WHERE jail=?"
		queryArgs.append(jail.name)
	else:
		# across all jails take the newest entry per IP (GROUP BY below):
		query = "SELECT ip, max(timeofban), bantime, bancount, data FROM bips WHERE 1"
	if ip is not None:
		query += " AND ip=?"
		queryArgs.append(ip)
	# still banned at fromtime, or persistent (bantime <= -1):
	query += " AND (timeofban + bantime > ? OR bantime <= -1)"
	queryArgs.append(fromtime)
	if forbantime not in (None, -1): # not specified or persistent (all)
		query += " AND timeofban > ?"
		queryArgs.append(fromtime - forbantime)
	if ip is None:
		query += " GROUP BY ip ORDER BY ip, timeofban DESC"
	else:
		query += " ORDER BY timeofban DESC LIMIT 1"
	return cur.execute(query, queryArgs)
|
||||
|
||||
def getCurrentBans(self, jail=None, ip=None, forbantime=None, fromtime=None,
	correctBanTime=True, maxmatches=None
):
	"""Reads tickets (with merged info) currently affected from ban from the database.

	There are all the tickets corresponding parameters jail/ip, forbantime,
	fromtime (normally now).

	If correctBanTime specified (default True) it will fix the restored ban-time
	(and therefore endOfBan) of the ticket (normally it is ban-time of jail as maximum)
	for all tickets with ban-time greater (or persistent).
	"""
	cur = self._db.cursor()
	try:
		if fromtime is None:
			fromtime = MyTime.time()
		tickets = []
		ticket = None
		# resolve the boolean flag into the jail's max ban-time (or None):
		if correctBanTime is True:
			correctBanTime = jail.getMaxBanTime() if jail is not None else None
			# don't change if persistent allowed:
			if correctBanTime == -1: correctBanTime = None

		with self._lock:
			bans = self._getCurrentBans(cur, jail=jail, ip=ip,
				forbantime=forbantime, fromtime=fromtime
			)
			for ticket in bans:
				# can produce unpack error (database may return sporadical wrong-empty row):
				try:
					banip, timeofban, bantime, bancount, data = ticket
					# additionally check for empty values:
					if banip is None or banip == "": # pragma: no cover
						raise ValueError('unexpected value %r' % (banip,))
					# if bantime unknown (after upgrade-db from earlier version), just use min known ban-time:
					if bantime == -2: # todo: remove it in future version
						bantime = jail.actions.getBanTime() if jail is not None else (
							correctBanTime if correctBanTime else 600)
					elif correctBanTime and correctBanTime >= 0:
						# if persistent ban (or greater as max), use current max-bantime of the jail:
						if bantime == -1 or bantime > correctBanTime:
							bantime = correctBanTime
							# after correction check the end of ban again:
							if bantime != -1 and timeofban + bantime <= fromtime:
								# not persistent and too old - ignore it:
								logSys.debug("ignore ticket (with new max ban-time %r): too old %r <= %r, ticket: %r",
									bantime, timeofban + bantime, fromtime, ticket)
								continue
				except ValueError as e: # pragma: no cover
					logSys.debug("get current bans: ignore row %r - %s", ticket, e)
					continue
				# logSys.debug('restore ticket %r, %r, %r', banip, timeofban, data)
				ticket = FailTicket(banip, timeofban, data=data)
				# filter matches if expected (current count > as maxmatches specified):
				if maxmatches is None:
					maxmatches = self.maxMatches
				if maxmatches:
					matches = ticket.getMatches()
					if matches and len(matches) > maxmatches:
						ticket.setMatches(matches[-maxmatches:])
				else:
					ticket.setMatches(None)
				# logSys.debug('restored ticket: %r', ticket)
				ticket.setBanTime(bantime)
				ticket.setBanCount(bancount)
				# single-IP lookup: hand back the first (newest) ticket directly:
				if ip is not None: return ticket
				tickets.append(ticket)
	finally:
		cur.close()

	return tickets
|
||||
|
||||
def _cleanjails(self, cur):
|
||||
"""Remove empty jails jails and log files from database.
|
||||
"""
|
||||
cur.execute(
|
||||
"DELETE FROM jails WHERE enabled = 0 "
|
||||
"AND NOT EXISTS(SELECT * FROM bans WHERE jail = jails.name) "
|
||||
"AND NOT EXISTS(SELECT * FROM bips WHERE jail = jails.name)")
|
||||
|
||||
def _purge_bips(self, cur):
	"""Purge old bad ips (jails and log files from database).
	Currently it is timed out IP, whose time since last ban is several times out-dated (outDatedFactor is default 3).
	Permanent banned ips will be never removed.
	"""
	# compute the cutoff once, so both predicates compare against the
	# same timestamp (two MyTime.time() calls could straddle a tick):
	cutoff = int(MyTime.time()) - self._purgeAge
	cur.execute(
		"DELETE FROM bips WHERE timeofban < ? and bantime != -1 and (timeofban + (bantime * ?)) < ?",
		(cutoff, self._outDatedFactor, cutoff))
|
||||
|
||||
@commitandrollback
def purge(self, cur):
	"""Purge old bans, jails and log files from database.
	"""
	# merged-bans cache may reference rows deleted below - invalidate it:
	self._bansMergedCache = {}
	cur.execute(
		"DELETE FROM bans WHERE timeofban < ?",
		(MyTime.time() - self._purgeAge, ))
	self._purge_bips(cur)
	# finally drop disabled jails that own no remaining rows:
	self._cleanjails(cur)
|
||||
|
||||
556
fail2ban-master/fail2ban/server/datedetector.py
Normal file
556
fail2ban-master/fail2ban/server/datedetector.py
Normal file
@@ -0,0 +1,556 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier and Fail2Ban Contributors"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import copy
|
||||
import time
|
||||
|
||||
from threading import Lock
|
||||
|
||||
from .datetemplate import re, DateTemplate, DatePatternRegex, DateTai64n, DateEpoch, \
|
||||
RE_EPOCH_PATTERN
|
||||
from .strptime import validateTimeZone
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger
|
||||
|
||||
# Gets the instance of the logger.
logSys = getLogger(__name__)

# log level used for highly detailed (heavy-debug) messages in this module:
logLevel = 5

# matches an unescaped {DATE} placeholder inside a user-supplied pattern:
RE_DATE_PREMATCH = re.compile(r"(?<!\\)\{DATE\}", re.IGNORECASE)
# process-wide cache of compiled date templates (bounded by count and age):
DD_patternCache = Utils.Cache(maxCount=1000, maxTime=60*60)
|
||||
|
||||
|
||||
def _getPatternTemplate(pattern, key=None):
	"""Return a (cached) date template compiled from *pattern*.

	*key* is the cache key; by default it is the pattern itself,
	upper-cased when the pattern contains no strptime directives
	(so symbolic names like "epoch" hit the same cache slot).
	"""
	if key is None:
		key = pattern
		if '%' not in pattern:
			key = pattern.upper()
	template = DD_patternCache.get(key)

	if not template:
		# symbolic epoch forms (optionally anchored / long-format):
		if "EPOCH" in key:
			if RE_EPOCH_PATTERN.search(pattern):
				template = DateEpoch(pattern=pattern, longFrm="LEPOCH" in key)
			elif key in ("EPOCH", "{^LN-BEG}EPOCH", "^EPOCH"):
				template = DateEpoch(lineBeginOnly=(key != "EPOCH"))
			elif key in ("LEPOCH", "{^LN-BEG}LEPOCH", "^LEPOCH"):
				template = DateEpoch(lineBeginOnly=(key != "LEPOCH"), longFrm=True)
		if template is None:
			# TAI64N timestamps, else a generic strptime-style pattern:
			if key in ("TAI64N", "{^LN-BEG}TAI64N", "^TAI64N"):
				template = DateTai64n(wordBegin=('start' if key != "TAI64N" else False))
			else:
				template = DatePatternRegex(pattern)

		DD_patternCache.set(key, template)
	return template
|
||||
|
||||
def _getAnchoredTemplate(template, wrap=lambda s: '{^LN-BEG}' + s):
	"""Return a (cached) copy of *template* with *wrap* applied.

	By default the wrapper prefixes '{^LN-BEG}', producing a variant
	anchored at line begin; addDefaultTemplate also uses it with a
	{DATE}-substituting wrapper.
	"""
	# wrap name:
	name = wrap(template.name)
	# try to find in cache (by name):
	template2 = DD_patternCache.get(name)
	if not template2:
		# wrap pattern (or regexp if not pattern template):
		regex = wrap(getattr(template, 'pattern', template.regex))
		if hasattr(template, 'pattern'):
			# try to find in cache (by pattern):
			template2 = DD_patternCache.get(regex)
		# make duplicate and set new anchored regex:
		if not template2:
			if not hasattr(template, 'pattern'):
				# raw-regex template: rebuild from the wrapped name:
				template2 = _getPatternTemplate(name)
			else:
				template2 = _getPatternTemplate(regex)
	return template2
|
||||
|
||||
|
||||
|
||||
class DateDetectorCache(object):
	"""Implements the caching of the default templates list.

	Built lazily (and thread-safely) on first access of `templates`.
	"""
	def __init__(self):
		self.__lock = Lock()
		self.__templates = list()

	@property
	def templates(self):
		"""List of template instances managed by the detector.
		"""
		if self.__templates:
			return self.__templates
		with self.__lock:
			if self.__templates: # pragma: no cover - race-condition + multi-threaded environment only
				return self.__templates
			self._addDefaultTemplate()
			return self.__templates

	def _cacheTemplate(self, template):
		"""Cache Fail2Ban's default template.

		"""
		# if not already line-begin anchored, additional template, that prefers datetime
		# at start of a line (safety+performance feature):
		name = template.name
		if not name.startswith('{^LN-BEG}') and not name.startswith('^') and hasattr(template, 'regex'):
			template2 = _getAnchoredTemplate(template)
			# prevent to add duplicates:
			if template2.name != name:
				# increase weight of such templates, because they should be always
				# preferred in template sorting process (bubble up):
				template2.weight = 100.0
				self.__tmpcache[0].append(template2)
		# add template:
		self.__tmpcache[1].append(template)

	# default date patterns (compiled into templates on first use,
	# see `defaultTemplates` below):
	DEFAULT_TEMPLATES = [
		# ISO 8601, simple date, optional subsecond and timezone:
		# 2005-01-23T21:59:59.981746, 2005-01-23 21:59:59, 2005-01-23 8:59:59
		# simple date: 2005/01/23 21:59:59
		# custom for syslog-ng 2006.12.21 06:43:20
		r"%ExY(?P<_sep>[-/.])%m(?P=_sep)%d(?:T| ?)%H:%M:%S(?:[.,]%f)?(?:\s*%z)?",
		# asctime with optional day, subsecond and/or year:
		# Sun Jan 23 21:59:59.011 2005
		r"(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
		# asctime with optional day, subsecond and/or year coming after day
		# http://bugs.debian.org/798923
		# Sun Jan 23 2005 21:59:59.011
		r"(?:%a )?%b %d %ExY %k:%M:%S(?:\.%f)?",
		# simple date too (from x11vnc): 23/01/2005 21:59:59
		# and with optional year given by 2 digits: 23/01/05 21:59:59
		# (See http://bugs.debian.org/537610)
		# 17-07-2008 17:23:25
		r"%d(?P<_sep>[-/])%m(?P=_sep)(?:%ExY|%Exy) %k:%M:%S",
		# Apache format optional time zone:
		# [31/Oct/2006:09:22:55 -0000]
		# 26-Jul-2007 15:20:52
		# named 26-Jul-2007 15:20:52.252
		# roundcube 26-Jul-2007 15:20:52 +0200
		r"%d(?P<_sep>[-/])%b(?P=_sep)%ExY[ :]?%H:%M:%S(?:\.%f)?(?: %z)?",
		# CPanel 05/20/2008:01:57:39
		r"%m/%d/%ExY:%H:%M:%S",
		# 01-27-2012 16:22:44.252
		# subseconds explicit to avoid possible %m<->%d confusion
		# with previous ("%d-%m-%ExY %k:%M:%S" by "%d(?P<_sep>[-/])%m(?P=_sep)(?:%ExY|%Exy) %k:%M:%S")
		r"%m-%d-%ExY %k:%M:%S(?:\.%f)?",
		# Epoch
		r"EPOCH",
		# Only time information in the log
		r"{^LN-BEG}%H:%M:%S",
		# <09/16/08@05:03:30>
		r"^<%m/%d/%Exy@%H:%M:%S>",
		# MySQL: 130322 11:46:11
		r"%Exy%Exm%Exd ?%H:%M:%S",
		# Apache Tomcat
		r"%b %d, %ExY %I:%M:%S %p",
		# ASSP: Apr-27-13 02:33:06
		r"^%b-%d-%Exy %k:%M:%S",
		# 20050123T215959, 20050123 215959, 20050123 85959, 20050123-21:59:59
		r"%ExY%Exm%Exd(?:-|T| ?)%ExH:?%ExM:?%ExS(?:[.,]%f)?(?:\s*%z)?",
		# prefixed with optional named time zone (monit):
		# PDT Apr 16 21:05:29
		r"(?:%Z )?(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
		# +00:00 Jan 23 21:59:59.011 2005
		r"(?:%z )?(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
		# TAI64N
		r"TAI64N",
	]

	@property
	def defaultTemplates(self):
		# lazily replace the raw pattern strings above with compiled
		# template objects, in place (done once per process):
		if isinstance(DateDetectorCache.DEFAULT_TEMPLATES[0], str):
			for i, dt in enumerate(DateDetectorCache.DEFAULT_TEMPLATES):
				dt = _getPatternTemplate(dt)
				DateDetectorCache.DEFAULT_TEMPLATES[i] = dt
		return DateDetectorCache.DEFAULT_TEMPLATES

	def _addDefaultTemplate(self):
		"""Add resp. cache Fail2Ban's default set of date templates.
		"""
		# tmpcache[0]: anchored high-weight variants, tmpcache[1]: originals
		self.__tmpcache = [], []
		# cache default templates:
		for dt in self.defaultTemplates:
			self._cacheTemplate(dt)
		# anchored variants first, so they are tried before the originals:
		self.__templates = self.__tmpcache[0] + self.__tmpcache[1]
		del self.__tmpcache
|
||||
|
||||
|
||||
class DateDetectorTemplate(object):
	"""Used for "shallow copy" of the template object.

	Prevents collective usage of hits/lastUsed in cached templates:
	the shared template stays immutable, while the per-detector usage
	statistics live on this lightweight wrapper.
	"""
	__slots__ = ('template', 'hits', 'lastUsed', 'distance')
	def __init__(self, template):
		# the wrapped (possibly shared/cached) template instance:
		self.template = template
		# number of successful matches with this template:
		self.hits = 0
		# time of the most recent successful match:
		self.lastUsed = 0
		# the last distance to date-match within the log file:
		self.distance = 0x7fffffff

	@property
	def weight(self):
		# effective sort weight: more hits and a closer match position
		# (smaller distance) both increase the weight:
		return self.hits * self.template.weight / max(1, self.distance)

	def __getattr__(self, name):
		""" Returns attribute of template (called for parameters not in slots)
		"""
		return getattr(self.template, name)
|
||||
|
||||
|
||||
class DateDetector(object):
|
||||
"""Manages one or more date templates to find a date within a log line.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
templates
|
||||
"""
|
||||
_defCache = DateDetectorCache()
|
||||
|
||||
def __init__(self):
	"""Initialise an empty detector with match-position heuristics reset."""
	self.__templates = list()
	# names already added (guards against duplicate templates):
	self.__known_names = set()
	# time the template was long unused (currently 300 == 5m):
	self.__unusedTime = 300
	# last known distance (bypass one char collision) and end position:
	self.__lastPos = 1, None
	self.__lastEndPos = 0x7fffffff, None
	self.__lastTemplIdx = 0x7fffffff
	# first free place:
	self.__firstUnused = 0
	# pre-match pattern:
	self.__preMatch = None
	# default TZ (if set, treat log lines without explicit time zone to be in this time zone):
	self.__default_tz = None
|
||||
|
||||
def _appendTemplate(self, template, ignoreDup=False):
	"""Append *template* (wrapped per-detector), rejecting duplicates.

	Raises ValueError on a duplicate name unless *ignoreDup* is set.
	"""
	name = template.name
	if name in self.__known_names:
		if ignoreDup: return
		raise ValueError(
			"There is already a template with name %s" % name)
	self.__known_names.add(name)
	# wrap so hits/lastUsed/distance are tracked per detector, not shared:
	self.__templates.append(DateDetectorTemplate(template))
	logSys.debug(" date pattern regex for `%s`: `%s`",
		getattr(template, 'pattern', ''), template.regex)
|
||||
|
||||
def appendTemplate(self, template):
	"""Add a date template to manage and use in search of dates.

	Parameters
	----------
	template : DateTemplate or str
		Can be either a `DateTemplate` instance, or a string which will
		be used as the pattern for the `DatePatternRegex` template. The
		template will then be added to the detector.

	Raises
	------
	ValueError
		If a template already exists with the same name.
	"""
	if isinstance(template, str):
		key = pattern = template
		# symbolic (directive-free) patterns share an upper-cased cache key:
		if '%' not in pattern:
			key = pattern.upper()
		template = DD_patternCache.get(key)
		if not template:
			# special keys expand into (subsets of) the default templates:
			if key in ("{^LN-BEG}", "{DEFAULT}"):
				flt = \
					lambda template: template.flags & DateTemplate.LINE_BEGIN if key == "{^LN-BEG}" else None
				self.addDefaultTemplate(flt)
				return
			elif "{DATE}" in key:
				self.addDefaultTemplate(preMatch=pattern, allDefaults=False)
				return
			elif key == "{NONE}":
				# explicit "no date" template:
				template = _getPatternTemplate('{UNB}^', key)
			else:
				template = _getPatternTemplate(pattern, key)

			DD_patternCache.set(key, template)

	logSys.info(" date pattern `%s`: `%s`",
		getattr(template, 'pattern', ''), template.name)
	self._appendTemplate(template)
|
||||
|
||||
def addDefaultTemplate(self, filterTemplate=None, preMatch=None, allDefaults=True):
	"""Add Fail2Ban's default set of date templates.

	Parameters
	----------
	filterTemplate : callable, optional
		Predicate receiving a template; templates for which it returns
		a falsy value are skipped.
	preMatch : str, optional
		Exact pattern containing a {DATE} placeholder; each default
		template is copied with {DATE} replaced by the template's
		unbound regex.
	allDefaults : bool
		If True use all cached templates, otherwise only the default
		subset (see DateDetector._defCache).
	"""
	# if some template was added before the defaults, silently skip name duplicates:
	ignoreDup = len(self.__templates) > 0
	cnt = 0
	for template in (
		DateDetector._defCache.templates if allDefaults else DateDetector._defCache.defaultTemplates
	):
		# filter if specified:
		if filterTemplate is not None and not filterTemplate(template): continue
		# if exact pattern available - create copy of template, contains replaced {DATE} with default regex:
		if preMatch is not None:
			# get cached or create a copy with modified name/pattern, using preMatch replacement for {DATE}:
			template = _getAnchoredTemplate(template,
				wrap=lambda s: RE_DATE_PREMATCH.sub(lambda m: DateTemplate.unboundPattern(s), preMatch))
		# append date detector template (ignore duplicate if some was added before default):
		self._appendTemplate(template, ignoreDup=ignoreDup)
		cnt += 1
	if preMatch:
		logSys.info(" default date pattern for `%r`: %d template(s)", preMatch, cnt)
	else:
		logSys.info(" default %sdate pattern: %d template(s)", "filtered " if filterTemplate else "", cnt)
|
||||
|
||||
@property
def templates(self):
	"""List of template instances managed by the detector.

	Returns the internal list itself (not a copy); its order changes
	over time as `_reorderTemplate` bubbles frequently-hit templates up.
	"""
	return self.__templates
|
||||
|
||||
def matchTime(self, line):
	"""Attempts to find date on a log line using templates.

	This uses the templates' `matchDate` method in an attempt to find
	a date. It also increments the match hit count for the winning
	template.

	Strategy: first retry the template that matched last time (fast
	path, optionally restricted to the previous start/end positions),
	then fall back to a linear search over all templates with
	collision heuristics; the winner is finally bubbled up in the
	template list.

	Parameters
	----------
	line : str
		Line which is searched by the date templates.

	Returns
	-------
	re.MatchObject, DateTemplate
		The regex match returned from the first successfully matched
		template.
	"""
	# if no templates specified - default templates should be used:
	if not len(self.__templates):
		self.addDefaultTemplate()
	# cheap no-op logger when the effective level is above our debug level:
	log = logSys.log if logSys.getEffectiveLevel() <= logLevel else lambda *args: None
	log(logLevel-1, "try to match time for line: %.120s", line)

	# first try to use last template with same start/end position:
	match = None
	# found: best reserved alternative as (match, distance, endpos, index):
	found = None, 0x7fffffff, 0x7fffffff, -1
	ignoreBySearch = 0x7fffffff
	i = self.__lastTemplIdx
	if i < len(self.__templates):
		ddtempl = self.__templates[i]
		template = ddtempl.template
		if template.flags & (DateTemplate.LINE_BEGIN|DateTemplate.LINE_END):
			log(logLevel-1, " try to match last anchored template #%02i ...", i)
			match = template.matchDate(line)
			# don't retry this template in the fallback search below:
			ignoreBySearch = i
		else:
			distance, endpos = self.__lastPos[0], self.__lastEndPos[0]
			log(logLevel-1, " try to match last template #%02i (from %r to %r): ...%r==%r %s %r==%r...",
				i, distance, endpos,
				line[distance-1:distance], self.__lastPos[1],
				line[distance:endpos],
				line[endpos:endpos+1], self.__lastEndPos[2])
			# check same boundaries left/right, outside fully equal, inside only if not alnum (e. g. bound RE
			# with space or some special char), otherwise possible collision/pattern switch:
			if ((
				line[distance-1:distance] == self.__lastPos[1] or
				(line[distance:distance+1] == self.__lastPos[2] and not self.__lastPos[2].isalnum())
			) and (
				line[endpos:endpos+1] == self.__lastEndPos[2] or
				(line[endpos-1:endpos] == self.__lastEndPos[1] and not self.__lastEndPos[1].isalnum())
			)):
				# search in line part only:
				log(logLevel-1, " boundaries are correct, search in part %r", line[distance:endpos])
				match = template.matchDate(line, distance, endpos)
			else:
				log(logLevel-1, " boundaries show conflict, try whole search")
				match = template.matchDate(line)
				ignoreBySearch = i
		if match:
			distance = match.start()
			endpos = match.end()
			# if different position, possible collision/pattern switch:
			if (
				len(self.__templates) == 1 or # single template:
				template.flags & (DateTemplate.LINE_BEGIN|DateTemplate.LINE_END) or
				(distance == self.__lastPos[0] and endpos == self.__lastEndPos[0])
			):
				log(logLevel, " matched last time template #%02i", i)
			else:
				log(logLevel, " ** last pattern collision - pattern change, reserve & search ...")
				found = match, distance, endpos, i; # save current best alternative
				match = None
		else:
			log(logLevel, " ** last pattern not found - pattern change, search ...")
	# search template and better match:
	if not match:
		log(logLevel, " search template (%i) ...", len(self.__templates))
		i = 0
		for ddtempl in self.__templates:
			# skip template already tried in the fast path above:
			if i == ignoreBySearch:
				i += 1
				continue
			log(logLevel-1, " try template #%02i: %s", i, ddtempl.name)
			template = ddtempl.template
			match = template.matchDate(line)
			if match:
				distance = match.start()
				endpos = match.end()
				log(logLevel, " matched time template #%02i (at %r <= %r, %r) %s",
					i, distance, ddtempl.distance, self.__lastPos[0], template.name)
				## last (or single) template - fast stop:
				if i+1 >= len(self.__templates):
					break
				## if line-begin/end anchored - stop searching:
				if template.flags & (DateTemplate.LINE_BEGIN|DateTemplate.LINE_END):
					break
				## stop searching if next template still unused, but we had already hits:
				if (distance == 0 and ddtempl.hits) and not self.__templates[i+1].template.hits:
					break
				## [grave] if distance changed, possible date-match was found somewhere
				## in body of message, so save this template, and search further:
				if distance > ddtempl.distance or distance > self.__lastPos[0]:
					log(logLevel, " ** distance collision - pattern change, reserve")
					## shortest of both:
					if distance < found[1]:
						found = match, distance, endpos, i
					## search further:
					match = None
					i += 1
					continue
				## winner - stop search:
				break
			i += 1
		# check other template was found (use this one with shortest distance):
		if not match and found[0]:
			match, distance, endpos, i = found
			log(logLevel, " use best time template #%02i", i)
			ddtempl = self.__templates[i]
			template = ddtempl.template
	# we've winner, incr hits, set distance, usage, reorder, etc:
	if match:
		ddtempl.hits += 1
		ddtempl.lastUsed = time.time()
		ddtempl.distance = distance
		if self.__firstUnused == i:
			self.__firstUnused += 1
		# remember boundary chars around the match for the next fast-path check:
		self.__lastPos = distance, line[distance-1:distance], line[distance]
		self.__lastEndPos = endpos, line[endpos-1], line[endpos:endpos+1]
		# if not first - try to reorder current template (bubble up), they will be not sorted anymore:
		if i and i != self.__lastTemplIdx:
			i = self._reorderTemplate(i)
		self.__lastTemplIdx = i
		# return tuple with match and template reference used for parsing:
		return (match, template)

	# not found:
	log(logLevel, " no template.")
	return (None, None)
|
||||
|
||||
@property
def default_tz(self):
	"""Default time zone applied to log lines without an explicit zone (or None)."""
	return self.__default_tz

@default_tz.setter
def default_tz(self, value):
	# delegate validation/normalization to the validateTimeZone helper
	# (presumably raises on invalid zone values - see helper definition):
	self.__default_tz = validateTimeZone(value)
|
||||
|
||||
def getTime(self, line, timeMatch=None):
	"""Attempts to return the date on a log line using templates.

	This uses the templates' `getDate` method in an attempt to find
	a date.
	For the faster usage, always specify a parameter timeMatch (the previous tuple result
	of the matchTime), then this will work without locking and without cycle over templates.

	Parameters
	----------
	line : str
		Line which is searched by the date templates.
	timeMatch : tuple, optional
		Previous (match, template) result of `matchTime`; when omitted,
		`matchTime` is invoked here.

	Returns
	-------
	tuple or None
		The (timestamp, match) tuple returned by the matched template's
		`getDate`, or None if no template matched or conversion failed.
	"""
	# search match for all specified templates:
	if timeMatch is None:
		timeMatch = self.matchTime(line)
	# convert:
	template = timeMatch[1]
	if template is not None:
		try:
			date = template.getDate(line, timeMatch[0], default_tz=self.__default_tz)
			if date is not None:
				if logSys.getEffectiveLevel() <= logLevel: # pragma: no cover - heavy debug
					logSys.log(logLevel, " got time %f for %r using template %s",
						date[0], date[1].group(1), template.name)
				return date
		except ValueError:
			# conversion failed (e.g. invalid date values) - treat as not found:
			pass
	return None
|
||||
|
||||
def _reorderTemplate(self, num):
	"""Reorder template (bubble up) in template list if hits grows enough.

	Parameters
	----------
	num : int
		Index of template should be moved.

	Returns
	-------
	int
		New index of the template (unchanged if it was not moved).
	"""
	if num:
		templates = self.__templates
		ddtempl = templates[num]
		if logSys.getEffectiveLevel() <= logLevel:
			logSys.log(logLevel, " -> reorder template #%02i, hits: %r", num, ddtempl.hits)
		## current hits and time the template was long unused:
		untime = ddtempl.lastUsed - self.__unusedTime
		weight = ddtempl.weight
		## try to move faster (first if unused available, or half of part to current template position):
		pos = self.__firstUnused if self.__firstUnused < num else num // 2
		## don't move too often (multiline logs resp. log's with different date patterns),
		## if template not used too long, replace it also :
		# NOTE: closure reads `pos` late-bound, so it re-evaluates after pos is reassigned below.
		def _moveable():
			pweight = templates[pos].weight
			if logSys.getEffectiveLevel() <= logLevel:
				logSys.log(logLevel, " -> compare template #%02i & #%02i, weight %.3f > %.3f, hits %r > %r",
					num, pos, weight, pweight, ddtempl.hits, templates[pos].hits)
			return weight > pweight or untime > templates[pos].lastUsed
	 	##
		## if not moveable (smaller weight or target position recently used):
		if not _moveable():
			## try to move slow (exact 1 position):
			if pos == num-1:
				return num
			pos = num-1
			## if still smaller and template at position used, don't move:
			if not _moveable():
				return num
		## move:
		del templates[num]
		templates[pos:0] = [ddtempl]
		## correct first unused:
		while self.__firstUnused < len(templates) and templates[self.__firstUnused].hits:
			self.__firstUnused += 1
		if logSys.getEffectiveLevel() <= logLevel:
			logSys.log(logLevel, " -> moved template #%02i -> #%02i", num, pos)
		return pos
	return num
|
||||
396
fail2ban-master/fail2ban/server/datetemplate.py
Normal file
396
fail2ban-master/fail2ban/server/datetemplate.py
Normal file
@@ -0,0 +1,396 @@
|
||||
# emacs: -*- mode: python; coding: utf-8; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import re, time
|
||||
from abc import abstractmethod
|
||||
|
||||
from .strptime import reGroupDictStrptime, timeRE, getTimePatternRE
|
||||
from ..helpers import getLogger
|
||||
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
# -- regex helpers used by DateTemplate.setRegex and friends --

# check already grouped contains "(", but ignores char "\(" and conditional "(?(id)...)":
RE_GROUPED = re.compile(r'(?<!(?:\(\?))(?<!\\)\((?!\?)')
# pair of (pattern, replacement) wrapping a whole regex into group(1),
# keeping leading inline flags / "^" and trailing "$" outside the group:
RE_GROUP = ( re.compile(r'^((?:\(\?\w+\))?\^?(?:\(\?\w+\))?)(.*?)(\$?)$'), r"\1(\2)\3" )
# global inline flags like (?iu) at start (cut out and re-prepended after processing):
RE_GLOBALFLAGS = re.compile(r'((?:^|(?<!\\))\(\?[a-z]+\))')

# special markers in date patterns ({UNB}, {^LN-BEG}) and existing anchor expressions:
RE_EXLINE_NO_BOUNDS = re.compile(r'^\{UNB\}')
RE_EXLINE_BOUND_BEG = re.compile(r'^\{\^LN-BEG\}')
RE_EXSANC_BOUND_BEG = re.compile(r'^\((?:\?:)?\^\|\\b\|\\W\)')
RE_EXEANC_BOUND_BEG = re.compile(r'\(\?=\\b\|\\W\|\$\)$')
# detect regexes that already carry their own begin/end boundary (or opt out via "**"):
RE_NO_WRD_BOUND_BEG = re.compile(r'^\(*(?:\(\?\w+\))?(?:\^|\(*\*\*|\((?:\?:)?\^)')
RE_NO_WRD_BOUND_END = re.compile(r'(?<!\\)(?:\$\)?|\\b|\\s|\*\*\)*)$')
# strip the special "**" no-boundary markers from front/end of a regex:
RE_DEL_WRD_BOUNDS = ( re.compile(r'^\(*(?:\(\?\w+\))?\(*\*\*|(?<!\\)\*\*\)*$'),
	lambda m: m.group().replace('**', '') )

# detect line-begin/line-end anchoring of a finished regex:
RE_LINE_BOUND_BEG = re.compile(r'^(?:\(\?\w+\))?(?:\^|\((?:\?:)?\^(?!\|))')
RE_LINE_BOUND_END = re.compile(r'(?<![\\\|])(?:\$\)?)$')

# strptime fields with alphabetic content (month/day names, AM/PM) -> need (?iu):
RE_ALPHA_PATTERN = re.compile(r'(?<!\%)\%[aAbBpc]')

# {EPOCH} / {LEPOCH} placeholder in epoch patterns (case-insensitive):
RE_EPOCH_PATTERN = re.compile(r"(?<!\\)\{L?EPOCH\}", re.IGNORECASE)
|
||||
|
||||
|
||||
class DateTemplate(object):
	"""A template which searches for and returns a date from a log line.

	This is an not functional abstract class which other templates should
	inherit from.

	Attributes
	----------
	name
	regex
	"""

	# anchoring flags stored in self.flags (bitwise OR-able):
	LINE_BEGIN = 8
	LINE_END = 4
	WORD_BEGIN = 2
	WORD_END = 1

	def __init__(self):
		self.name = ""          # template display name (may get '{^LN-BEG}' prefix)
		self.weight = 1.0       # relative weight used when reordering templates
		self.flags = 0          # combination of LINE_/WORD_ BEGIN/END flags
		self.hits = 0           # number of successful matches so far
		self.time = 0
		self._regex = ""        # processed regex source (see setRegex)
		self._cRegex = None     # lazily compiled regex (see _compileRegex)

	def getRegex(self):
		return self._regex

	def setRegex(self, regex, wordBegin=True, wordEnd=True):
		r"""Sets regex to use for searching for date in log line.

		Parameters
		----------
		regex : str
			The regex the template will use for searching for a date.
		wordBegin : bool
			Defines whether the regex should be modified to search at beginning of a
			word, by adding special boundary r'(?=^|\b|\W)' to start of regex.
			Can be disabled with specifying of ** at front of regex.
			Default True.
		wordEnd : bool
			Defines whether the regex should be modified to search at end of a word,
			by adding special boundary r'(?=\b|\W|$)' to end of regex.
			Can be disabled with specifying of ** at end of regex.
			Default True.

		Raises
		------
		re.error
			If regular expression fails to compile
		"""
		# Warning: don't use lookahead for line-begin boundary,
		# (e. g. r"^(?:\W{0,2})?" is much faster as r"(?:^|(?<=^\W)|(?<=^\W{2}))")
		# because it may be very slow in negative case (by long log-lines not matching pattern)

		regex = regex.strip()
		# cut global flags like (?iu) from RE in order to pre-set it after processing:
		gf = RE_GLOBALFLAGS.search(regex)
		if gf:
			regex = RE_GLOBALFLAGS.sub('', regex, count=1)
		# check word boundaries needed:
		boundBegin = wordBegin and not RE_NO_WRD_BOUND_BEG.search(regex)
		boundEnd = wordEnd and not RE_NO_WRD_BOUND_END.search(regex)
		# if no group add it now, should always have a group(1):
		if not RE_GROUPED.search(regex):
			regex = RE_GROUP[0].sub(RE_GROUP[1], regex)
		self.flags = 0
		# if word or line start boundary:
		if boundBegin:
			# wordBegin == 'start' means anchor at line begin instead of word begin:
			self.flags |= DateTemplate.WORD_BEGIN if wordBegin != 'start' else DateTemplate.LINE_BEGIN
			if wordBegin != 'start':
				regex = r'(?=^|\b|\W)' + regex
			else:
				regex = r"^(?:\W{0,2})?" + regex
				if not self.name.startswith('{^LN-BEG}'):
					self.name = '{^LN-BEG}' + self.name
		# if word end boundary:
		if boundEnd:
			self.flags |= DateTemplate.WORD_END
			regex += r'(?=\b|\W|$)'
		# recognize anchoring already present in the (possibly user-supplied) regex:
		if not (self.flags & DateTemplate.LINE_BEGIN) and RE_LINE_BOUND_BEG.search(regex):
			self.flags |= DateTemplate.LINE_BEGIN
		if not (self.flags & DateTemplate.LINE_END) and RE_LINE_BOUND_END.search(regex):
			self.flags |= DateTemplate.LINE_END
		# remove possible special pattern "**" in front and end of regex:
		regex = RE_DEL_WRD_BOUNDS[0].sub(RE_DEL_WRD_BOUNDS[1], regex)
		if gf: # restore global flags:
			regex = gf.group(1) + regex
		self._regex = regex
		logSys.log(4, ' constructed regex %s', regex)
		# invalidate compiled form; recompiled lazily on next matchDate:
		self._cRegex = None

	regex = property(getRegex, setRegex, doc=
		"""Regex used to search for date.
		""")

	def _compileRegex(self):
		"""Compile regex by first usage.
		"""
		if not self._cRegex:
			try:
				# print('*'*10 + (' compile - %-30.30s -- %s' % (getattr(self, 'pattern', self.regex), self.name)))
				self._cRegex = re.compile(self.regex)
			except Exception as e:
				logSys.error('Compile %r failed, expression %r', self.name, self.regex)
				raise e

	def matchDate(self, line, *args):
		"""Check if regex for date matches on a log line.

		Extra positional args are forwarded to `re.search` (pos, endpos),
		allowing a restricted search within a known line span.
		"""
		if not self._cRegex:
			self._compileRegex()
		logSys.log(4, " search %s", self.regex)
		dateMatch = self._cRegex.search(line, *args); # pos, endpos
		if dateMatch:
			self.hits += 1
		# print('*'*10 + ('[%s] - %-30.30s -- %s' % ('*' if dateMatch else ' ', getattr(self, 'pattern', self.regex), self.name)))
		return dateMatch

	@abstractmethod
	def getDate(self, line, dateMatch=None, default_tz=None):
		"""Abstract method, which should return the date for a log line

		This should return the date for a log line, typically taking the
		date from the part of the line which matched the templates regex.
		This requires abstraction, therefore just raises exception.

		Parameters
		----------
		line : str
			Log line, of which the date should be extracted from.
		default_tz: if no explicit time zone is present in the line
			passing this will interpret it as in that time zone.

		Raises
		------
		NotImplementedError
			Abstract method, therefore always returns this.
		"""
		raise NotImplementedError("getDate() is abstract")

	@staticmethod
	def unboundPattern(pattern):
		# strip the {UNB}/{^LN-BEG} markers and the standard begin/end
		# boundary expressions, leaving the bare date regex:
		return RE_EXEANC_BOUND_BEG.sub('',
			RE_EXSANC_BOUND_BEG.sub('',
				RE_EXLINE_BOUND_BEG.sub('', RE_EXLINE_NO_BOUNDS.sub('', pattern))
			)
		)
|
||||
|
||||
|
||||
class DateEpoch(DateTemplate):
	"""A date template which searches for Unix timestamps.

	This includes Unix timestamps which appear at start of a line, optionally
	within square braces (nsd), or on SELinux audit log lines.

	Attributes
	----------
	name
	regex
	"""

	def __init__(self, lineBeginOnly=False, pattern=None, longFrm=False):
		DateTemplate.__init__(self)
		self.name = "Epoch" if not pattern else pattern
		self._longFrm = longFrm;
		# index of the regex group holding the timestamp digits:
		self._grpIdx = 1
		epochRE = r"\d{10,11}\b(?:\.\d{3,6})?"
		if longFrm:
			# "long" form also accepts milli-/microsecond epochs (13-17 digits):
			self.name = "LongEpoch" if not pattern else pattern
			epochRE = r"\d{10,11}(?:\d{3}(?:\.\d{1,6}|\d{3})?)?"
		if pattern:
			# pattern should find the whole pattern, but cut out grouped match (or whole match if no groups specified):
			regex = RE_EPOCH_PATTERN.sub(lambda v: "(%s)" % epochRE, pattern)
			if not RE_GROUPED.search(pattern):
				regex = "(" + regex + ")"
				self._grpIdx = 2
			self.setRegex(regex)
		elif not lineBeginOnly:
			regex = r"((?:^|(?P<square>(?<=^\[))|(?P<selinux>(?<=\baudit\()))%s)(?:(?(selinux)(?=:\d+\)))|(?(square)(?=\])))" % epochRE
			self.setRegex(regex, wordBegin=False) ;# already line begin resp. word begin anchored
		else:
			regex = r"((?P<square>(?<=^\[))?%s)(?(square)(?=\]))" % epochRE
			self.setRegex(regex, wordBegin='start', wordEnd=True)

	def getDate(self, line, dateMatch=None, default_tz=None):
		"""Method to return the date for a log line.

		Parameters
		----------
		line : str
			Log line, of which the date should be extracted from.
		default_tz: ignored, Unix timestamps are time zone independent

		Returns
		-------
		(float, str)
			Tuple containing a Unix timestamp, and the string of the date
			which was matched and in turned used to calculated the timestamp.
		"""
		if not dateMatch:
			dateMatch = self.matchDate(line)
		if dateMatch:
			v = dateMatch.group(self._grpIdx)
			# extract part of format which represents seconds since epoch
			if self._longFrm and len(v) >= 13:
				# 13+ digits without a dot: milliseconds; 16+ digits: microseconds
				if len(v) >= 16 and '.' not in v:
					v = float(v) / 1000000
				else:
					v = float(v) / 1000
			return (float(v), dateMatch)
|
||||
|
||||
|
||||
class DatePatternRegex(DateTemplate):
	"""Date template, with regex/pattern

	Parameters
	----------
	pattern : str
		Sets the date templates pattern.

	Attributes
	----------
	name
	regex
	pattern
	"""

	# class-level strptime helpers: regex finding "%" fields and the
	# corresponding name/regex substitution maps (see strptime module):
	_patternRE, _patternName = getTimePatternRE()
	_patternRE = re.compile(_patternRE)

	def __init__(self, pattern=None, **kwargs):
		super(DatePatternRegex, self).__init__()
		self._pattern = None
		if pattern is not None:
			self.setRegex(pattern, **kwargs)

	@property
	def pattern(self):
		"""The pattern used for regex with strptime "%" time fields.

		This should be a valid regular expression, of which matching string
		will be extracted from the log line. strptime style "%" fields will
		be replaced by appropriate regular expressions, or custom regex
		groups with names as per the strptime fields can also be used
		instead.
		"""
		return self._pattern

	@pattern.setter
	def pattern(self, pattern):
		self.setRegex(pattern)

	def setRegex(self, pattern, wordBegin=True, wordEnd=True):
		"""Set the strptime-style pattern; builds and installs the regex.

		Raises TypeError if the pattern cannot be converted/compiled.
		"""
		# original pattern:
		self._pattern = pattern
		# if unbound signalled - reset boundaries left and right:
		if RE_EXLINE_NO_BOUNDS.search(pattern):
			pattern = RE_EXLINE_NO_BOUNDS.sub('', pattern)
			wordBegin = wordEnd = False
		# if explicit given {^LN-BEG} - remove it from pattern and set 'start' in wordBegin:
		if wordBegin and RE_EXLINE_BOUND_BEG.search(pattern):
			pattern = RE_EXLINE_BOUND_BEG.sub('', pattern)
			wordBegin = 'start'
		try:
			# wrap to regex:
			fmt = self._patternRE.sub(r'%(\1)s', pattern)
			self.name = fmt % self._patternName
			regex = fmt % timeRE
			# if expected add (?iu) for "ignore case" and "unicode":
			if RE_ALPHA_PATTERN.search(pattern):
				regex = r'(?iu)' + regex
			super(DatePatternRegex, self).setRegex(regex, wordBegin, wordEnd)
		except Exception as e:
			raise TypeError("Failed to set datepattern '%s' (may be an invalid format or unescaped percent char): %s" % (pattern, e))

	def getDate(self, line, dateMatch=None, default_tz=None):
		"""Method to return the date for a log line.

		This uses a custom version of strptime, using the named groups
		from the instances `pattern` property.

		Parameters
		----------
		line : str
			Log line, of which the date should be extracted from.
		default_tz: optionally used to correct timezone

		Returns
		-------
		(float, str)
			Tuple containing a Unix timestamp, and the string of the date
			which was matched and in turned used to calculated the timestamp.
		"""
		if not dateMatch:
			dateMatch = self.matchDate(line)
		if dateMatch:
			return (reGroupDictStrptime(dateMatch.groupdict(), default_tz=default_tz),
				dateMatch)
|
||||
|
||||
|
||||
class DateTai64n(DateTemplate):
	"""A date template which matches TAI64N format timestamps.

	Attributes
	----------
	name
	regex
	"""

	def __init__(self, wordBegin=False):
		DateTemplate.__init__(self)
		self.name = "TAI64N"
		# We already know the format for TAI64N
		self.setRegex("@[0-9a-f]{24}", wordBegin=wordBegin)

	def getDate(self, line, dateMatch=None, default_tz=None):
		"""Method to return the date for a log line.

		Parameters
		----------
		line : str
			Log line, of which the date should be extracted from.
		default_tz: ignored, since TAI is time zone independent

		Returns
		-------
		(float, str)
			Tuple containing a Unix timestamp, and the string of the date
			which was matched and in turned used to calculated the timestamp.
		"""
		m = dateMatch or self.matchDate(line)
		if not m:
			return None
		# hex seconds portion of the TAI64N label (skips "@" and the
		# leading offset nibble), converted from HEX into a local time stamp:
		hex_seconds = m.group(1)[2:17]
		return (int(hex_seconds, 16), m)
|
||||
169
fail2ban-master/fail2ban/server/failmanager.py
Normal file
169
fail2ban-master/fail2ban/server/failmanager.py
Normal file
@@ -0,0 +1,169 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from threading import Lock
|
||||
import logging
|
||||
|
||||
from .ticket import FailTicket, BanTicket
|
||||
from ..helpers import getLogger, BgService
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
logLevel = logging.DEBUG
|
||||
|
||||
|
||||
class FailManager:
|
||||
|
||||
def __init__(self):
|
||||
self.__lock = Lock()
|
||||
self.__failList = dict()
|
||||
self.__maxRetry = 3
|
||||
self.__maxTime = 600
|
||||
self.__failTotal = 0
|
||||
self.maxMatches = 5
|
||||
self.__bgSvc = BgService()
|
||||
|
||||
def setFailTotal(self, value):
|
||||
self.__failTotal = value
|
||||
|
||||
def getFailTotal(self):
|
||||
return self.__failTotal
|
||||
|
||||
def getFailCount(self):
|
||||
# may be slow on large list of failures, should be used for test purposes only...
|
||||
with self.__lock:
|
||||
return len(self.__failList), sum([f.getRetry() for f in list(self.__failList.values())])
|
||||
|
||||
def setMaxRetry(self, value):
|
||||
self.__maxRetry = value
|
||||
|
||||
def getMaxRetry(self):
|
||||
return self.__maxRetry
|
||||
|
||||
def setMaxTime(self, value):
|
||||
self.__maxTime = value
|
||||
|
||||
def getMaxTime(self):
|
||||
return self.__maxTime
|
||||
|
||||
def addFailure(self, ticket, count=1, observed=False):
|
||||
attempts = 1
|
||||
with self.__lock:
|
||||
fid = ticket.getID()
|
||||
try:
|
||||
fData = self.__failList[fid]
|
||||
# if the same object - the same matches but +1 attempt:
|
||||
if fData is ticket:
|
||||
matches = None
|
||||
attempt = 1
|
||||
else:
|
||||
# will be incremented / extended (be sure we have at least +1 attempt):
|
||||
matches = ticket.getMatches() if self.maxMatches else None
|
||||
attempt = ticket.getAttempt()
|
||||
if attempt <= 0:
|
||||
attempt += 1
|
||||
unixTime = ticket.getTime()
|
||||
fData.adjustTime(unixTime, self.__maxTime)
|
||||
fData.inc(matches, attempt, count)
|
||||
# truncate to maxMatches:
|
||||
if self.maxMatches:
|
||||
matches = fData.getMatches()
|
||||
if len(matches) > self.maxMatches:
|
||||
fData.setMatches(matches[-self.maxMatches:])
|
||||
else:
|
||||
fData.setMatches(None)
|
||||
except KeyError:
|
||||
# not found - already banned - prevent to add failure if comes from observer:
|
||||
if observed or isinstance(ticket, BanTicket):
|
||||
return ticket.getRetry()
|
||||
# if already FailTicket - add it direct, otherwise create (using copy all ticket data):
|
||||
if isinstance(ticket, FailTicket):
|
||||
fData = ticket;
|
||||
else:
|
||||
fData = FailTicket.wrap(ticket)
|
||||
if count > ticket.getAttempt():
|
||||
fData.setRetry(count)
|
||||
self.__failList[fid] = fData
|
||||
|
||||
attempts = fData.getRetry()
|
||||
self.__failTotal += 1
|
||||
|
||||
if logSys.getEffectiveLevel() <= logLevel:
|
||||
# yoh: Since composing this list might be somewhat time consuming
|
||||
# in case of having many active failures, it should be ran only
|
||||
# if debug level is "low" enough
|
||||
failures_summary = ', '.join(['%s:%d' % (k, v.getRetry())
|
||||
for k,v in self.__failList.items()])
|
||||
logSys.log(logLevel, "Total # of detected failures: %d. Current failures from %d IPs (IP:count): %s"
|
||||
% (self.__failTotal, len(self.__failList), failures_summary))
|
||||
|
||||
self.__bgSvc.service()
|
||||
return attempts
|
||||
|
||||
def size(self):
|
||||
return len(self.__failList)
|
||||
|
||||
def cleanup(self, time):
|
||||
time -= self.__maxTime
|
||||
with self.__lock:
|
||||
todelete = [fid for fid,item in self.__failList.items() \
|
||||
if item.getTime() <= time]
|
||||
if len(todelete) == len(self.__failList):
|
||||
# remove all:
|
||||
self.__failList = dict()
|
||||
elif not len(todelete):
|
||||
# nothing:
|
||||
return
|
||||
if len(todelete) / 2.0 <= len(self.__failList) / 3.0:
|
||||
# few as 2/3 should be removed - remove particular items:
|
||||
for fid in todelete:
|
||||
del self.__failList[fid]
|
||||
else:
|
||||
# create new dictionary without items to be deleted:
|
||||
self.__failList = dict((fid,item) for fid,item in self.__failList.items() \
|
||||
if item.getTime() > time)
|
||||
self.__bgSvc.service()
|
||||
|
||||
def delFailure(self, fid):
|
||||
with self.__lock:
|
||||
try:
|
||||
del self.__failList[fid]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
	def toBan(self, fid=None):
		"""Return (and remove) one entry that reached the retry threshold.

		@param fid optional failure-id checked first; when None or unknown,
			all tracked entries are scanned
		@return the ticket ready to be banned
		@raise FailManagerEmpty when no entry reached maxRetry
		"""
		with self.__lock:
			# fast path for a known single id, otherwise iterate the whole list:
			for fid in ([fid] if fid is not None and fid in self.__failList else self.__failList):
				data = self.__failList[fid]
				if data.getRetry() >= self.__maxRetry:
					# ripe for ban - detach from fail list and hand over to caller:
					del self.__failList[fid]
					return data
		# nothing to ban - let the background service run, then signal emptiness:
		self.__bgSvc.service()
		raise FailManagerEmpty
|
||||
|
||||
|
||||
class FailManagerEmpty(Exception):
	"""Raised by FailManager.toBan() when no entry is ripe for banning."""
|
||||
466
fail2ban-master/fail2ban/server/failregex.py
Normal file
466
fail2ban-master/fail2ban/server/failregex.py
Normal file
@@ -0,0 +1,466 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
from .ipdns import IPAddr
|
||||
|
||||
|
||||
# matches any f2b tag like <HOST>, </F-USER>, <F-ID/> inside a failregex:
FTAG_CRE = re.compile(r'</?[\w\-]+/?>')

# matches custom failure tags F-NAME / /F-NAME (open/close); currently uppercase only
FCUSTNAME_CRE = re.compile(r'^(/?)F-([A-Z0-9_\-]+)$'); # currently uppercase only

# building blocks for host/address matching; indexed via RI_* below
R_HOST = [
	# separated ipv4:
	r"""(?:::f{4,6}:)?(?P<ip4>%s)""" % (IPAddr.IP_4_RE,),
	# separated ipv6:
	r"""(?P<ip6>%s)""" % (IPAddr.IP_6_RE,),
	# separated dns:
	r"""(?P<dns>[\w\-.^_]*\w)""",
	# place-holder for ADDR tag-replacement (joined):
	"",
	# place-holder for HOST tag replacement (joined):
	"",
	# CIDR in simplest integer form:
	r"(?P<cidr>\d+)",
	# place-holder for SUBNET tag-replacement
	"",
]
# indexes into R_HOST:
RI_IPV4 = 0
RI_IPV6 = 1
RI_DNS = 2
RI_ADDR = 3
RI_HOST = 4
RI_CIDR = 5
RI_SUBNET = 6

# fill the joined place-holders from the elementary pieces:
R_HOST[RI_ADDR] = r"\[?(?:%s|%s)\]?" % (R_HOST[RI_IPV4], R_HOST[RI_IPV6],)
R_HOST[RI_HOST] = r"(?:%s|%s)" % (R_HOST[RI_ADDR], R_HOST[RI_DNS],)
R_HOST[RI_SUBNET] = r"\[?(?:%s|%s)(?:/%s)?\]?" % (R_HOST[RI_IPV4], R_HOST[RI_IPV6], R_HOST[RI_CIDR],)

# static tag -> regex replacements used by Regex._resolveHostTag:
RH4TAG = {
	# separated ipv4 (self closed, closed):
	"IP4": R_HOST[RI_IPV4],
	"F-IP4/": R_HOST[RI_IPV4],
	# separated ipv6 (self closed, closed):
	"IP6": R_HOST[RI_IPV6],
	"F-IP6/": R_HOST[RI_IPV6],
	# 2 address groups instead of <ADDR> - in opposition to `<HOST>`,
	# for separate usage of 2 address groups only (regardless of `usedns`), `ip4` and `ip6` together
	"ADDR": R_HOST[RI_ADDR],
	"F-ADDR/": R_HOST[RI_ADDR],
	# subnet tags for usage as `<ADDR>/<CIDR>` or `<SUBNET>`:
	"CIDR": R_HOST[RI_CIDR],
	"F-CIDR/": R_HOST[RI_CIDR],
	"SUBNET": R_HOST[RI_SUBNET],
	"F-SUBNET/":R_HOST[RI_SUBNET],
	# separated dns (self closed, closed):
	"DNS": R_HOST[RI_DNS],
	"F-DNS/": R_HOST[RI_DNS],
	# default failure-id as no space tag:
	"F-ID/": r"""(?P<fid>\S+)""",
	# default failure port, like 80 or http :
	"F-PORT/": r"""(?P<fport>\w+)""",
}
|
||||
|
||||
# default failure groups map for customizable expressions (with different group-id):
R_MAP = {
	"id": "fid",
	"port": "fport",
}

# map global flags like ((?i)xxx) or (?:(?i)xxx) to local flags (?i:xxx) if supported by RE-engine in this python version:
try:
	re.search("^re(?i:val)$", "reVAL")
	R_GLOB2LOCFLAGS = ( re.compile(r"(?<!\\)\((?:\?:)?(\(\?[a-z]+)\)"), r"\1:" )
except re.error:
	# scoped inline flags are unsupported by this RE-engine (python < 3.6) -
	# leave expressions untouched (was a bare `except:`, narrowed to the
	# exception the capability probe can actually raise)
	R_GLOB2LOCFLAGS = ()

def mapTag2Opt(tag):
	"""Map a tag name to its ticket option name.

	Lower-cases *tag* and translates well-known aliases
	("id" -> "fid", "port" -> "fport"); any other tag maps
	to its own lower-case form.
	"""
	tag = tag.lower()
	return R_MAP.get(tag, tag)
|
||||
|
||||
|
||||
# complex names:
# ALT_ - alternate names to be merged, e. g. alt_user_1 -> user ...
ALTNAME_PRE = 'alt_'
# TUPLE_ - names of parts to be combined to single value as tuple
TUPNAME_PRE = 'tuple_'

# recognized complex-name prefixes and the parser splitting a group name
# into (prefix, base-name), ignoring an optional trailing _N index:
COMPLNAME_PRE = (ALTNAME_PRE, TUPNAME_PRE)
COMPLNAME_CRE = re.compile(r'^(' + '|'.join(COMPLNAME_PRE) + r')(.*?)(?:_\d+)?$')
|
||||
|
||||
|
||||
##
|
||||
# Regular expression class.
|
||||
#
|
||||
# This class represents a regular expression with its compiled version.
|
||||
|
||||
class Regex:
	"""Regular expression wrapper with f2b tag expansion and match caching."""

	##
	# Constructor.
	#
	# Creates a new object. This method can throw RegexException in order to
	# avoid construction of invalid object.
	# @param regex the regular expression (may contain f2b tags like <HOST>)
	# @param multiline compile with re.MULTILINE when True

	def __init__(self, regex, multiline=False, **kwargs):
		# cached match object of the last search() call (None = no match yet)
		self._matchCache = None
		# Perform shortcuts expansions.
		# Replace standard f2b-tags (like "<HOST>", etc) using default regular expressions:
		regex = Regex._resolveHostTag(regex, **kwargs)
		#
		if regex.lstrip() == '':
			raise RegexException("Cannot add empty regex")
		# special handling wrapping global flags to local flags:
		if R_GLOB2LOCFLAGS:
			regex = R_GLOB2LOCFLAGS[0].sub(R_GLOB2LOCFLAGS[1], regex)
		try:
			self._regexObj = re.compile(regex, re.MULTILINE if multiline else 0)
			self._regex = regex
			# collect complex group names (alt_* merged, tuple_* combined):
			self._altValues = []
			self._tupleValues = []
			for k in [k for k in self._regexObj.groupindex if len(k) > len(COMPLNAME_PRE[0])]:
				n = COMPLNAME_CRE.match(k)
				if n:
					g, n = n.group(1), mapTag2Opt(n.group(2))
					if g == ALTNAME_PRE:
						self._altValues.append((k,n))
					else:
						self._tupleValues.append((k,n))
			self._altValues.sort()
			self._tupleValues.sort()
			self._altValues = self._altValues if len(self._altValues) else None
			self._tupleValues = self._tupleValues if len(self._tupleValues) else None
		except re.error as e:
			raise RegexException("Unable to compile regular expression '%s':\n%s" %
				(regex, e))
		# set fetch handler depending on presence of alternate (or tuple) tags:
		self.getGroups = self._getGroupsWithAlt if (self._altValues or self._tupleValues) else self._getGroups

	def __str__(self):
		return "%s(%r)" % (self.__class__.__name__, self._regex)

	##
	# Replaces "<HOST>", "<IP4>", "<IP6>", "<FID>" with default regular expression for host
	#
	# (see gh-1374 for the discussion about other candidates)
	# @return the replaced regular expression as string

	@staticmethod
	def _resolveHostTag(regex, useDns="yes"):

		# tracks F-... tags already opened, so close tags can be matched:
		openTags = dict()
		props = {
			'nl': 0, # new lines counter by <SKIPLINES> tag;
		}
		# tag interpolation callable:
		def substTag(m):
			tag = m.group()
			tn = tag[1:-1]
			# 3 groups instead of <HOST> - separated ipv4, ipv6 and host (dns)
			if tn == "HOST":
				return R_HOST[RI_HOST if useDns not in ("no",) else RI_ADDR]
			# replace "<SKIPLINES>" with regular expression for multiple lines (by buffering with maxlines)
			if tn == "SKIPLINES":
				nl = props['nl']
				props['nl'] = nl + 1
				return r"\n(?P<skiplines%i>(?:(?:.*\n)*?))" % (nl,)
			# static replacement from RH4TAG:
			try:
				return RH4TAG[tn]
			except KeyError:
				pass

			# (begin / end tag) for customizable expressions, additionally used as
			# user custom tags (match will be stored in ticket data, can be used in actions):
			m = FCUSTNAME_CRE.match(tn)
			if m: # match F-...
				m = m.groups()
				tn = m[1]
				# close tag:
				if m[0]:
					# check it was already open:
					if openTags.get(tn):
						return ")"
					return tag; # tag not opened, use original
				# open tag:
				openTags[tn] = 1
				# if should be mapped:
				tn = mapTag2Opt(tn)
				return "(?P<%s>" % (tn,)

			# original, no replacement:
			return tag

		# substitute tags:
		return FTAG_CRE.sub(substTag, regex)

	##
	# Gets the regular expression.
	#
	# The effective regular expression used is returned.
	# @return the regular expression

	def getRegex(self):
		return self._regex

	##
	# Returns string buffer using join of the tupleLines.
	# (even elements of each tuple are the text parts, odd the date match)
	#
	@staticmethod
	def _tupleLinesBuf(tupleLines):
		return "\n".join(["".join(v[::2]) for v in tupleLines]) + "\n"

	##
	# Searches the regular expression.
	#
	# Sets an internal cache (match object) in order to avoid searching for
	# the pattern again. This method must be called before calling any other
	# method of this object.
	# @param tupleLines a list of tuples ( prematch, datematch, postdatematch )
	#	or an already joined string buffer
	# @param orgLines original lines (defaults to tupleLines)

	def search(self, tupleLines, orgLines=None):
		buf = tupleLines
		if not isinstance(tupleLines, str):
			buf = Regex._tupleLinesBuf(tupleLines)
		self._matchCache = self._regexObj.search(buf)
		if self._matchCache:
			if orgLines is None: orgLines = tupleLines
			# if single-line:
			if len(orgLines) <= 1:
				self._matchedTupleLines = orgLines
				self._unmatchedTupleLines = []
			else:
				# Find start of the first line where the match was found
				try:
					matchLineStart = self._matchCache.string.rindex(
						"\n", 0, self._matchCache.start() +1 ) + 1
				except ValueError:
					matchLineStart = 0
				# Find end of the last line where the match was found
				try:
					matchLineEnd = self._matchCache.string.index(
						"\n", self._matchCache.end() - 1) + 1
				except ValueError:
					matchLineEnd = len(self._matchCache.string)

				# translate character offsets into line indexes:
				lineCount1 = self._matchCache.string.count(
					"\n", 0, matchLineStart)
				lineCount2 = self._matchCache.string.count(
					"\n", 0, matchLineEnd)
				self._matchedTupleLines = orgLines[lineCount1:lineCount2]
				self._unmatchedTupleLines = orgLines[:lineCount1]
				# lines consumed by <SKIPLINES> count as unmatched:
				n = 0
				for skippedLine in self.getSkippedLines():
					for m, matchedTupleLine in enumerate(
						self._matchedTupleLines[n:]):
						if "".join(matchedTupleLine[::2]) == skippedLine:
							self._unmatchedTupleLines.append(
								self._matchedTupleLines.pop(n+m))
							n += m
							break
				self._unmatchedTupleLines.extend(orgLines[lineCount2:])

	# Checks if the previous call to search() matched.
	#
	# @return True if a match was found, False otherwise

	def hasMatched(self):
		if self._matchCache:
			return True
		else:
			return False

	##
	# Returns all matched groups.
	#

	def _getGroups(self):
		return self._matchCache.groupdict()

	def _getGroupsWithAlt(self):
		fail = self._matchCache.groupdict()
		#fail = fail.copy()
		# merge alternate values (e. g. 'alt_user_1' -> 'user' or 'alt_host' -> 'host'):
		if self._altValues:
			for k,n in self._altValues:
				v = fail.get(k)
				if v and not fail.get(n):
					fail[n] = v
		# combine tuple values (e. g. 'id', 'tuple_id' ... 'tuple_id_N' -> 'id'):
		if self._tupleValues:
			for k,n in self._tupleValues:
				v = fail.get(k)
				t = fail.get(n)
				if isinstance(t, tuple):
					t += (v,)
				else:
					t = (t,v,)
				fail[n] = t
		return fail

	def getGroups(self): # pragma: no cover - abstract function (replaced in __init__)
		pass

	##
	# Returns skipped lines.
	#
	# This returns skipped lines captured by the <SKIPLINES> tag.
	# @return list of skipped lines

	def getSkippedLines(self):
		if not self._matchCache:
			return []
		skippedLines = ""
		n = 0
		while True:
			try:
				if self._matchCache.group("skiplines%i" % n) is not None:
					skippedLines += self._matchCache.group("skiplines%i" % n)
				n += 1
			except IndexError:
				break
			# KeyError is because of PyPy issue1665 affecting pypy <= 2.2.1
			except KeyError:
				if 'PyPy' not in sys.version: # pragma: no cover - not sure this is even reachable
					raise
				break
		return skippedLines.splitlines(False)

	##
	# Returns unmatched lines.
	#
	# This returns unmatched lines including captured by the <SKIPLINES> tag.
	# @return list of unmatched lines

	def getUnmatchedTupleLines(self):
		if not self.hasMatched():
			return []
		else:
			return self._unmatchedTupleLines

	def getUnmatchedLines(self):
		if not self.hasMatched():
			return []
		else:
			return ["".join(line) for line in self._unmatchedTupleLines]

	##
	# Returns matched lines.
	#
	# This returns matched lines by excluding those captured
	# by the <SKIPLINES> tag.
	# @return list of matched lines

	def getMatchedTupleLines(self):
		if not self.hasMatched():
			return []
		else:
			return self._matchedTupleLines

	def getMatchedLines(self):
		if not self.hasMatched():
			return []
		else:
			return ["".join(line) for line in self._matchedTupleLines]
|
||||
|
||||
|
||||
##
|
||||
# Exception dedicated to the class Regex.
|
||||
|
||||
class RegexException(Exception):
	"""Raised by Regex/FailRegex for empty, invalid or id-less expressions."""
|
||||
|
||||
|
||||
##
# Groups used as failure identifier.
#
# The order of this tuple is important while searching for failure-id
# (NOTE: the name "GROPS" is a historical typo, kept for backward API compatibility)
#
FAILURE_ID_GROPS = ("fid", "ip4", "ip6", "dns")

# Additionally allows multi-line failure-id (used for wrapping e. g. conn-id to host)
#
FAILURE_ID_PRESENTS = FAILURE_ID_GROPS + ("mlfid",)
|
||||
|
||||
##
|
||||
# Regular expression class.
|
||||
#
|
||||
# This class represents a regular expression with its compiled version.
|
||||
|
||||
class FailRegex(Regex):
	"""Regex that is guaranteed to carry a failure-id group (fid/ip4/ip6/dns)."""

	##
	# Constructor.
	#
	# Creates a new object. This method can throw RegexException in order to
	# avoid construction of invalid object.
	# @param regex the regular expression
	# @param prefRegex optional prefix regex whose groups may already supply
	#	the failure-id (then this regex needs none of its own)

	def __init__(self, regex, prefRegex=None, **kwargs):
		# Initializes the parent.
		Regex.__init__(self, regex, **kwargs)
		# Check for group "dns", "ip4", "ip6", "fid" (or "mlfid"), either here
		# or in the prefix regex:
		if (not [grp for grp in FAILURE_ID_PRESENTS if grp in self._regexObj.groupindex]
			and (prefRegex is None or
				not [grp for grp in FAILURE_ID_PRESENTS if grp in prefRegex._regexObj.groupindex])
		):
			raise RegexException("No failure-id group in '%s'" % self._regex)

	##
	# Returns the matched failure id.
	#
	# This corresponds to the pattern matched by the named group from given groups.
	# @return the matched failure-id
	# @raise RegexException if none of the groups matched

	def getFailID(self, groups=FAILURE_ID_GROPS):
		fid = None
		for grp in groups:
			try:
				fid = self._matchCache.group(grp)
			except (IndexError, KeyError):
				# group not present in this expression - try next candidate
				continue
			if fid is not None:
				break
		if fid is None:
			# Gets a few information.
			s = self._matchCache.string
			r = self._matchCache.re
			raise RegexException("No group found in '%s' using '%s'" % (s, r))
		return str(fid)

	##
	# Returns the matched host.
	#
	# This corresponds to the pattern matched by the named group "ip4", "ip6" or "dns".
	# @return the matched host

	def getHost(self):
		return self.getFailID(("ip4", "ip6", "dns"))

	# Returns the matched address as IPAddr, honoring an optional cidr group.
	def getIP(self):
		fail = self.getGroups()
		return IPAddr(self.getFailID(("ip4", "ip6")), int(fail.get("cidr") or IPAddr.CIDR_UNSPEC))
|
||||
1574
fail2ban-master/fail2ban/server/filter.py
Normal file
1574
fail2ban-master/fail2ban/server/filter.py
Normal file
File diff suppressed because it is too large
Load Diff
176
fail2ban-master/fail2ban/server/filterpoll.py
Normal file
176
fail2ban-master/fail2ban/server/filterpoll.py
Normal file
@@ -0,0 +1,176 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier, Yaroslav Halchenko
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier; 2012 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from .filter import FileFilter
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger, logging
|
||||
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
|
||||
# Log reader class.
|
||||
#
|
||||
# This class reads a log file and detects login failures or anything else
|
||||
# that matches a given regular expression. This class is instantiated by
|
||||
# a Jail object.
|
||||
|
||||
class FilterPoll(FileFilter):
	"""Polling-based log file monitor (os.stat comparison per tick)."""

	##
	# Constructor.
	#
	# Initialize the filter object with default values.
	# @param jail the jail object

	def __init__(self, jail):
		FileFilter.__init__(self, jail)
		## The time of the last modification of the file.
		self.__prevStats = dict()	# path -> (mtime, ino, size)
		self.__file404Cnt = dict()	# path -> consecutive stat-error counter
		logSys.debug("Created FilterPoll")

	##
	# Add a log file path
	#
	# @param path log file path

	def _addLogPath(self, path):
		self.__prevStats[path] = (0, None, None) # mtime, ino, size
		self.__file404Cnt[path] = 0

	##
	# Delete a log path
	#
	# @param path the log file to delete

	def _delLogPath(self, path):
		del self.__prevStats[path]
		del self.__file404Cnt[path]

	##
	# Get a modified log path at once
	#
	# Appends every modified monitored file to modlst and returns it.
	def getModified(self, modlst):
		for filename in self.getLogPaths():
			if self.isModified(filename):
				modlst.append(filename)
		return modlst

	##
	# Main loop.
	#
	# This function is the main loop of the thread. It checks if the
	# file has been modified and looks for failures.
	# @return True when the thread exits nicely

	def run(self):
		while self.active:
			try:
				if logSys.getEffectiveLevel() <= 4:
					logSys.log(4, "Woke up idle=%s with %d files monitored",
						self.idle, self.getLogCount())
				if self.idle:
					# wait (up to 10 sleep intervals) until un-idled or stopped:
					if not Utils.wait_for(lambda: not self.active or not self.idle,
						self.sleeptime * 10, self.sleeptime
					):
						self.ticks += 1
						continue
				# Get file modification
				modlst = []
				Utils.wait_for(lambda: not self.active or self.getModified(modlst),
					self.sleeptime)
				if not self.active: # pragma: no cover - timing
					break
				for filename in modlst:
					self.getFailures(filename)

				self.ticks += 1
				if self.ticks % 10 == 0:
					self.performSvc()
			except Exception as e: # pragma: no cover
				if not self.active: # if not active - error by stop...
					break
				logSys.error("Caught unhandled exception in main cycle: %r", e,
					exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
				# incr common error counter:
				self.commonError("unhandled", e)
		logSys.debug("[%s] filter terminated", self.jailName)
		return True

	##
	# Checks if the log file has been modified.
	#
	# Checks if the log file has been modified using os.stat().
	# @return True if log file has been modified

	def isModified(self, filename):
		try:
			logStats = os.stat(filename)
			stats = logStats.st_mtime, logStats.st_ino, logStats.st_size
			pstats = self.__prevStats.get(filename, (0,))
			if logSys.getEffectiveLevel() <= 4:
				# we do not want to waste time on strftime etc if not necessary
				dt = logStats.st_mtime - pstats[0]
				logSys.log(4, "Checking %s for being modified. Previous/current stats: %s / %s. dt: %s",
					filename, pstats, stats, dt)
				# os.system("stat %s | grep Modify" % filename)
			self.__file404Cnt[filename] = 0
			if pstats == stats:
				return False
			logSys.debug("%s has been modified", filename)
			self.__prevStats[filename] = stats
			return True
		except Exception as e:
			# still alive (may be deleted because multi-threaded):
			if not self.getLog(filename) or self.__prevStats.get(filename) is None:
				logSys.warning("Log %r seems to be down: %s", filename, e)
				return False
			# log error:
			if self.__file404Cnt[filename] < 2:
				# NOTE(review): e.errno assumes an OSError here; a non-OS
				# exception would raise AttributeError - confirm intended
				if e.errno == 2:
					logSys.debug("Log absence detected (possibly rotation) for %s, reason: %s",
						filename, e)
				else: # pragma: no cover
					logSys.error("Unable to get stat on %s because of: %s",
						filename, e,
						exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# increase file and common error counters:
			self.__file404Cnt[filename] += 1
			self.commonError()
			if self.__file404Cnt[filename] > 50:
				logSys.warning("Too many errors. Remove file %r from monitoring process", filename)
				self.__file404Cnt[filename] = 0
				self.delLogPath(filename)
			return False

	# Returns the list of paths currently known to the error counter.
	def getPendingPaths(self):
		return list(self.__file404Cnt.keys())
|
||||
401
fail2ban-master/fail2ban/server/filterpyinotify.py
Normal file
401
fail2ban-master/fail2ban/server/filterpyinotify.py
Normal file
@@ -0,0 +1,401 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Original author: Cyril Jaquier
|
||||
|
||||
__author__ = "Cyril Jaquier, Lee Clemens, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Lee Clemens, 2012 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import logging
|
||||
import os
|
||||
from os.path import dirname, sep as pathsep
|
||||
|
||||
from .failmanager import FailManagerEmpty
|
||||
from .filter import FileFilter
|
||||
from .mytime import MyTime, time
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger
|
||||
|
||||
# pyinotify may have dependency to asyncore, so import it after helper to ensure
|
||||
# we've a path to compat folder:
|
||||
import pyinotify
|
||||
|
||||
# Verify that pyinotify is functional on this system
|
||||
# Even though imports -- might be dysfunctional, e.g. as on kfreebsd
|
||||
# Verify that pyinotify is functional on this system
# Even though imports -- might be dysfunctional, e.g. as on kfreebsd
# (instantiating a WatchManager touches the inotify syscalls):
try:
	manager = pyinotify.WatchManager()
	del manager
except Exception as e: # pragma: no cover
	raise ImportError("Pyinotify is probably not functional on this system: %s"
		% str(e))
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
# Override pyinotify default logger/init-handler so its messages are routed
# through fail2ban's own logger instead of pyinotify's default one:
def _pyinotify_logger_init(): # pragma: no cover
	return logSys
pyinotify._logger_init = _pyinotify_logger_init
pyinotify.log = logSys
|
||||
|
||||
##
|
||||
# Log reader class.
|
||||
#
|
||||
# This class reads a log file and detects login failures or anything else
|
||||
# that matches a given regular expression. This class is instantiated by
|
||||
# a Jail object.
|
||||
|
||||
class FilterPyinotify(FileFilter):
|
||||
##
|
||||
# Constructor.
|
||||
#
|
||||
# Initialize the filter object with default values.
|
||||
# @param jail the jail object
|
||||
|
||||
	def __init__(self, jail):
		"""Initialize the inotify-based filter for *jail*."""
		FileFilter.__init__(self, jail)
		# Pyinotify watch manager
		self.__monitor = pyinotify.WatchManager()
		self.__notifier = None
		self.__watchFiles = dict()	# file path -> watch descriptor
		self.__watchDirs = dict()	# directory path -> watch descriptor
		self.__pending = dict()		# path -> [retard interval, isDir]
		self.__pendingChkTime = 0	# last pending-check timestamp
		self.__pendingMinTime = 60	# minimal delay before next pending check
		logSys.debug("Created FilterPyinotify")
|
||||
|
||||
	def callback(self, event, origin=''):
		"""Handle a single pyinotify event for a watched file or directory."""
		logSys.log(4, "[%s] %sCallback for Event: %s", self.jailName, origin, event)
		path = event.pathname
		# check watching of this path:
		isWF = False
		isWD = path in self.__watchDirs
		if not isWD and path in self.__watchFiles:
			isWF = True
		assumeNoDir = False
		if event.mask & ( pyinotify.IN_CREATE | pyinotify.IN_MOVED_TO ):
			# skip directories altogether
			if event.mask & pyinotify.IN_ISDIR:
				logSys.debug("Ignoring creation of directory %s", path)
				return
			# check if that is a file we care about
			if not isWF:
				logSys.debug("Ignoring creation of %s we do not monitor", path)
				return
			# newly (re-)created monitored file - renew its watch:
			self._refreshWatcher(path)
		elif event.mask & (pyinotify.IN_IGNORED | pyinotify.IN_MOVE_SELF | pyinotify.IN_DELETE_SELF):
			assumeNoDir = event.mask & (pyinotify.IN_MOVE_SELF | pyinotify.IN_DELETE_SELF)
			# fix pyinotify behavior with '-unknown-path' (if target not watched also):
			if (assumeNoDir and
					path.endswith('-unknown-path') and not isWF and not isWD
			):
				path = path[:-len('-unknown-path')]
				isWD = path in self.__watchDirs
			# watch was removed for some reasons (log-rotate?):
			if isWD and (assumeNoDir or not os.path.isdir(path)):
				self._addPending(path, event, isDir=True)
			elif not isWF: # pragma: no cover (assume too sporadic)
				# a parent of watched dirs disappeared - mark them pending:
				for logpath in self.__watchDirs:
					if logpath.startswith(path + pathsep) and (assumeNoDir or not os.path.isdir(logpath)):
						self._addPending(logpath, event, isDir=True)
			if isWF and not os.path.isfile(path):
				# watched file vanished - wait for its re-appearance:
				self._addPending(path, event)
				return
		# do nothing if idle:
		if self.idle: # pragma: no cover (too sporadic to get idle in callback)
			return
		# be sure we process a file:
		if not isWF:
			logSys.debug("Ignoring event (%s) of %s we do not monitor", event.maskname, path)
			return
		self._process_file(path)
|
||||
|
||||
	def _process_file(self, path):
		"""Process a given file

		Scans *path* for new failures unless the filter is idle.

		TODO -- RF:
		this is a common logic and must be shared/provided by FileFilter
		"""
		if not self.idle:
			self.getFailures(path)
|
||||
|
||||
	def _addPending(self, path, reason, isDir=False):
		"""Register *path* as awaiting (re-)appearance.

		@param reason a pyinotify.Event or a (tag, path) pair for logging
		@param isDir True for a directory, None for the special
			process-only case (no monitor refresh needed)
		"""
		if path not in self.__pending:
			# start with the shortest retry interval; it backs off in _checkPending
			self.__pending[path] = [Utils.DEFAULT_SLEEP_INTERVAL, isDir];
			self.__pendingMinTime = 0
			if isinstance(reason, pyinotify.Event):
				reason = [reason.maskname, reason.pathname]
			logSys.log(logging.MSG, "Log absence detected (possibly rotation) for %s, reason: %s of %s",
				path, *reason)
|
||||
|
||||
def _delPending(self, path):
|
||||
try:
|
||||
del self.__pending[path]
|
||||
except KeyError: pass
|
||||
|
||||
def getPendingPaths(self):
|
||||
return list(self.__pending.keys())
|
||||
|
||||
	def _checkPending(self):
		"""Re-check pending paths; resume monitoring for those that re-appeared."""
		if not self.__pending:
			return
		ntm = time.time()
		# throttle: do nothing until the minimal retry interval elapsed
		if ntm < self.__pendingChkTime + self.__pendingMinTime:
			return
		found = {}
		minTime = 60
		for path, (retardTM, isDir) in list(self.__pending.items()):
			# not yet due for this entry:
			if ntm - self.__pendingChkTime < retardTM:
				if minTime > retardTM: minTime = retardTM
				continue
			chkpath = os.path.isdir if isDir else os.path.isfile
			if not chkpath(path): # not found - prolong for next time
				# exponential back-off, capped at 60s:
				if retardTM < 60: retardTM *= 2
				if minTime > retardTM: minTime = retardTM
				try:
					self.__pending[path][0] = retardTM
				except KeyError: pass
				continue
			logSys.log(logging.MSG, "Log presence detected for %s %s",
				"directory" if isDir else "file", path)
			found[path] = isDir
		self.__pendingChkTime = time.time()
		self.__pendingMinTime = minTime
		# process now because we've missed it in monitoring:
		for path, isDir in found.items():
			self._delPending(path)
			# refresh monitoring of this:
			if isDir is not None:
				self._refreshWatcher(path, isDir=isDir)
			if isDir:
				# check all files belong to this dir:
				for logpath in list(self.__watchFiles):
					if logpath.startswith(path + pathsep):
						# if still no file - add to pending, otherwise refresh and process:
						if not os.path.isfile(logpath):
							self._addPending(logpath, ('FROM_PARDIR', path))
						else:
							self._refreshWatcher(logpath)
							self._process_file(logpath)
			else:
				# process (possibly no old events for it from watcher):
				self._process_file(path)
|
||||
|
||||
	def _refreshWatcher(self, oldPath, newPath=None, isDir=False):
		"""Replace the watcher of *oldPath* with one for *newPath* (defaults to same path)."""
		if not newPath: newPath = oldPath
		# we need to substitute the watcher with a new one, so first
		# remove old one and then place a new one
		if not isDir:
			self._delFileWatcher(oldPath)
			self._addFileWatcher(newPath)
		else:
			self._delDirWatcher(oldPath)
			self._addDirWatcher(newPath)
|
||||
|
||||
	def _addFileWatcher(self, path):
		"""Watch *path* for modifications (and its directory for re-creation)."""
		# we need to watch also the directory for IN_CREATE
		self._addDirWatcher(dirname(path))
		# add file watcher:
		wd = self.__monitor.add_watch(path, pyinotify.IN_MODIFY)
		self.__watchFiles.update(wd)
		logSys.debug("Added file watcher for %s", path)
|
||||
|
||||
	def _delWatch(self, wdInt):
		"""Remove the pyinotify watch *wdInt*; return True if it existed."""
		m = self.__monitor
		try:
			if m.get_path(wdInt) is not None:
				wd = m.rm_watch(wdInt, quiet=False)
				return True
		except pyinotify.WatchManagerError as e:
			# EINVAL means the kernel already dropped the watch - benign
			if m.get_path(wdInt) is not None and not str(e).endswith("(EINVAL)"): # pragma: no cover
				logSys.debug("Remove watch causes: %s", e)
				raise e
		return False
|
||||
|
||||
def _delFileWatcher(self, path):
    """Remove the file watcher registered for *path*.

    Returns True if a watcher entry existed (even if the kernel watch
    was already gone), False when no entry was registered.
    """
    try:
        wdInt = self.__watchFiles.pop(path)
        if not self._delWatch(wdInt):
            logSys.debug("Non-existing file watcher %r for file %s", wdInt, path)
        logSys.debug("Removed file watcher for %s", path)
        return True
    except KeyError: # pragma: no cover
        # no watcher registered for this path
        pass
    return False
|
||||
|
||||
def _addDirWatcher(self, path_dir):
    """Watch *path_dir* for creation/move/delete events (idempotent).

    Used to notice log files appearing, being rotated in/out, or the
    directory itself going away.
    """
    # Add watch for the directory (only once per directory):
    if path_dir not in self.__watchDirs:
        self.__watchDirs.update(
            self.__monitor.add_watch(path_dir, pyinotify.IN_CREATE |
                pyinotify.IN_MOVED_TO | pyinotify.IN_MOVE_SELF |
                pyinotify.IN_DELETE_SELF | pyinotify.IN_ISDIR))
        logSys.debug("Added monitor for the parent directory %s", path_dir)
|
||||
|
||||
def _delDirWatcher(self, path_dir):
    """Remove the directory watcher registered for *path_dir* (no-op if absent)."""
    # Remove watches for the directory:
    try:
        wdInt = self.__watchDirs.pop(path_dir)
        if not self._delWatch(wdInt): # pragma: no cover
            logSys.debug("Non-existing file watcher %r for directory %s", wdInt, path_dir)
        logSys.debug("Removed monitor for the parent directory %s", path_dir)
    except KeyError: # pragma: no cover
        # directory was not monitored
        pass
|
||||
|
||||
##
|
||||
# Add a log file path
|
||||
#
|
||||
# @param path log file path
|
||||
|
||||
def _addLogPath(self, path):
    """Start monitoring log file *path* and schedule its initial processing."""
    self._addFileWatcher(path)
    # notify (wake up if in waiting):
    if self.active:
        self.__pendingMinTime = 0
    # retard until filter gets started, isDir=None signals special case: process file only (don't need to refresh monitor):
    self._addPending(path, ('INITIAL', path), isDir=None)
|
||||
|
||||
##
|
||||
# Delete a log path
|
||||
#
|
||||
# @param path the log file to delete
|
||||
|
||||
def _delLogPath(self, path):
    """Stop monitoring log file *path*; drop the parent-directory watch
    when no other monitored file remains under that directory.
    """
    self._delPending(path)
    if not self._delFileWatcher(path): # pragma: no cover
        logSys.error("Failed to remove watch on path: %s", path)

    path_dir = dirname(path)
    # keep the directory watch if any other watched file lives in it:
    for k in list(self.__watchFiles):
        if k.startswith(path_dir + pathsep):
            path_dir = None
            break
    if path_dir:
        # Remove watches for the directory
        # since there is no other monitored file under this directory
        self._delPending(path_dir)
        self._delDirWatcher(path_dir)
|
||||
|
||||
# pyinotify.ProcessEvent default handler:
|
||||
# pyinotify.ProcessEvent default handler:
def __process_default(self, event):
    """Dispatch an inotify event to self.callback; errors are counted,
    never propagated (the notifier loop must keep running).
    """
    try:
        self.callback(event, origin='Default ')
    except Exception as e: # pragma: no cover
        logSys.error("Error in FilterPyinotify callback: %s",
            e, exc_info=logSys.getEffectiveLevel() <= logging.DEBUG)
        # incr common error counter:
        self.commonError()
    self.ticks += 1
|
||||
|
||||
@property
def __notify_maxtout(self):
    """Poll timeout for pyinotify in milliseconds.

    Capped at 0.5s and at the pending-check interval, so pending
    files/dirs are re-examined in time.
    """
    # timeout for pyinotify must be set in milliseconds (fail2ban time values are
    # floats contain seconds), max 0.5 sec (additionally regards pending check time)
    return min(self.sleeptime, 0.5, self.__pendingMinTime) * 1000
|
||||
|
||||
##
|
||||
# Main loop.
|
||||
#
|
||||
# Since all detection is offloaded to pyinotifier -- no manual
|
||||
# loop is necessary
|
||||
|
||||
def run(self):
    """Main monitor loop of the pyinotify-backed filter.

    Sets up a pyinotify Notifier whose default handler forwards events
    to this filter, then loops: process queued events, wait for new ones
    (or timeout), and periodically re-check pending (not-yet-existing)
    files/dirs, e.g. after logrotate. Returns True on normal shutdown.
    """
    prcevent = pyinotify.ProcessEvent()
    prcevent.process_default = self.__process_default
    self.__notifier = pyinotify.Notifier(self.__monitor,
        prcevent, timeout=self.__notify_maxtout)
    logSys.debug("[%s] filter started (pyinotifier)", self.jailName)
    while self.active:
        try:

            # slow check events while idle:
            if self.idle:
                if Utils.wait_for(lambda: not self.active or not self.idle,
                    min(self.sleeptime * 10, self.__pendingMinTime),
                    min(self.sleeptime, self.__pendingMinTime)
                ):
                    if not self.active: break

            # default pyinotify handling using Notifier:
            self.__notifier.process_events()

            # wait for events / timeout:
            def __check_events():
                # truthy when stopping, when inotify has events, or when
                # pending paths need a timed re-check:
                return (
                    not self.active
                    or bool(self.__notifier.check_events(timeout=self.__notify_maxtout))
                    or (self.__pendingMinTime and self.__pending)
                )
            wres = Utils.wait_for(__check_events, min(self.sleeptime, self.__pendingMinTime))
            if wres:
                if not self.active: break
                # a dict result signals the pending-check branch, not inotify data:
                if not isinstance(wres, dict):
                    self.__notifier.read_events()

            self.ticks += 1

            # check pending files/dirs (logrotate ready):
            if self.idle:
                continue
            self._checkPending()
            if self.ticks % 10 == 0:
                self.performSvc()

        except Exception as e: # pragma: no cover
            if not self.active: # if not active - error by stop...
                break
            logSys.error("Caught unhandled exception in main cycle: %r", e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
            # incr common error counter:
            self.commonError("unhandled", e)

    logSys.debug("[%s] filter exited (pyinotifier)", self.jailName)
    self.done()

    return True
|
||||
|
||||
##
|
||||
# Clean-up: then stop the 'Notifier'
|
||||
|
||||
def afterStop(self):
    """Stop and release the pyinotify Notifier after the filter stops."""
    try:
        if self.__notifier: # stop the notifier
            self.__notifier.stop()
            self.__notifier = None
    except AttributeError: # pragma: no cover
        # notifier may be partially torn down already; re-raise only if
        # it still appears to exist:
        if self.__notifier: raise
|
||||
|
||||
##
|
||||
# Wait for exit with cleanup.
|
||||
|
||||
def join(self):
    """Wait for filter termination with cleanup (idempotent).

    Replaces itself with a no-op so repeated join() calls are harmless.
    """
    self.join = lambda *args: 0
    self.__cleanup()
    super(FilterPyinotify, self).join()
    logSys.debug("[%s] filter terminated (pyinotifier)", self.jailName)
|
||||
|
||||
##
|
||||
# Deallocates the resources used by pyinotify.
|
||||
|
||||
def __cleanup(self):
    """Deallocate pyinotify resources (waits for the notifier to be
    released by the run loop before dropping references)."""
    if self.__notifier:
        if Utils.wait_for(lambda: not self.__notifier, self.sleeptime * 10):
            self.__notifier = None
    self.__monitor = None
|
||||
565
fail2ban-master/fail2ban/server/filtersystemd.py
Normal file
565
fail2ban-master/fail2ban/server/filtersystemd.py
Normal file
@@ -0,0 +1,565 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
|
||||
__author__ = "Steven Hiscocks"
|
||||
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from glob import glob
|
||||
from systemd import journal
|
||||
|
||||
from .failmanager import FailManagerEmpty
|
||||
from .filter import JournalFilter, Filter
|
||||
from .mytime import MyTime
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger, logging, splitwords, uni_decode, _as_bool
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
_systemdPathCache = Utils.Cache()
|
||||
def _getSystemdPath(path):
    """Get systemd path using systemd-path command (cached).

    Falls back to the conventional locations (/var/log resp. /run/log)
    when the command fails; results (including fallbacks) are cached.
    """
    p = _systemdPathCache.get(path)
    if p: return p
    p = Utils.executeCmd('systemd-path %s' % path, timeout=10, shell=True, output=True)
    if p and p[0]:
        # first line of stdout is the resolved path:
        p = str(p[1].decode('utf-8')).split('\n')[0]
        _systemdPathCache.set(path, p)
        return p
    # command unavailable/failed - use well-known defaults:
    p = '/var/log' if path == 'system-state-logs' else ('/run/log' if path == 'system-runtime-logs' else None)
    _systemdPathCache.set(path, p)
    return p
|
||||
|
||||
def _globJournalFiles(flags=None, path=None):
    """Get journal files without rotated files.

    Selects system/user/local journal files per *flags* (python-systemd
    journal flag bits), under *path* if given, otherwise under the
    persistent and runtime journal directories. Rotated archives
    (*@*.journal) are excluded.

    NOTE(review): returns a set normally, but a list when running
    non-root (readability filter below), and None when nothing matched -
    callers appear to only iterate/bool-test the result.
    """
    filesSet = set()
    _join = os.path.join
    def _addJF(filesSet, p, flags):
        """add journal files to set corresponding path and flags (without rotated *@*.journal)"""
        # system journal:
        if (flags is None) or (flags & journal.SYSTEM_ONLY):
            filesSet |= set(glob(_join(p,'system.journal'))) - set(glob(_join(p,'system*@*.journal')))
        # current user-journal:
        if (flags is not None) and (flags & journal.CURRENT_USER):
            uid = os.geteuid()
            filesSet |= set(glob(_join(p,('user-%s.journal' % uid)))) - set(glob(_join(p,('user-%s@*.journal' % uid))))
        # all local journals:
        if (flags is None) or not (flags & (journal.SYSTEM_ONLY|journal.CURRENT_USER)):
            filesSet |= set(glob(_join(p,'*.journal'))) - set(glob(_join(p,'*@*.journal')))
    if path:
        # journals relative given path only:
        _addJF(filesSet, path, flags)
    else:
        # persistent journals corresponding flags:
        if (flags is None) or not (flags & journal.RUNTIME_ONLY):
            _addJF(filesSet, _join(_getSystemdPath('system-state-logs'), 'journal/*'), flags)
        # runtime journals corresponding flags:
        _addJF(filesSet, _join(_getSystemdPath('system-runtime-logs'), 'journal/*'), flags)
    # if not root, filter readable only:
    if os.geteuid() != 0:
        filesSet = [f for f in filesSet if os.access(f, os.R_OK)]
    return filesSet if filesSet else None
|
||||
|
||||
|
||||
##
|
||||
# Journal reader class.
|
||||
#
|
||||
# This class reads from systemd journal and detects login failures or anything
|
||||
# else that matches a given regular expression. This class is instantiated by
|
||||
# a Jail object.
|
||||
|
||||
class FilterSystemd(JournalFilter): # pragma: systemd no cover
|
||||
##
|
||||
# Constructor.
|
||||
#
|
||||
# Initialize the filter object with default values.
|
||||
# @param jail the jail object
|
||||
|
||||
def __init__(self, jail, **kwargs):
    """Initialize the systemd-journal filter for *jail*.

    Journal-specific kwargs (journalpath, journalfiles, journalflags,
    namespace, rotated) are consumed by _getJournalArgs; the rest is
    passed on to JournalFilter.
    """
    self.__jrnlargs = FilterSystemd._getJournalArgs(kwargs)
    JournalFilter.__init__(self, jail, **kwargs)
    # count of entries processed in the current inner read loop:
    self.__modified = 0
    # Initialise systemd-journal connection
    self.__journal = journal.Reader(**self.__jrnlargs)
    self.__matches = []
    # timestamp until which "Invalidate signaled" messages are suppressed:
    self.__bypassInvalidateMsg = 0
    self.setDatePattern(None)
    logSys.debug("Created FilterSystemd")
|
||||
|
||||
@staticmethod
def _getJournalArgs(kwargs):
    """Extract journal.Reader arguments from filter kwargs (consumed in place).

    Handles journalpath, journalfiles (glob-expanded), journalflags,
    namespace and rotated; when rotated journals are not wanted and no
    explicit files/namespace are given, resolves the concrete journal
    files to avoid holding rotated archives open (gh-2392).
    """
    # keep raw cursor strings (no conversion):
    args = {'converters':{'__CURSOR': lambda x: x}}
    try:
        args['path'] = kwargs.pop('journalpath')
    except KeyError:
        pass

    try:
        args['files'] = kwargs.pop('journalfiles')
    except KeyError:
        pass
    else:
        p = args['files']
        if not isinstance(p, (list, set, tuple)):
            p = splitwords(p)
        # expand globs and deduplicate:
        files = []
        for p in p:
            files.extend(glob(p))
        args['files'] = list(set(files))

    rotated = _as_bool(kwargs.pop('rotated', 0))
    # Default flags is SYSTEM_ONLY(4) or LOCAL_ONLY(1), depending on rotated parameter.
    # This could lead to ignore user session files, so together with ignoring rotated
    # files would prevent "Too many open files" errors on a lot of user sessions (see gh-2392):
    try:
        args['flags'] = int(kwargs.pop('journalflags'))
    except KeyError:
        # be sure all journal types will be opened if files/path specified (don't set flags):
        if (not args.get('files') and not args.get('path')):
            args['flags'] = os.getenv("F2B_SYSTEMD_DEFAULT_FLAGS", None)
            if args['flags'] is not None:
                args['flags'] = int(args['flags'])
            elif rotated:
                args['flags'] = journal.SYSTEM_ONLY

    try:
        args['namespace'] = kwargs.pop('namespace')
    except KeyError:
        pass

    # To avoid monitoring rotated logs, as prevention against "Too many open files",
    # set the files to system.journal and user-*.journal (without rotated *@*.journal):
    if not rotated and not args.get('files') and not args.get('namespace'):
        args['files'] = _globJournalFiles(
            args.get('flags', journal.LOCAL_ONLY), args.get('path'))
        if args['files']:
            args['files'] = list(args['files'])
            # flags and path cannot be specified simultaneously with files:
            args['flags'] = None;
            args['path'] = None;
        else:
            args['files'] = None

    return args
|
||||
|
||||
@property
def _journalAlive(self):
    """Checks journal is online.

    True when the reader is open and can still produce a cursor;
    False when closed or the underlying descriptor is broken.
    """
    try:
        # open?
        if self.__journal.closed: # pragma: no cover
            return False
        # has cursor? if it is broken (e. g. no descriptor) - it'd raise this:
        # OSError: [Errno 99] Cannot assign requested address
        if self.__journal._get_cursor():
            return True
    except OSError: # pragma: no cover
        pass
    return False
|
||||
|
||||
def _reopenJournal(self): # pragma: no cover
    """Reopen journal (if it becomes offline after rotation).

    Tries an in-place reinit first (workaround for gh-3929); if that
    fails, recreates the Reader from scratch. Jail journal matches are
    restored afterwards.
    """
    if self.__journal.closed:
        # recreate reader:
        self.__journal = journal.Reader(**self.__jrnlargs)
    else:
        try:
            # workaround for gh-3929 (no journal descriptor after rotation),
            # to reopen journal we'd simply invoke inherited init again:
            self.__journal.close()
            ja = self.__jrnlargs
            super(journal.Reader, self.__journal).__init__(
                ja.get('flags', 0), ja.get('path'), ja.get('files'), ja.get('namespace'))
        except:
            # cannot reopen in that way, so simply recreate reader:
            self.closeJournal()
            self.__journal = journal.Reader(**self.__jrnlargs)
    # restore journalmatch specified for the jail:
    self.resetJournalMatches()
    # just to avoid "Invalidate signaled" happening again after reopen:
    self.__bypassInvalidateMsg = MyTime.time() + 1
|
||||
|
||||
##
|
||||
# Add a journal match filters from list structure
|
||||
#
|
||||
# @param matches list structure with journal matches
|
||||
|
||||
def _addJournalMatches(self, matches):
    """Add journal match filters from a list structure.

    Each inner list is an AND-group of match elements; groups are
    OR-combined via add_disjunction. Successfully added groups are
    recorded in self.__matches.
    """
    if self.__matches:
        self.__journal.add_disjunction() # Add OR
    newMatches = []
    for match in matches:
        newMatches.append([])
        for match_element in match:
            self.__journal.add_match(match_element)
            newMatches[-1].append(match_element)
        self.__journal.add_disjunction()
    self.__matches.extend(newMatches)
|
||||
|
||||
##
|
||||
# Add a journal match filter
|
||||
#
|
||||
# @param match journalctl syntax matches in list structure
|
||||
|
||||
def addJournalMatch(self, match):
    """Add a journal match filter (journalctl syntax).

    "+" elements split the match into OR-groups. On failure all
    matches are reset to the previously known-good state and the
    ValueError is re-raised.
    """
    newMatches = [[]]
    for match_element in match:
        if match_element == "+":
            newMatches.append([])
        else:
            newMatches[-1].append(match_element)
    try:
        self._addJournalMatches(newMatches)
    except ValueError:
        logSys.error(
            "Error adding journal match for: %r", " ".join(match))
        self.resetJournalMatches()
        raise
    else:
        logSys.info("[%s] Added journal match for: %r", self.jailName,
            " ".join(match))
|
||||
##
|
||||
# Reset a journal match filter called on removal or failure
|
||||
#
|
||||
# @return None
|
||||
|
||||
def resetJournalMatches(self):
    """Re-apply all known matches to the journal reader.

    Called on removal or failure: flushes the reader's matches and
    re-adds the recorded ones; raises ValueError if restore fails.
    """
    self.__journal.flush_matches()
    logSys.debug("[%s] Flushed all journal matches", self.jailName)
    # re-add from a copy; self.__matches is rebuilt by _addJournalMatches:
    match_copy = self.__matches[:]
    self.__matches = []
    try:
        self._addJournalMatches(match_copy)
    except ValueError:
        logSys.error("Error restoring journal matches")
        raise
    else:
        logSys.debug("Journal matches restored")
|
||||
|
||||
##
|
||||
# Delete a journal match filter
|
||||
#
|
||||
# @param match journalctl syntax matches
|
||||
|
||||
def delJournalMatch(self, match=None):
    """Delete a journal match filter (all matches when *match* is None).

    Raises ValueError if the given match is unknown.
    """
    # clear all:
    if match is None:
        if not self.__matches:
            return
        del self.__matches[:]
    # delete by value (first occurrence):
    elif match in self.__matches:
        del self.__matches[self.__matches.index(match)]
    else:
        raise ValueError("Match %r not found" % match)
    self.resetJournalMatches()
    logSys.info("[%s] Removed journal match for: %r", self.jailName,
        match if match else '*')
|
||||
|
||||
##
|
||||
# Get current journal match filter
|
||||
#
|
||||
# @return journalctl syntax matches
|
||||
|
||||
def getJournalMatch(self):
    """Return the current journal matches (list of AND-groups, journalctl syntax)."""
    return self.__matches
|
||||
|
||||
##
|
||||
# Get journal reader
|
||||
#
|
||||
# @return journal reader
|
||||
|
||||
def getJournalReader(self):
    """Return the underlying systemd journal.Reader instance."""
    return self.__journal
|
||||
|
||||
def getJrnEntTime(self, logentry):
    """Return the entry time as a tuple (ISO string, POSIX seconds).

    Prefers the source timestamp of the entry; falls back to the time
    the journal received it.
    """
    ts = logentry.get('_SOURCE_REALTIME_TIMESTAMP')
    if ts is None:
        ts = logentry.get('__REALTIME_TIMESTAMP')
    # local-time POSIX seconds plus sub-second fraction:
    posix = time.mktime(ts.timetuple()) + ts.microsecond / 1.0E6
    return (ts.isoformat(), posix)
|
||||
|
||||
##
|
||||
# Format journal log entry into syslog style
|
||||
#
|
||||
# @param entry systemd journal entry dict
|
||||
# @return format log line
|
||||
|
||||
def formatJournalEntry(self, logentry):
    """Format a systemd journal entry into a syslog-like line tuple.

    Returns ((prefix, date-str, message), posix-time); the first tuple
    is shaped like the filter's common line format (empty prefix of the
    same string type as the message).
    """
    # Be sure, all argument of line tuple should have the same type:
    enc = self.getLogEncoding()
    logelements = []
    v = logentry.get('_HOSTNAME')
    if v:
        logelements.append(uni_decode(v, enc))
    # program name: syslog identifier, else command name:
    v = logentry.get('SYSLOG_IDENTIFIER')
    if not v:
        v = logentry.get('_COMM')
    if v:
        logelements.append(uni_decode(v, enc))
        v = logentry.get('SYSLOG_PID')
        if not v:
            v = logentry.get('_PID')
        if v:
            try: # [integer] (if already numeric):
                v = "[%i]" % v
            except TypeError:
                try: # as [integer] (try to convert to int):
                    v = "[%i]" % int(v, 0)
                except (TypeError, ValueError): # fallback - [string] as it is
                    v = "[%s]" % v
            logelements[-1] += v
        logelements[-1] += ":"
        # kernel messages additionally carry a monotonic timestamp:
        if logelements[-1] == "kernel:":
            monotonic = logentry.get('_SOURCE_MONOTONIC_TIMESTAMP')
            if monotonic is None:
                monotonic = logentry.get('__MONOTONIC_TIMESTAMP')[0]
            logelements.append("[%12.6f]" % monotonic.total_seconds())
    msg = logentry.get('MESSAGE','')
    if isinstance(msg, list):
        logelements.append(" ".join(uni_decode(v, enc) for v in msg))
    else:
        logelements.append(uni_decode(msg, enc))

    logline = " ".join(logelements)

    date = self.getJrnEntTime(logentry)
    logSys.log(5, "[%s] Read systemd journal entry: %s %s", self.jailName,
        date[0], logline)
    ## use the same type for 1st argument:
    return ((logline[:0], date[0] + ' ', logline.replace('\n', '\\n')), date[1])
|
||||
|
||||
def seekToTime(self, date):
    """Seek the journal to the given POSIX time (int is coerced to float)."""
    if isinstance(date, int):
        date = float(date)
    self.__journal.seek_realtime(date)
|
||||
|
||||
def inOperationMode(self):
    """Switch the filter to in-operation mode (process new entries only)."""
    self.inOperation = True
    logSys.info("[%s] Jail is in operation now (process new journal entries)", self.jailName)
    # just to avoid "Invalidate signaled" happening often at start:
    self.__bypassInvalidateMsg = MyTime.time() + 1
|
||||
|
||||
##
|
||||
# Main loop.
|
||||
#
|
||||
# Peridocily check for new journal entries matching the filter and
|
||||
# handover to FailManager
|
||||
|
||||
def run(self):
    """Main loop: read journal entries matching the filter and hand
    failures over to the fail manager.

    Phases: (1) seek to a start position (last DB position or
    now - findtime); (2) loop - wait for journal events (APPEND /
    INVALIDATE / timeout), handle rotation/reopen, read and process
    entries, periodically persist the journal position. Returns True
    on normal shutdown.
    """

    if not self.getJournalMatch():
        logSys.notice(
            "[%s] Jail started without 'journalmatch' set. "
            "Jail regexs will be checked against all journal entries, "
            "which is not advised for performance reasons.", self.jailName)

    # Save current cursor position (to recognize in operation mode):
    logentry = None
    try:
        self.__journal.seek_tail()
        logentry = self.__journal.get_previous()
        if logentry:
            self.__journal.get_next()
    except OSError:
        logentry = None # Reading failure, so safe to ignore
    if logentry:
        # Try to obtain the last known time (position of journal)
        startTime = 0
        if self.jail.database is not None:
            startTime = self.jail.database.getJournalPos(self.jail, 'systemd-journal') or 0
        # Seek to max(last_known_time, now - findtime) in journal
        startTime = max( startTime, MyTime.time() - int(self.getFindTime()) )
        self.seekToTime(startTime)
        # Not in operation while we'll read old messages ...
        self.inOperation = False
        # Save current time in order to check time to switch "in operation" mode
        startTime = (1, MyTime.time(), logentry.get('__CURSOR'))
    else:
        # empty journal or no entries for current filter:
        self.inOperationMode()
        # seek_tail() seems to have a bug by no entries (could bypass some entries hereafter), so seek to now instead:
        startTime = MyTime.time()
        self.seekToTime(startTime)
        # for possible future switches of in-operation mode:
        startTime = (0, startTime)

    # Move back one entry to ensure do not end up in dead space
    # if start time beyond end of journal
    try:
        self.__journal.get_previous()
    except OSError:
        pass # Reading failure, so safe to ignore

    wcode = journal.NOP
    line = None
    while self.active:
        # wait for records (or for timeout in sleeptime seconds):
        try:
            if self.idle:
                # because journal.wait will returns immediately if we have records in journal,
                # just wait a little bit here for not idle, to prevent hi-load:
                if not Utils.wait_for(lambda: not self.active or not self.idle,
                    self.sleeptime * 10, self.sleeptime
                ):
                    self.ticks += 1
                    continue
            ## wait for entries using journal.wait:
            if wcode == journal.NOP and self.inOperation:
                ## todo: find better method as wait_for to break (e.g. notify) journal.wait(self.sleeptime),
                ## don't use `journal.close()` for it, because in some python/systemd implementation it may
                ## cause abnormal program termination (e. g. segfault)
                ##
                ## wait for entries without sleep in intervals, because "sleeping" in journal.wait,
                ## journal.NOP is 0, so we can wait for non zero (APPEND or INVALIDATE):
                wcode = Utils.wait_for(lambda: not self.active and journal.APPEND or \
                    self.__journal.wait(Utils.DEFAULT_SLEEP_INTERVAL),
                    self.sleeptime, 0.00001)
                ## if invalidate (due to rotation, vacuuming or journal files added/removed etc):
                if self.active and wcode == journal.INVALIDATE:
                    if self.ticks:
                        if not self.__bypassInvalidateMsg or MyTime.time() > self.__bypassInvalidateMsg:
                            logSys.log(logging.MSG, "[%s] Invalidate signaled, take a little break (rotation ends)", self.jailName)
                        time.sleep(self.sleeptime * 0.25)
                        self.__bypassInvalidateMsg = 0
                    # wait until the INVALIDATE burst ends (rotation finished):
                    Utils.wait_for(lambda: not self.active or \
                        self.__journal.wait(Utils.DEFAULT_SLEEP_INTERVAL) != journal.INVALIDATE,
                        self.sleeptime * 3, 0.00001)
                    if self.ticks:
                        # move back and forth to ensure do not end up in dead space by rotation or vacuuming,
                        # if position beyond end of journal (gh-3396)
                        try:
                            if self.__journal.get_previous(): self.__journal.get_next()
                        except OSError:
                            pass
                    # if it is not alive - reopen:
                    if not self._journalAlive:
                        logSys.log(logging.MSG, "[%s] Journal reader seems to be offline, reopen journal", self.jailName)
                        self._reopenJournal()
                    wcode = journal.NOP
            self.__modified = 0
            # inner read loop - drain available entries (in chunks of 100):
            while self.active:
                logentry = None
                try:
                    logentry = self.__journal.get_next()
                except OSError as e:
                    logSys.error("Error reading line from systemd journal: %s",
                        e, exc_info=logSys.getEffectiveLevel() <= logging.DEBUG)
                self.ticks += 1
                if logentry:
                    line, tm = self.formatJournalEntry(logentry)
                    # switch "in operation" mode if we'll find start entry (+ some delta):
                    if not self.inOperation:
                        if tm >= MyTime.time() - 1: # reached now (approximated):
                            self.inOperationMode()
                        elif startTime[0] == 1:
                            # if it reached start entry (or get read time larger than start time)
                            if logentry.get('__CURSOR') == startTime[2] or tm > startTime[1]:
                                # give the filter same time it needed to reach the start entry:
                                startTime = (0, MyTime.time()*2 - startTime[1])
                        elif tm > startTime[1]: # reached start time (approximated):
                            self.inOperationMode()
                    # process line
                    self.processLineAndAdd(line, tm)
                    self.__modified += 1
                    if self.__modified >= 100: # todo: should be configurable
                        wcode = journal.APPEND; # don't need wait - there are still unprocessed entries
                        break
                else:
                    # "in operation" mode since we don't have messages anymore (reached end of journal):
                    if not self.inOperation:
                        self.inOperationMode()
                    wcode = journal.NOP; # enter wait - no more entries to process
                    break
            self.__modified = 0
            if self.ticks % 10 == 0:
                self.performSvc()
            # update position in log (time and iso string):
            if self.jail.database:
                if line:
                    self._pendDBUpdates['systemd-journal'] = (tm, line[1])
                    line = None
                if self._pendDBUpdates and (
                    self.ticks % 100 == 0
                    or MyTime.time() >= self._nextUpdateTM
                    or not self.active
                ):
                    self._updateDBPending()
                    self._nextUpdateTM = MyTime.time() + Utils.DEFAULT_SLEEP_TIME * 5
        except Exception as e: # pragma: no cover
            if not self.active: # if not active - error by stop...
                break
            wcode = journal.NOP
            logSys.error("Caught unhandled exception in main cycle: %r", e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
            # incr common error counter:
            self.commonError("unhandled", e)

    logSys.debug("[%s] filter terminated", self.jailName)

    # call afterStop once (close journal, etc):
    self.done()

    logSys.debug("[%s] filter exited (systemd)", self.jailName)
    return True
|
||||
|
||||
def closeJournal(self):
    """Close and drop the journal reader; errors are logged, not raised."""
    try:
        # drop the reference first so concurrent users see None:
        jnl, self.__journal = self.__journal, None
        if jnl:
            jnl.close()
    except Exception as e: # pragma: no cover
        logSys.error("Close journal failed: %r", e,
            exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
|
||||
def status(self, flavor="basic"):
    """Return filter status; extends base status with journal matches
    (except for the "stats" flavor)."""
    ret = super(FilterSystemd, self).status(flavor=flavor)
    if flavor == "stats":
        return ret
    ret.append(("Journal matches",
        [" + ".join(" ".join(match) for match in self.__matches)]))
    return ret
|
||||
|
||||
def _updateDBPending(self):
|
||||
"""Apply pending updates (journal position) to database.
|
||||
"""
|
||||
db = self.jail.database
|
||||
while True:
|
||||
try:
|
||||
log, args = self._pendDBUpdates.popitem()
|
||||
except KeyError:
|
||||
break
|
||||
db.updateJournal(self.jail, log, *args)
|
||||
|
||||
def afterStop(self):
    """Cleanup after stop: close the journal and persist pending positions."""
    # close journal:
    self.closeJournal()
    # ensure positions of pending logs are up-to-date:
    if self._pendDBUpdates and self.jail.database:
        self._updateDBPending()
|
||||
919
fail2ban-master/fail2ban/server/ipdns.py
Normal file
919
fail2ban-master/fail2ban/server/ipdns.py
Normal file
@@ -0,0 +1,919 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Fail2Ban Developers, Alexander Koeppe, Serg G. Brester, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004-2016 Fail2ban Developers"
|
||||
__license__ = "GPL"
|
||||
|
||||
import socket
|
||||
import struct
|
||||
import os
|
||||
import re
|
||||
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger, MyTime, splitwords
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
|
||||
# Helper functions
|
||||
#
|
||||
#
|
||||
def asip(ip):
    """A little helper to guarantee ip being an IPAddr instance."""
    return ip if isinstance(ip, IPAddr) else IPAddr(ip)
|
||||
|
||||
def getfqdn(name=''):
    """Get fully-qualified hostname of given host, thereby resolve of an external
    IPs and name will be preferred before the local domain (or a loopback), see gh-2438
    """
    try:
        name = name or socket.gethostname()
        # materialize the canonical names into a list: a generator expression
        # is always truthy, which made the emptiness check below a no-op and
        # could return None when getaddrinfo yields no canonical names:
        names = [
            ai[3] for ai in socket.getaddrinfo(
                name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME
            ) if ai[3]
        ]
        if names:
            # first try to find a fqdn starting with the host name like www.domain.tld for www:
            pref = name + '.'
            first = None
            for ai in names:
                if ai.startswith(pref):
                    return ai
                if not first:
                    first = ai
            # not found - simply use first known fqdn:
            if first:
                return first
    except socket.error:
        pass
    # fallback to python's own getfqdn routine:
    return socket.getfqdn(name)
|
||||
|
||||
|
||||
##
|
||||
# Utils class for DNS handling.
|
||||
#
|
||||
# This class contains only static methods used to handle DNS
|
||||
#
|
||||
class DNSUtils:
|
||||
|
||||
# todo: make configurable the expired time and max count of cache entries:
|
||||
CACHE_nameToIp = Utils.Cache(maxCount=1000, maxTime=5*60)
|
||||
CACHE_ipToName = Utils.Cache(maxCount=1000, maxTime=5*60)
|
||||
# static cache used to hold sets read from files:
|
||||
CACHE_fileToIp = Utils.Cache(maxCount=100, maxTime=5*60)
|
||||
|
||||
@staticmethod
def dnsToIp(dns):
    """ Convert a DNS into an IP address using the Python socket module.
    Thanks to Kevin Drapel.

    Returns a set of IPAddr objects (possibly empty); results are cached
    to avoid repeated slow lookups for wrong/lazy DNS.
    """
    # cache, also prevent long wait during retrieving of ip for wrong dns or lazy dns-system:
    ips = DNSUtils.CACHE_nameToIp.get(dns)
    if ips is not None:
        return ips
    # retrieve ips
    ips = set()
    saveerr = None
    # query IPv4 always, IPv6 only when allowed on this system:
    for fam in ((socket.AF_INET,socket.AF_INET6) if DNSUtils.IPv6IsAllowed() else (socket.AF_INET,)):
        try:
            for result in socket.getaddrinfo(dns, None, fam, 0, socket.IPPROTO_TCP):
                # if getaddrinfo returns something unexpected:
                if len(result) < 4 or not len(result[4]): continue
                # get ip from `(2, 1, 6, '', ('127.0.0.1', 0))`,be sure we've an ip-string
                # (some python-versions resp. host configurations causes returning of integer there):
                ip = IPAddr(str(result[4][0]), IPAddr._AF2FAM(fam))
                if ip.isValid:
                    ips.add(ip)
        except Exception as e:
            # remember last error; only reported if nothing resolved at all
            saveerr = e
    if not ips and saveerr:
        logSys.warning("Unable to find a corresponding IP address for %s: %s", dns, saveerr)

    DNSUtils.CACHE_nameToIp.set(dns, ips)
    return ips
|
||||
|
||||
@staticmethod
|
||||
def ipToName(ip):
|
||||
# cache, also prevent long wait during retrieving of name for wrong addresses, lazy dns:
|
||||
v = DNSUtils.CACHE_ipToName.get(ip, ())
|
||||
if v != ():
|
||||
return v
|
||||
# retrieve name
|
||||
try:
|
||||
v = socket.gethostbyaddr(ip)[0]
|
||||
except socket.error as e:
|
||||
logSys.debug("Unable to find a name for the IP %s: %s", ip, e)
|
||||
v = None
|
||||
DNSUtils.CACHE_ipToName.set(ip, v)
|
||||
return v
|
||||
|
||||
@staticmethod
|
||||
def textToIp(text, useDns):
|
||||
""" Return the IP of DNS found in a given text.
|
||||
"""
|
||||
ipList = set()
|
||||
# Search for plain IP
|
||||
plainIP = IPAddr.searchIP(text)
|
||||
if plainIP is not None:
|
||||
ip = IPAddr(plainIP)
|
||||
if ip.isValid:
|
||||
ipList.add(ip)
|
||||
|
||||
# If we are allowed to resolve -- give it a try if nothing was found
|
||||
if useDns in ("yes", "warn") and not ipList:
|
||||
# Try to get IP from possible DNS
|
||||
ip = DNSUtils.dnsToIp(text)
|
||||
ipList.update(ip)
|
||||
if ip and useDns == "warn":
|
||||
logSys.warning("Determined IP using DNS Lookup: %s = %s",
|
||||
text, ipList)
|
||||
|
||||
return ipList
|
||||
|
||||
@staticmethod
|
||||
def getHostname(fqdn=True):
|
||||
"""Get short hostname or fully-qualified hostname of host self"""
|
||||
# try find cached own hostnames (this tuple-key cannot be used elsewhere):
|
||||
key = ('self','hostname', fqdn)
|
||||
name = DNSUtils.CACHE_ipToName.get(key)
|
||||
if name is not None:
|
||||
return name
|
||||
# get it using different ways (hostname, fully-qualified or vice versa):
|
||||
name = ''
|
||||
for hostname in (
|
||||
(getfqdn, socket.gethostname) if fqdn else (socket.gethostname, getfqdn)
|
||||
):
|
||||
try:
|
||||
name = hostname()
|
||||
break
|
||||
except Exception as e: # pragma: no cover
|
||||
logSys.warning("Retrieving own hostnames failed: %s", e)
|
||||
# cache and return :
|
||||
DNSUtils.CACHE_ipToName.set(key, name)
|
||||
return name
|
||||
|
||||
# key find cached own hostnames (this tuple-key cannot be used elsewhere):
|
||||
_getSelfNames_key = ('self','dns')
|
||||
|
||||
@staticmethod
|
||||
def getSelfNames():
|
||||
"""Get own host names of self"""
|
||||
# try find cached own hostnames:
|
||||
names = DNSUtils.CACHE_ipToName.get(DNSUtils._getSelfNames_key)
|
||||
if names is not None:
|
||||
return names
|
||||
# get it using different ways (a set with names of localhost, hostname, fully qualified):
|
||||
names = set([
|
||||
'localhost', DNSUtils.getHostname(False), DNSUtils.getHostname(True)
|
||||
]) - set(['']) # getHostname can return ''
|
||||
# cache and return :
|
||||
DNSUtils.CACHE_ipToName.set(DNSUtils._getSelfNames_key, names)
|
||||
return names
|
||||
|
||||
# key to find cached network interfaces IPs (this tuple-key cannot be used elsewhere):
|
||||
_getNetIntrfIPs_key = ('netintrf','ips')
|
||||
|
||||
@staticmethod
|
||||
def getNetIntrfIPs():
|
||||
"""Get own IP addresses of self"""
|
||||
# to find cached own IPs:
|
||||
ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getNetIntrfIPs_key)
|
||||
if ips is not None:
|
||||
return ips
|
||||
# try to obtain from network interfaces if possible (implemented for this platform):
|
||||
try:
|
||||
ips = IPAddrSet([a for ni, a in DNSUtils._NetworkInterfacesAddrs()])
|
||||
except:
|
||||
ips = IPAddrSet()
|
||||
# cache and return :
|
||||
DNSUtils.CACHE_nameToIp.set(DNSUtils._getNetIntrfIPs_key, ips)
|
||||
return ips
|
||||
|
||||
# key to find cached own IPs (this tuple-key cannot be used elsewhere):
|
||||
_getSelfIPs_key = ('self','ips')
|
||||
|
||||
@staticmethod
|
||||
def getSelfIPs():
|
||||
"""Get own IP addresses of self"""
|
||||
# to find cached own IPs:
|
||||
ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getSelfIPs_key)
|
||||
if ips is not None:
|
||||
return ips
|
||||
# firstly try to obtain from network interfaces if possible (implemented for this platform):
|
||||
ips = IPAddrSet(DNSUtils.getNetIntrfIPs())
|
||||
# extend it using different ways (a set with IPs of localhost, hostname, fully qualified):
|
||||
for hostname in DNSUtils.getSelfNames():
|
||||
try:
|
||||
ips |= IPAddrSet(DNSUtils.dnsToIp(hostname))
|
||||
except Exception as e: # pragma: no cover
|
||||
logSys.warning("Retrieving own IPs of %s failed: %s", hostname, e)
|
||||
# cache and return :
|
||||
DNSUtils.CACHE_nameToIp.set(DNSUtils._getSelfIPs_key, ips)
|
||||
return ips
|
||||
|
||||
@staticmethod
|
||||
def getIPsFromFile(fileName, noError=True):
|
||||
"""Get set of IP addresses or subnets from file"""
|
||||
# to find cached IPs:
|
||||
ips = DNSUtils.CACHE_fileToIp.get(fileName)
|
||||
if ips is not None:
|
||||
return ips
|
||||
# try to obtain set from file:
|
||||
ips = FileIPAddrSet(fileName)
|
||||
#ips.load() - load on demand
|
||||
# cache and return :
|
||||
DNSUtils.CACHE_fileToIp.set(fileName, ips)
|
||||
return ips
|
||||
|
||||
_IPv6IsAllowed = None
|
||||
|
||||
@staticmethod
|
||||
def _IPv6IsSupportedBySystem():
|
||||
if not socket.has_ipv6:
|
||||
return False
|
||||
# try to check sysctl net.ipv6.conf.all.disable_ipv6:
|
||||
try:
|
||||
with open('/proc/sys/net/ipv6/conf/all/disable_ipv6', 'rb') as f:
|
||||
# if 1 - disabled, 0 - enabled
|
||||
return not int(f.read())
|
||||
except:
|
||||
pass
|
||||
s = None
|
||||
try:
|
||||
# try to create INET6 socket:
|
||||
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
|
||||
# bind it to free port for any interface supporting IPv6:
|
||||
s.bind(("", 0));
|
||||
return True
|
||||
except Exception as e: # pragma: no cover
|
||||
if hasattr(e, 'errno'):
|
||||
import errno
|
||||
# negative (-9 'Address family not supported', etc) or not available/supported:
|
||||
if e.errno < 0 or e.errno in (errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT):
|
||||
return False
|
||||
# in use:
|
||||
if e.errno in (errno.EADDRINUSE, errno.EACCES): # normally unreachable (free port and root)
|
||||
return True
|
||||
finally:
|
||||
if s: s.close()
|
||||
# unable to detect:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def setIPv6IsAllowed(value):
|
||||
DNSUtils._IPv6IsAllowed = value
|
||||
logSys.debug("IPv6 is %s", ('on' if value else 'off') if value is not None else 'auto')
|
||||
return value
|
||||
|
||||
# key to find cached value of IPv6 allowance (this tuple-key cannot be used elsewhere):
|
||||
_IPv6IsAllowed_key = ('self','ipv6-allowed')
|
||||
|
||||
@staticmethod
|
||||
def IPv6IsAllowed():
|
||||
if DNSUtils._IPv6IsAllowed is not None:
|
||||
return DNSUtils._IPv6IsAllowed
|
||||
v = DNSUtils.CACHE_nameToIp.get(DNSUtils._IPv6IsAllowed_key)
|
||||
if v is not None:
|
||||
return v
|
||||
v = DNSUtils._IPv6IsSupportedBySystem()
|
||||
if v is None:
|
||||
# detect by IPs of host:
|
||||
ips = DNSUtils.getNetIntrfIPs()
|
||||
if not ips:
|
||||
DNSUtils._IPv6IsAllowed = True; # avoid self recursion from getSelfIPs -> dnsToIp -> IPv6IsAllowed
|
||||
try:
|
||||
ips = DNSUtils.getSelfIPs()
|
||||
finally:
|
||||
DNSUtils._IPv6IsAllowed = None
|
||||
v = any((':' in ip.ntoa) for ip in ips)
|
||||
DNSUtils.CACHE_nameToIp.set(DNSUtils._IPv6IsAllowed_key, v)
|
||||
return v
|
||||
|
||||
|
||||
##
|
||||
# Class for IP address handling.
|
||||
#
|
||||
# This class contains methods for handling IPv4 and IPv6 addresses.
|
||||
#
|
||||
class IPAddr(object):
	"""Encapsulate functionality for IPv4 and IPv6 addresses
	"""

	# regex for an IPv4 resp. IPv6 address candidate:
	IP_4_RE = r"""(?:\d{1,3}\.){3}\d{1,3}"""
	IP_6_RE = r"""(?:[0-9a-fA-F]{1,4}::?|:){1,7}(?:[0-9a-fA-F]{1,4}|(?<=:):)"""
	IP_4_6_CRE = re.compile(
		r"""^(?:(?P<IPv4>%s)|\[?(?P<IPv6>%s)\]?)$""" % (IP_4_RE, IP_6_RE))
	# address with CIDR suffix: numeric prefix length or a netmask address:
	IP_W_CIDR_CRE = re.compile(
		r"""^(%s|%s)/(?:(\d+)|(%s|%s))$""" % (IP_4_RE, IP_6_RE, IP_4_RE, IP_6_RE))
	# An IPv4 compatible IPv6 to be reused (see below)
	IP6_4COMPAT = None

	# object attributes
	__slots__ = '_family','_addr','_plen','_maskplen','_raw'

	# todo: make configurable the expired time and max count of cache entries:
	CACHE_OBJ = Utils.Cache(maxCount=10000, maxTime=5*60)

	# special negative "cidr" markers; address families are encoded below CIDR_RAW:
	CIDR_RAW = -2
	CIDR_UNSPEC = -1
	FAM_IPv4 = CIDR_RAW - socket.AF_INET
	FAM_IPv6 = CIDR_RAW - socket.AF_INET6
	@staticmethod
	def _AF2FAM(v):
		# encode an address family as pseudo-cidr (value below CIDR_RAW):
		return IPAddr.CIDR_RAW - v

	def __new__(cls, ipstr, cidr=CIDR_UNSPEC):
		"""Create (or return a cached) IPAddr for given string, optionally with cidr.

		Raw values (CIDR_RAW, or tuple/list input) are never cached.
		"""
		if cidr == IPAddr.CIDR_UNSPEC and isinstance(ipstr, (tuple, list)):
			cidr = IPAddr.CIDR_RAW
		if cidr == IPAddr.CIDR_RAW: # don't cache raw
			ip = super(IPAddr, cls).__new__(cls)
			ip.__init(ipstr, cidr)
			return ip
		# check already cached as IPAddr
		args = (ipstr, cidr)
		ip = IPAddr.CACHE_OBJ.get(args)
		if ip is not None:
			return ip
		# wrap mask to cidr (correct plen):
		if cidr == IPAddr.CIDR_UNSPEC:
			ipstr, cidr = IPAddr.__wrap_ipstr(ipstr)
			args = (ipstr, cidr)
			# check cache again:
			if cidr != IPAddr.CIDR_UNSPEC:
				ip = IPAddr.CACHE_OBJ.get(args)
				if ip is not None:
					return ip
		ip = super(IPAddr, cls).__new__(cls)
		ip.__init(ipstr, cidr)
		if ip._family != IPAddr.CIDR_RAW:
			IPAddr.CACHE_OBJ.set(args, ip)
		return ip

	@staticmethod
	def __wrap_ipstr(ipstr):
		"""Normalize bracketed IPv6 and split an optional "/mask" suffix.

		Returns (ipstr, cidr) where cidr is an int prefix length or CIDR_UNSPEC.
		"""
		# because of standard spelling of IPv6 (with port) enclosed in brackets ([ipv6]:port),
		# remove they now (be sure the <HOST> inside failregex uses this for IPv6 (has \[?...\]?)
		if len(ipstr) > 2 and ipstr[0] == '[' and ipstr[-1] == ']':
			ipstr = ipstr[1:-1]
		# test mask:
		if "/" not in ipstr:
			return ipstr, IPAddr.CIDR_UNSPEC
		s = IPAddr.IP_W_CIDR_CRE.match(ipstr)
		if s is None:
			return ipstr, IPAddr.CIDR_UNSPEC
		s = list(s.groups())
		if s[2]: # 255.255.255.0 resp. ffff:: style mask
			s[1] = IPAddr.masktoplen(s[2])
		del s[2]
		try:
			s[1] = int(s[1])
		except ValueError:
			return ipstr, IPAddr.CIDR_UNSPEC
		return s

	def __init(self, ipstr, cidr=CIDR_UNSPEC):
		""" initialize IP object by converting IP address string
			to binary to integer
		"""
		self._family = socket.AF_UNSPEC
		self._addr = 0
		self._plen = 0
		self._maskplen = None
		# always save raw value (normally used if really raw or not valid only):
		self._raw = ipstr
		# if not raw - recognize family, set addr, etc.:
		if cidr != IPAddr.CIDR_RAW:
			if cidr is not None and cidr < IPAddr.CIDR_RAW:
				# pseudo-cidr below CIDR_RAW encodes the family (see _AF2FAM):
				family = [IPAddr.CIDR_RAW - cidr]
			else:
				family = [socket.AF_INET, socket.AF_INET6]
			for family in family:
				try:
					binary = socket.inet_pton(family, ipstr)
					self._family = family
					break
				except socket.error:
					continue

			if self._family == socket.AF_INET:
				# convert host to network byte order
				self._addr, = struct.unpack("!L", binary)
				self._plen = 32

				# mask out host portion if prefix length is supplied
				if cidr is not None and cidr >= 0:
					mask = ~(0xFFFFFFFF >> cidr)
					self._addr &= mask
					self._plen = cidr

			elif self._family == socket.AF_INET6:
				# convert host to network byte order
				hi, lo = struct.unpack("!QQ", binary)
				self._addr = (hi << 64) | lo
				self._plen = 128

				# mask out host portion if prefix length is supplied
				if cidr is not None and cidr >= 0:
					mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF >> cidr)
					self._addr &= mask
					self._plen = cidr

				# if IPv6 address is a IPv4-compatible, make instance a IPv4
				elif self.isInNet(IPAddr.IP6_4COMPAT):
					self._addr = lo & 0xFFFFFFFF
					self._family = socket.AF_INET
					self._plen = 32
		else:
			self._family = IPAddr.CIDR_RAW

	def __repr__(self):
		return repr(self.ntoa)

	def __str__(self):
		return self.ntoa if isinstance(self.ntoa, str) else str(self.ntoa)

	def __reduce__(self):
		"""IPAddr pickle-handler, that simply wraps IPAddr to the str

		Returns a string as instance to be pickled, because fail2ban-client can't
		unserialize IPAddr objects
		"""
		return (str, (self.ntoa,))

	@property
	def addr(self):
		# integer representation of the (masked) address
		return self._addr

	@property
	def family(self):
		# socket.AF_INET / AF_INET6 / AF_UNSPEC, or CIDR_RAW for raw values
		return self._family

	FAM2STR = {socket.AF_INET: 'inet4', socket.AF_INET6: 'inet6'}
	@property
	def familyStr(self):
		return IPAddr.FAM2STR.get(self._family)

	@property
	def instanceType(self):
		return "ip" if self.isValid else "dns"

	@property
	def plen(self):
		# prefix length (32/128 for single addresses, smaller for subnets)
		return self._plen

	@property
	def raw(self):
		"""The raw address

		Should only be set to a non-empty string if prior address
		conversion wasn't possible
		"""
		return self._raw

	@property
	def isValid(self):
		"""Either the object corresponds to a valid IP address
		"""
		return self._family != socket.AF_UNSPEC

	@property
	def isSingle(self):
		"""Returns whether the object is a single IP address (not DNS and subnet)
		"""
		return self._plen == {socket.AF_INET: 32, socket.AF_INET6: 128}.get(self._family, -1000)

	def __eq__(self, other):
		# raw values compare against plain objects directly:
		if self._family == IPAddr.CIDR_RAW and not isinstance(other, IPAddr):
			return self._raw == other
		if not isinstance(other, IPAddr):
			if other is None: return False
			other = IPAddr(other)
		if self._family != other._family: return False
		if self._family == socket.AF_UNSPEC:
			return self._raw == other._raw
		return (
			(self._addr == other._addr) and
			(self._plen == other._plen)
		)

	def __ne__(self, other):
		return not (self == other)

	def __lt__(self, other):
		if self._family == IPAddr.CIDR_RAW and not isinstance(other, IPAddr):
			return self._raw < other
		if not isinstance(other, IPAddr):
			if other is None: return False
			other = IPAddr(other)
		# order by family first, then by numeric address:
		return self._family < other._family or self._addr < other._addr

	def __add__(self, other):
		if not isinstance(other, IPAddr):
			other = IPAddr(other)
		return "%s%s" % (self, other)

	def __radd__(self, other):
		if not isinstance(other, IPAddr):
			other = IPAddr(other)
		return "%s%s" % (other, self)

	def __hash__(self):
		# should be the same as by string (because of possible compare with string):
		return hash(self.ntoa)
		#return hash(self._addr)^hash((self._plen<<16)|self._family)

	@property
	def hexdump(self):
		"""Hex representation of the IP address (for debug purposes)
		"""
		if self._family == socket.AF_INET:
			return "%08x" % self._addr
		elif self._family == socket.AF_INET6:
			return "%032x" % self._addr
		else:
			return ""

	# TODO: could be lazily evaluated
	@property
	def ntoa(self):
		""" represent IP object as text like the deprecated
			C pendant inet.ntoa but address family independent
		"""
		add = ''
		if self.isIPv4:
			# convert network to host byte order
			binary = struct.pack("!L", self._addr)
			if self._plen and self._plen < 32:
				add = "/%d" % self._plen
		elif self.isIPv6:
			# convert network to host byte order
			hi = self._addr >> 64
			lo = self._addr & 0xFFFFFFFFFFFFFFFF
			binary = struct.pack("!QQ", hi, lo)
			if self._plen and self._plen < 128:
				add = "/%d" % self._plen
		else:
			# invalid/raw values fall back to the raw string:
			return self._raw

		return socket.inet_ntop(self._family, binary) + add

	def getPTR(self, suffix=None):
		""" return the DNS PTR string of the provided IP address object

		If "suffix" is provided it will be appended as the second and top
		level reverse domain.
		If omitted it is implicitly set to the second and top level reverse
		domain of the according IP address family
		"""
		if self.isIPv4:
			exploded_ip = self.ntoa.split(".")
			if suffix is None:
				suffix = "in-addr.arpa."
		elif self.isIPv6:
			exploded_ip = self.hexdump
			if suffix is None:
				suffix = "ip6.arpa."
		else:
			return ""

		return "%s.%s" % (".".join(reversed(exploded_ip)), suffix)

	def getHost(self):
		"""Return the host name (DNS) of the provided IP address object
		"""
		return DNSUtils.ipToName(self.ntoa)

	@property
	def isIPv4(self):
		"""Either the IP object is of address family AF_INET
		"""
		return self.family == socket.AF_INET

	@property
	def isIPv6(self):
		"""Either the IP object is of address family AF_INET6
		"""
		return self.family == socket.AF_INET6

	def isInNet(self, net):
		"""Return either the IP object is in the provided network
		"""
		# if addr-set:
		if isinstance(net, IPAddrSet):
			return self in net
		# if it isn't a valid IP address, try DNS resolution
		if not net.isValid and net.raw != "":
			# Check if IP in DNS
			return self in DNSUtils.dnsToIp(net.raw)

		if self.family != net.family:
			return False
		if self.isIPv4:
			mask = ~(0xFFFFFFFF >> net.plen)
		elif self.isIPv6:
			mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF >> net.plen)
		else:
			return False

		return (self.addr & mask) == net.addr

	def contains(self, ip):
		"""Return whether the object (as network) contains given IP
		"""
		return isinstance(ip, IPAddr) and (ip == self or ip.isInNet(self))

	def __contains__(self, ip):
		return self.contains(ip)

	# Pre-calculated map: addr to maskplen
	def __getMaskMap():
		# builds {mask-integer: prefix-length} for all IPv4 and IPv6 prefixes
		m6 = (1 << 128)-1
		m4 = (1 << 32)-1
		mmap = {m6: 128, m4: 32, 0: 0}
		m = 0
		for i in range(0, 128):
			m |= 1 << i
			if i < 32:
				mmap[m ^ m4] = 32-1-i
			mmap[m ^ m6] = 128-1-i
		return mmap

	MAP_ADDR2MASKPLEN = __getMaskMap()

	@property
	def maskplen(self):
		"""Prefix length of this address interpreted as a netmask.

		Raises ValueError if the address is not a contiguous mask.
		"""
		mplen = 0
		if self._maskplen is not None:
			return self._maskplen
		mplen = IPAddr.MAP_ADDR2MASKPLEN.get(self._addr)
		if mplen is None:
			raise ValueError("invalid mask %r, no plen representation" % (str(self),))
		self._maskplen = mplen
		return mplen

	@staticmethod
	def masktoplen(mask):
		"""Convert mask string to prefix length

		To be used only for IPv4 masks
		"""
		return IPAddr(mask).maskplen

	@staticmethod
	def searchIP(text):
		"""Search if text is an IP address, and return it if so, else None
		"""
		match = IPAddr.IP_4_6_CRE.match(text)
		if not match:
			return None
		ipstr = match.group('IPv4')
		if ipstr is not None and ipstr != '':
			return ipstr
		return match.group('IPv6')
|
||||
|
||||
|
||||
# An IPv4 compatible IPv6 to be reused (::ffff:0:0/96; __init uses it to
# coerce IPv4-mapped IPv6 addresses into IPv4 instances)
IPAddr.IP6_4COMPAT = IPAddr("::ffff:0:0", 96)
|
||||
|
||||
|
||||
class IPAddrSet(set):
	"""Set of IPAddr entries that also honors subnet membership.

	Plain strings are converted to IPAddr on insertion; `hasSubNet` counts
	entries that are subnets (not single addresses), so `in` can skip the
	linear subnet scan when no subnets are present.
	"""

	# number of non-single (subnet) entries currently held
	hasSubNet = 0

	def __init__(self, ips=[]):
		entries, netCnt = IPAddrSet._list2set(ips)
		set.__init__(self, entries)
		self.hasSubNet = netCnt

	@staticmethod
	def _list2set(ips):
		"""Convert an iterable of IPAddr/str to (set-of-IPAddr, subnet-count)."""
		entries = set()
		netCnt = 0
		for item in ips:
			addr = item if isinstance(item, IPAddr) else IPAddr(item)
			entries.add(addr)
			netCnt += not addr.isSingle
		return entries, netCnt

	@property
	def instanceType(self):
		return "ip-set"

	def set(self, ips):
		"""Replace the whole content of the set with given entries."""
		entries, netCnt = IPAddrSet._list2set(ips)
		self.clear()
		self.update(entries)
		self.hasSubNet = netCnt

	def add(self, ip):
		addr = ip if isinstance(ip, IPAddr) else IPAddr(ip)
		self.hasSubNet |= not addr.isSingle
		set.add(self, addr)

	def __contains__(self, ip):
		addr = ip if isinstance(ip, IPAddr) else IPAddr(ip)
		# direct hit, or (only when subnets exist) containment in any subnet:
		return set.__contains__(self, addr) or (
			self.hasSubNet and any(net.contains(addr) for net in self)
		)
|
||||
|
||||
|
||||
class FileIPAddrSet(IPAddrSet):
	"""IP/subnet set backed by a file, reloaded lazily when the file changes."""

	# RE matching file://... (absolute as well as relative file name)
	RE_FILE_IGN_IP = re.compile(r'^file:(?:/{0,2}(?=/(?!/|.{1,2}/))|/{0,2})(.*)$')

	fileName = ''
	_reprName = None
	maxUpdateLatency = 1 # latency in seconds to update by changes
	_nextCheck = 0
	_fileStats = ()

	def __init__(self, fileName=''):
		self.fileName = fileName
		# self.load() - lazy load on demand by first check (in, __contains__ etc)

	@property
	def instanceType(self):
		return repr(self)

	def __eq__(self, other):
		"""Equal by identity, by file name, or by a 'file:...' spec string.

		NOTE(review): falls through returning None (falsy) when nothing
		matches, and re.match raises TypeError for non-string operands.
		"""
		if id(self) == id(other): return 1
		# to allow remove file-set from list (delIgnoreIP) by its name:
		if isinstance(other, FileIPAddrSet):
			return self.fileName == other.fileName
		m = FileIPAddrSet.RE_FILE_IGN_IP.match(other)
		if m:
			return self.fileName == m.group(1)

	def _isModified(self):
		"""Check whether the file is modified (file stats changed)

		Side effect: if modified, _fileStats will be updated to last known stats of file
		"""
		tm = MyTime.time()
		# avoid to check it always (not often than maxUpdateLatency):
		if tm <= self._nextCheck:
			return None; # no check needed
		self._nextCheck = tm + self.maxUpdateLatency
		stats = os.stat(self.fileName)
		stats = stats.st_mtime, stats.st_ino, stats.st_size
		if self._fileStats != stats:
			self._fileStats = stats
			return True; # modified, needs to be reloaded
		return False; # unmodified

	def load(self, forceReload=False, noError=True):
		"""Load set from file (on demand if needed or by forceReload)
		"""
		try:
			# load only if needed and modified (or first time load on demand)
			if self._isModified() or forceReload:
				with open(self.fileName, 'r') as f:
					ips = f.read()
				ips = splitwords(ips, ignoreComments=True)
				self.set(ips)
		except Exception as e: # pragma: no cover
			self._nextCheck += 60; # increase interval to check (to 1 minute, to avoid log flood on errors)
			if not noError: raise e
			logSys.warning("Retrieving IPs set from %r failed: %s", self.fileName, e)

	def __repr__(self):
		# lazily built 'file:...' representation matching RE_FILE_IGN_IP
		if self._reprName is None:
			self._reprName = 'file:' + ('/' if self.fileName.startswith('/') else '') + self.fileName
		return self._reprName

	def __contains__(self, ip):
		# load if needed:
		if self.fileName:
			self.load()
		# inherited contains:
		return IPAddrSet.__contains__(self, ip)
|
||||
|
||||
|
||||
def _NetworkInterfacesAddrs(withMask=False):
	"""Yield (interface-name, IPAddr) for all local network interfaces.

	First call lazily builds the real implementation via ctypes/libc
	getifaddrs and replaces DNSUtils._NetworkInterfacesAddrs with it.
	"""

	# Closure implementing lazy load modules and libc and define _NetworkInterfacesAddrs on demand:
	# Currently tested on Linux only (TODO: implement for MacOS, Solaris, etc)
	try:
		from ctypes import (
			Structure, Union, POINTER,
			pointer, get_errno, cast,
			c_ushort, c_byte, c_void_p, c_char_p, c_uint, c_int, c_uint16, c_uint32
		)
		import ctypes.util
		import ctypes

		# ctypes mirrors of the C structures used by getifaddrs(3):
		class struct_sockaddr(Structure):
			_fields_ = [
				('sa_family', c_ushort),
				('sa_data', c_byte * 14),]

		class struct_sockaddr_in(Structure):
			_fields_ = [
				('sin_family', c_ushort),
				('sin_port', c_uint16),
				('sin_addr', c_byte * 4)]

		class struct_sockaddr_in6(Structure):
			_fields_ = [
				('sin6_family', c_ushort),
				('sin6_port', c_uint16),
				('sin6_flowinfo', c_uint32),
				('sin6_addr', c_byte * 16),
				('sin6_scope_id', c_uint32)]

		class union_ifa_ifu(Union):
			_fields_ = [
				('ifu_broadaddr', POINTER(struct_sockaddr)),
				('ifu_dstaddr', POINTER(struct_sockaddr)),]

		class struct_ifaddrs(Structure):
			pass
		# self-referential 'ifa_next' forces fields to be set after the class exists:
		struct_ifaddrs._fields_ = [
			('ifa_next', POINTER(struct_ifaddrs)),
			('ifa_name', c_char_p),
			('ifa_flags', c_uint),
			('ifa_addr', POINTER(struct_sockaddr)),
			('ifa_netmask', POINTER(struct_sockaddr)),
			('ifa_ifu', union_ifa_ifu),
			('ifa_data', c_void_p),]

		libc = ctypes.CDLL(ctypes.util.find_library('c') or "")
		if not libc.getifaddrs: # pragma: no cover
			raise NotImplementedError('libc.getifaddrs is not available')

		def ifap_iter(ifap):
			# walk the singly-linked ifaddrs list:
			ifa = ifap.contents
			while True:
				yield ifa
				if not ifa.ifa_next:
					break
				ifa = ifa.ifa_next.contents

		def getfamaddr(ifa, withMask=False):
			# convert one ifaddrs entry to IPAddr (optionally with '/netmask'),
			# or None for unsupported address families:
			sa = ifa.ifa_addr.contents
			fam = sa.sa_family
			if fam == socket.AF_INET:
				sa = cast(pointer(sa), POINTER(struct_sockaddr_in)).contents
				addr = socket.inet_ntop(fam, sa.sin_addr)
				if withMask:
					nm = ifa.ifa_netmask.contents
					if nm is not None and nm.sa_family == socket.AF_INET:
						nm = cast(pointer(nm), POINTER(struct_sockaddr_in)).contents
						addr += '/'+socket.inet_ntop(fam, nm.sin_addr)
				return IPAddr(addr)
			elif fam == socket.AF_INET6:
				sa = cast(pointer(sa), POINTER(struct_sockaddr_in6)).contents
				addr = socket.inet_ntop(fam, sa.sin6_addr)
				if withMask:
					nm = ifa.ifa_netmask.contents
					if nm is not None and nm.sa_family == socket.AF_INET6:
						nm = cast(pointer(nm), POINTER(struct_sockaddr_in6)).contents
						addr += '/'+socket.inet_ntop(fam, nm.sin6_addr)
				return IPAddr(addr)
			return None

		def _NetworkInterfacesAddrs(withMask=False):
			# the real implementation, installed on DNSUtils below:
			ifap = POINTER(struct_ifaddrs)()
			result = libc.getifaddrs(pointer(ifap))
			if result != 0:
				raise OSError(get_errno())
			del result
			try:
				for ifa in ifap_iter(ifap):
					name = ifa.ifa_name.decode("UTF-8")
					addr = getfamaddr(ifa, withMask)
					if addr:
						yield name, addr
			finally:
				# always release the list allocated by getifaddrs:
				libc.freeifaddrs(ifap)

	except Exception as e: # pragma: no cover
		# not available on this platform - every later call raises:
		# NOTE(review): this fallback takes no `withMask` argument; calling it
		# with one would raise TypeError instead of the stored error — confirm intended.
		_init_error = NotImplementedError(e)
		def _NetworkInterfacesAddrs():
			raise _init_error

	# replace the bootstrap stub with the real (or failing) implementation:
	DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
	return _NetworkInterfacesAddrs(withMask)
|
||||
|
||||
# install the bootstrap closure; it replaces itself on first call (see above):
DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
|
||||
353
fail2ban-master/fail2ban/server/jail.py
Normal file
353
fail2ban-master/fail2ban/server/jail.py
Normal file
@@ -0,0 +1,353 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
|
||||
__author__ = "Cyril Jaquier, Lee Clemens, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Lee Clemens, 2012 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import logging
|
||||
import math
|
||||
import random
|
||||
import queue
|
||||
|
||||
from .actions import Actions
|
||||
from ..helpers import getLogger, _as_bool, extractOptions, MyTime
|
||||
from .mytime import MyTime
|
||||
|
||||
# Gets the instance of the logger (module-level, named after this module).
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Jail(object):
|
||||
"""Fail2Ban jail, which manages a filter and associated actions.
|
||||
|
||||
The class handles the initialisation of a filter, and actions. It's
|
||||
role is then to act as an interface between the filter and actions,
|
||||
passing bans detected by the filter, for the actions to then act upon.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name : str
|
||||
Name assigned to the jail.
|
||||
backend : str
|
||||
Backend to be used for filter. "auto" will attempt to pick
|
||||
the most preferred backend method. Default: "auto"
|
||||
db : Fail2BanDb
|
||||
Fail2Ban persistent database instance. Default: `None`
|
||||
|
||||
Attributes
|
||||
----------
|
||||
name
|
||||
database
|
||||
filter
|
||||
actions
|
||||
idle
|
||||
status
|
||||
"""
|
||||
|
||||
#Known backends. Each backend should have corresponding __initBackend method
|
||||
# yoh: stored in a list instead of a tuple since only
|
||||
# list had .index until 2.6
|
||||
_BACKENDS = ['pyinotify', 'polling', 'systemd']
|
||||
|
||||
	def __init__(self, name, backend = "auto", db=None):
		"""Initialize jail: store name/db, create the ticket queue and
		(unless backend is None) initialize the filter backend and actions.
		"""
		self.__db = db
		# 26 based on iptable chain name limit of 30 less len('f2b-')
		if len(name) >= 26:
			logSys.warning("Jail name %r might be too long and some commands "
							"might not function correctly. Please shorten"
							% name)
		self.__name = name
		# queue of failure tickets passed from filter to actions:
		self.__queue = queue.Queue()
		self.__filter = None
		# Extra parameters for increase ban time
		self._banExtra = {};
		logSys.info("Creating new jail '%s'" % self.name)
		# the backend actually initiated (may differ from requested on 'auto'):
		self._realBackend = None
		if backend is not None:
			self._realBackend = self._setBackend(backend)
		self.backend = backend
|
||||
|
||||
def __repr__(self):
|
||||
return "%s(%r)" % (self.__class__.__name__, self.name)
|
||||
|
||||
	def _setBackend(self, backend):
		"""Resolve and initialize the filter backend.

		`backend` may carry options ("name[opt=val,...]"); "auto" tries each
		known backend in preference order, an explicit name starts the probe
		chain at that backend. Returns the name of the backend actually
		initiated; raises ValueError for an unknown name, RuntimeError if
		no backend could be initialized at all.
		"""
		backend, beArgs = extractOptions(backend)
		backend = backend.lower() # to assure consistent matching

		backends = self._BACKENDS
		if backend != 'auto':
			# we have got strict specification of the backend to use
			if not (backend in self._BACKENDS):
				logSys.error("Unknown backend %s. Must be among %s or 'auto'"
					% (backend, backends))
				raise ValueError("Unknown backend %s. Must be among %s or 'auto'"
					% (backend, backends))
			# so explore starting from it till the 'end'
			backends = backends[backends.index(backend):]

		for b in backends:
			# dispatch to _initPyinotify/_initPolling/_initSystemd by name:
			initmethod = getattr(self, '_init%s' % b.capitalize())
			try:
				initmethod(**beArgs)
				if backend != 'auto' and b != backend:
					logSys.warning("Could only initiated %r backend whenever "
						"%r was requested" % (b, backend))
				else:
					logSys.info("Initiated %r backend" % b)
				self.__actions = Actions(self)
				return b # we are done
			except ImportError as e: # pragma: no cover
				# Log debug if auto, but error if specific
				logSys.log(
					logging.DEBUG if backend == "auto" else logging.ERROR,
					"Backend %r failed to initialize due to %s" % (b, e))
		# pragma: no cover
		# log error since runtime error message isn't printed, INVALID COMMAND
		logSys.error(
			"Failed to initialize any backend for Jail %r" % self.name)
		raise RuntimeError(
			"Failed to initialize any backend for Jail %r" % self.name)
|
||||
|
||||
def _initPolling(self, **kwargs):
	"""Create a FilterPoll instance as this jail's filter (pure-python polling)."""
	# deferred import: backend modules are loaded on demand only
	from .filterpoll import FilterPoll
	msg = "Jail '%s' uses poller %r" % (self.name, kwargs)
	logSys.info(msg)
	self.__filter = FilterPoll(self, **kwargs)
|
||||
|
||||
def _initPyinotify(self, **kwargs):
	"""Create a FilterPyinotify instance as this jail's filter."""
	# deferred import: raises ImportError when pyinotify is missing, which
	# lets the caller fall back to another backend
	from .filterpyinotify import FilterPyinotify
	msg = "Jail '%s' uses pyinotify %r" % (self.name, kwargs)
	logSys.info(msg)
	self.__filter = FilterPyinotify(self, **kwargs)
|
||||
|
||||
def _initSystemd(self, **kwargs): # pragma: systemd no cover
	"""Create a FilterSystemd instance as this jail's filter."""
	# deferred import: raises ImportError when the systemd bindings are
	# missing, which lets the caller fall back to another backend
	from .filtersystemd import FilterSystemd
	msg = "Jail '%s' uses systemd %r" % (self.name, kwargs)
	logSys.info(msg)
	self.__filter = FilterSystemd(self, **kwargs)
|
||||
|
||||
@property
def name(self):
	"""Name of jail (read-only; set once at construction).
	"""
	return self.__name
|
||||
|
||||
@property
def database(self):
	"""The database used to store persistent data for the jail
	(or None if persistence is disabled).
	"""
	return self.__db
|
||||
|
||||
@database.setter
def database(self, value):
	"""Set the persistent database instance for this jail.

	Parameters
	----------
	value : Fail2BanDb or None
		Database instance; None disables persistence for this jail.
	"""
	# (removed a stray trailing semicolon from the assignment)
	self.__db = value
|
||||
|
||||
@property
def filter(self):
	"""The filter which the jail is using to monitor log files
	(created by one of the _init* backend methods).
	"""
	return self.__filter
|
||||
|
||||
@property
def actions(self):
	"""Actions object used to manage actions for jail.
	"""
	return self.__actions
|
||||
|
||||
@property
def idle(self):
	"""A boolean indicating whether jail is idle.

	True when either the filter or the actions thread is idle.
	"""
	return self.filter.idle or self.actions.idle
|
||||
|
||||
@idle.setter
def idle(self, value):
	"""Propagate the idle flag to both worker threads of the jail."""
	for thread in (self.filter, self.actions):
		thread.idle = value
|
||||
|
||||
def status(self, flavor="basic"):
	"""The status of the jail.

	Parameters
	----------
	flavor : str
		"stats" returns a compact ``[backend, filter-stats, actions-stats]``
		list; anything else returns ``[("Filter", ...), ("Actions", ...)]``.

	Returns
	-------
	list
		Combined status of filter and actions, shaped per *flavor*.
	"""
	fstat = self.filter.status(flavor=flavor)
	astat = self.actions.status(flavor=flavor)
	if flavor == "stats":
		# report the really initialized backend when it differs from the
		# configured one (e.g. 'auto' resolved to 'polling');
		# removed a dead local that derived the backend name from the
		# filter class but was never used
		return [self._realBackend or self.backend, fstat, astat]
	return [
		("Filter", fstat),
		("Actions", astat),
	]
|
||||
|
||||
@property
def hasFailTickets(self):
	"""Retrieve whether queue has tickets to ban.

	Note: this is only a momentary snapshot in multi-threaded use;
	callers must tolerate races with concurrent put/get.
	"""
	return not self.__queue.empty()
|
||||
|
||||
def putFailTicket(self, ticket):
	"""Add a fail ticket to the jail.

	Used by filter to add a failure for banning.

	Parameters
	----------
	ticket : FailTicket
		Ticket to enqueue for the actions thread to process.
	"""
	self.__queue.put(ticket)
	# add ban to database moved to observer (should previously check not already banned
	# and increase ticket time if "bantime.increment" set)
|
||||
|
||||
def getFailTicket(self):
	"""Get a fail ticket from the jail.

	Used by actions to get a failure for banning.

	Returns the next queued ticket, or False when the queue is empty.
	"""
	try:
		# non-blocking pop; raises queue.Empty when nothing is pending
		return self.__queue.get(False)
	except queue.Empty:
		return False
|
||||
|
||||
def setBanTimeExtra(self, opt, value):
	"""Set one "bantime.*" extra option and rebuild the ban-time formula.

	Stores *opt* (e.g. 'increment', 'factor', 'formula', 'maxtime',
	'rndtime', 'multipliers') in self._banExtra and, when the option
	affects the computation, recompiles the 'evformula' lambda that maps
	a ban ticket to its (possibly increased) ban time.

	NOTE(review): 'factor' and 'formula' values are evaluated with
	eval()/compile() — these come from the jail configuration, which is
	presumably trusted; confirm they can never carry user-controlled input.
	"""
	# merge previous extra with new option:
	be = self._banExtra;
	# empty string means "unset":
	if value == '':
		value = None
	if value is not None:
		be[opt] = value;
	elif opt in be:
		del be[opt]
	logSys.info('Set banTime.%s = %s', opt, value)
	if opt == 'increment':
		be[opt] = _as_bool(value)
		if be.get(opt) and self.database is None:
			logSys.warning("ban time increment is not available as long jail database is not set")
	if opt in ['maxtime', 'rndtime']:
		# normalize duration expressions like "1d 12h" to seconds:
		if not value is None:
			be[opt] = MyTime.str2seconds(value)
	# prepare formula lambda:
	if opt in ['formula', 'factor', 'maxtime', 'rndtime', 'multipliers'] or be.get('evformula', None) is None:
		# split multifiers to an array begins with 0 (or empty if not set):
		if opt == 'multipliers':
			be['evmultipliers'] = [int(i) for i in (value.split(' ') if value is not None and value != '' else [])]
		# if we have multifiers - use it in lambda, otherwise compile and use formula within lambda
		multipliers = be.get('evmultipliers', [])
		banFactor = eval(be.get('factor', "1"))
		if len(multipliers):
			# table-driven growth: index by ban count, clamped to last entry
			evformula = lambda ban, banFactor=banFactor: (
				ban.Time * banFactor * multipliers[ban.Count if ban.Count < len(multipliers) else -1]
			)
		else:
			# default: exponential growth, capped at 2**20 to avoid overflow
			formula = be.get('formula', 'ban.Time * (1<<(ban.Count if ban.Count<20 else 20)) * banFactor')
			formula = compile(formula, '~inline-conf-expr~', 'eval')
			evformula = lambda ban, banFactor=banFactor, formula=formula: max(ban.Time, eval(formula))
		# extend lambda with max time :
		if not be.get('maxtime', None) is None:
			maxtime = be['maxtime']
			evformula = lambda ban, evformula=evformula: min(evformula(ban), maxtime)
		# mix lambda with random time (to prevent bot-nets to calculate exact time IP can be unbanned):
		if not be.get('rndtime', None) is None:
			rndtime = be['rndtime']
			evformula = lambda ban, evformula=evformula: (evformula(ban) + random.random() * rndtime)
		# set to extra dict:
		be['evformula'] = evformula
	#logSys.info('banTimeExtra : %s' % json.dumps(be))
|
||||
|
||||
def getBanTimeExtra(self, opt=None):
	"""Return ban-time extra option *opt* (None if unset), or the whole
	extra-options dict when *opt* is None.
	"""
	if opt is None:
		return self._banExtra
	return self._banExtra.get(opt, None)
|
||||
|
||||
def getMaxBanTime(self):
	"""Returns max possible ban-time of jail.

	With increment enabled this is the configured 'maxtime' (-1 if
	unbounded); otherwise the plain configured ban time of the actions.
	"""
	if self._banExtra.get('increment'):
		return self._banExtra.get("maxtime", -1)
	return self.actions.getBanTime()
|
||||
|
||||
def restoreCurrentBans(self, correctBanTime=True):
	"""Restore any previous valid bans from the database.

	Re-queues still-valid tickets from the persistent database so the
	actions thread bans them again after a restart. Tickets that are in
	the ignore list or whose remaining ban time already elapsed are
	skipped. All errors are logged but never propagated (best-effort).

	Parameters
	----------
	correctBanTime : bool
		When True (and increment mode is on), remaining ban times are
		clipped to the jail's maximal ban time.
	"""
	try:
		if self.database is not None:
			if self._banExtra.get('increment'):
				# increment mode: search without time limit, clip later
				forbantime = None;
				if correctBanTime:
					correctBanTime = self.getMaxBanTime()
			else:
				# use ban time as search time if we have not enabled a increasing:
				forbantime = self.actions.getBanTime()
			for ticket in self.database.getCurrentBans(jail=self, forbantime=forbantime,
				correctBanTime=correctBanTime, maxmatches=self.filter.failManager.maxMatches
			):
				try:
					# mark ticked was restored from database - does not put it again into db:
					ticket.restored = True
					#logSys.debug('restored ticket: %s', ticket)
					if self.filter._inIgnoreIPList(ticket.getID(), ticket): continue
					# correct start time / ban time (by the same end of ban):
					btm = ticket.getBanTime(forbantime)
					diftm = MyTime.time() - ticket.getTime()
					if btm != -1 and diftm > 0:
						btm -= diftm
					# ignore obsolete tickets:
					if btm != -1 and btm <= 0:
						continue
					self.putFailTicket(ticket)
				except Exception as e: # pragma: no cover
					logSys.error('Restore ticket failed: %s', e,
						exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
	except Exception as e: # pragma: no cover
		logSys.error('Restore bans failed: %s', e,
			exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
def start(self):
	"""Start the jail, by starting filter and actions threads.

	Once started, also queries the persistent database to reinstate
	any valid bans.
	"""
	logSys.debug("Starting jail %r", self.name)
	# start the filter before the actions, in declaration order:
	for thread in (self.filter, self.actions):
		thread.start()
	self.restoreCurrentBans()
	logSys.info("Jail %r started", self.name)
|
||||
|
||||
def stop(self, stop=True, join=True):
	"""Stop the jail, by stopping filter and actions threads.

	Parameters
	----------
	stop : bool
		When True, signal both threads to stop and clean up; when False,
		only the join phase is performed.
	join : bool
		When True, wait for both threads to terminate.

	Errors while stopping one thread are logged and do not prevent
	stopping the other.
	"""
	if stop:
		logSys.debug("Stopping jail %r", self.name)
	for obj in (self.filter, self.actions):
		try:
			## signal to stop filter / actions:
			if stop:
				if obj.isAlive():
					obj.stop()
				obj.done(); # and clean-up everything
			## wait for end of threads:
			if join:
				obj.join()
		except Exception as e:
			logSys.error("Stop %r of jail %r failed: %s", obj, self.name, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
	if join:
		logSys.info("Jail %r stopped", self.name)
|
||||
|
||||
def isAlive(self):
	"""Check jail "isAlive" by checking filter and actions threads.
	"""
	# explicit short-circuit: only consult actions when filter is not alive
	alive = self.filter.isAlive()
	if not alive:
		alive = self.actions.isAlive()
	return alive
|
||||
107
fail2ban-master/fail2ban/server/jails.py
Normal file
107
fail2ban-master/fail2ban/server/jails.py
Normal file
@@ -0,0 +1,107 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2013- Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
from threading import Lock
|
||||
try:
|
||||
from collections.abc import Mapping
|
||||
except ImportError:
|
||||
from collections import Mapping
|
||||
|
||||
from ..exceptions import DuplicateJailException, UnknownJailException
|
||||
from .jail import Jail
|
||||
|
||||
|
||||
class Jails(Mapping):
	"""Handles the jails.

	This class handles the jails. Creation, deletion or access to a jail
	must be done through this class. This class is thread-safe which is
	not the case of the jail itself, including filter and actions. This
	class is based on Mapping type, and the `add` method must be used to
	add additional jails.
	"""

	def __init__(self):
		## guards every structural access to the jail dict:
		self.__lock = Lock()
		self._jails = dict()

	def add(self, name, backend, db=None):
		"""Adds a jail.

		Adds a new jail if not already present which should use the
		given backend.

		Parameters
		----------
		name : str
			The name of the jail.
		backend : str
			The backend to use.
		db : Fail2BanDb
			Fail2Ban's persistent database instance.

		Raises
		------
		DuplicateJailException
			If jail name is already present.
		"""
		with self.__lock:
			if name in self._jails:
				raise DuplicateJailException(name)
			else:
				self._jails[name] = Jail(name, backend, db)

	def exists(self, name):
		# NOTE(review): membership test is intentionally lock-free (a
		# momentary snapshot); callers must tolerate races with add/delete
		return name in self._jails

	def __getitem__(self, name):
		# manual acquire/release in try/finally was replaced by a context
		# manager (the old form would release an unheld lock if acquire failed)
		with self.__lock:
			try:
				return self._jails[name]
			except KeyError:
				raise UnknownJailException(name)

	def __delitem__(self, name):
		with self.__lock:
			try:
				del self._jails[name]
			except KeyError:
				raise UnknownJailException(name)

	def __len__(self):
		with self.__lock:
			return len(self._jails)

	def __iter__(self):
		with self.__lock:
			return iter(self._jails)
|
||||
143
fail2ban-master/fail2ban/server/jailthread.py
Normal file
143
fail2ban-master/fail2ban/server/jailthread.py
Normal file
@@ -0,0 +1,143 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import sys
|
||||
from threading import Thread
|
||||
from abc import abstractmethod
|
||||
|
||||
from .utils import Utils
|
||||
from ..helpers import excepthook, prctl_set_th_name
|
||||
|
||||
|
||||
class JailThread(Thread):
	"""Abstract class for threading elements in Fail2Ban.

	Attributes
	----------
	daemon
	ident
	name
	status
	active : bool
		Control the state of the thread.
	idle : bool
		Control the idle state of the thread.
	sleeptime : int
		The time the thread sleeps for in the loop.

	NOTE(review): the @abstractmethod decorators below have no enforcing
	effect, since Thread does not use ABCMeta — presumably documentation
	intent only; confirm subclasses always override them.
	"""

	def __init__(self, name=None):
		super(JailThread, self).__init__(name=name)
		## Should going with main thread also:
		self.daemon = True
		## Control the state of the thread (None - was not started, True - active, False - stopped).
		self.active = None
		## Control the idle state of the thread.
		self.idle = False
		## The time the thread sleeps in the loop.
		self.sleeptime = Utils.DEFAULT_SLEEP_TIME

		# excepthook workaround for threads, derived from:
		# http://bugs.python.org/issue1230540#msg91244
		run = self.run

		def run_with_except_hook(*args, **kwargs):
			try:
				run(*args, **kwargs)
				# call on stop callback to do some finalizations:
				self.onStop()
			except Exception as e:
				# avoid very sporadic error "'NoneType' object has no attribute 'exc_info'" (https://bugs.python.org/issue7336)
				# only extremely fast systems are affected ATM (2.7 / 3.x), if thread ends nothing is available here.
				if sys is not None:
					excepthook(*sys.exc_info())
				else:
					print(e)
		# instance attribute shadows the class method, so Thread's bootstrap
		# invokes the wrapped version:
		self.run = run_with_except_hook

	def _bootstrap(self):
		# set the kernel-visible thread name before the standard bootstrap runs
		prctl_set_th_name(self.name)
		return super(JailThread, self)._bootstrap();

	@abstractmethod
	def status(self, flavor="basic"): # pragma: no cover - abstract
		"""Abstract - Should provide status information.
		"""
		pass

	def start(self):
		"""Sets active flag and starts thread.
		"""
		self.active = True
		super(JailThread, self).start()

	@abstractmethod
	def onStop(self): # pragma: no cover - abstract
		"""Abstract - Called when thread ends (after run).
		"""
		pass

	def stop(self):
		"""Sets `active` property to False, to flag run method to return.
		"""
		if self.active: self.active = False
		# normally onStop will be called automatically in thread after its run ends,
		# but for backwards compatibilities we'll invoke it in caller of stop method.
		self.onStop()
		# guard: replace with no-op so the hook cannot fire a second time
		self.onStop = lambda:()
		self.done()

	def done(self):
		# self-disabling: replace with no-op so repeated calls are cheap
		self.done = lambda:()
		# if still running - wait a bit before initiate clean-up:
		if self.is_alive():
			Utils.wait_for(lambda: not self.is_alive(), 5)
		# now clean-up everything:
		self.afterStop()


	@abstractmethod
	def run(self): # pragma: no cover - abstract
		"""Abstract - Called when thread starts, thread stops when returns.
		"""
		pass

	def afterStop(self):
		"""Cleanup resources."""
		pass

	def join(self):
		""" Safer join, that could be called also for not started (or ended) threads (used for cleanup).
		"""
		## if cleanup needed - create derivative and call it before join...
		self.done()
		## if was really started - should call join:
		if self.active is not None:
			super(JailThread, self).join()
|
||||
|
||||
## python 3.9, restore isAlive method:
|
||||
if not hasattr(JailThread, 'isAlive'):
|
||||
JailThread.isAlive = JailThread.is_alive
|
||||
235
fail2ban-master/fail2ban/server/mytime.py
Normal file
235
fail2ban-master/fail2ban/server/mytime.py
Normal file
@@ -0,0 +1,235 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import datetime
|
||||
import re
|
||||
import time
|
||||
|
||||
|
||||
##
|
||||
# MyTime class.
|
||||
#
|
||||
|
||||
class MyTime:
	"""A wrapper around time module primarily for testing purposes

	This class is a wrapper around time.time() and time.gmtime(). When
	performing unit test, it is very useful to get a fixed value from
	these functions. Thus, time.time() and time.gmtime() should never
	be called directly. This wrapper should be called instead. The API
	are equivalent.
	"""

	## fixed timestamp used instead of the real clock (None = real time):
	myTime = None
	## timestamp for which `alternateNow` was precomputed:
	alternateNowTime = None
	## precomputed datetime returned by now() when myTime matches alternateNowTime:
	alternateNow = None

	@staticmethod
	def setAlternateNow(t):
		"""Set current time.

		Use None in order to always get the real current time.

		@param t the time to set or None
		"""

		MyTime.alternateNowTime = t
		MyTime.alternateNow = \
			datetime.datetime.fromtimestamp(t) if t is not None else None

	@staticmethod
	def setTime(t):
		"""Set current time.

		Use None in order to always get the real current time.

		@param t the time to set or None
		"""

		MyTime.myTime = t

	@staticmethod
	def time():
		"""Decorate time.time() for the purpose of testing mocking

		@return time.time() if setTime was called with None
		"""

		if MyTime.myTime is None:
			return time.time()
		else:
			return MyTime.myTime

	@staticmethod
	def gmtime():
		"""Decorate time.gmtime() for the purpose of testing mocking

		@return time.gmtime() if setTime was called with None
		"""
		if MyTime.myTime is None:
			return time.gmtime()
		else:
			return time.gmtime(MyTime.myTime)

	@staticmethod
	def now():
		"""Decorate datetime.now() for the purpose of testing mocking

		@return datetime.now() if setTime was called with None
		"""
		if MyTime.myTime is None:
			return datetime.datetime.now()
		# reuse the precomputed datetime when the mocked time matches:
		if MyTime.myTime == MyTime.alternateNowTime:
			return MyTime.alternateNow
		return datetime.datetime.fromtimestamp(MyTime.myTime)

	@staticmethod
	def localtime(x=None):
		"""Decorate time.localtime() for the purpose of testing mocking

		@return time.localtime() if setTime was called with None
		"""
		# an explicit argument always bypasses the mocked time:
		if MyTime.myTime is None or x is not None:
			return time.localtime(x)
		else:
			return time.localtime(MyTime.myTime)

	@staticmethod
	def time2str(unixTime, format="%Y-%m-%d %H:%M:%S"):
		"""Convert time to a string representing as date and time using given format.
		Default format is ISO 8601, YYYY-MM-DD HH:MM:SS without microseconds.

		@return ISO-capable string representation of given unixTime
		"""
		# consider end of 9999th year (in GMT+23 to avoid year overflow in other TZ)
		dt = datetime.datetime.fromtimestamp(
			unixTime).replace(microsecond=0
		) if unixTime < 253402214400 else datetime.datetime(9999, 12, 31, 23, 59, 59)
		return dt.strftime(format)

	## precreate/precompile primitives used in str2seconds:

	## preparing expression: insert a space between a unit and a following digit
	## (e.g. '1d12h' -> '1d 12h'):
	_str2sec_prep = re.compile(r"(?i)(?<=[a-z])(\d)")
	## finally expression: join space-separated numeric groups with '+':
	_str2sec_fini = re.compile(r"(\d)\s+(\d)")
	## wrapper for each sub part:
	_str2sec_subpart = r"(?i)(?<=[\d\s])(%s)\b"
	## parts to be replaced - pair of (regexp x replacement):
	## note: month/year use the mean Gregorian length (leap years averaged in)
	_str2sec_parts = (
		(re.compile(_str2sec_subpart % r"days?|da|dd?"), "*"+str(24*60*60)),
		(re.compile(_str2sec_subpart % r"weeks?|wee?|ww?"), "*"+str(7*24*60*60)),
		(re.compile(_str2sec_subpart % r"months?|mon?"), "*"+str((365*3+366)*24*60*60/4/12)),
		(re.compile(_str2sec_subpart % r"years?|yea?|yy?"), "*"+str((365*3+366)*24*60*60/4)),
		(re.compile(_str2sec_subpart % r"seconds?|sec?|ss?"), "*"+str(1)),
		(re.compile(_str2sec_subpart % r"minutes?|min?|mm?"), "*"+str(60)),
		(re.compile(_str2sec_subpart % r"hours?|hou?|hh?"), "*"+str(60*60)),
	)

	@staticmethod
	def str2seconds(val):
		"""Wraps string expression like "1h 2m 3s" into number contains seconds (3723).
		The string expression will be evaluated as mathematical expression, spaces between each groups
		will be wrapped to "+" operand (only if any operand does not specified between).
		Because of case insensitivity and overwriting with minutes ("m" or "mm"), the short replacement for month
		are "mo" or "mon".
		Ex: 1hour+30min = 5400
		    0d 1h 30m = 5400
		    1year-6mo = 15778800
		    6 months = 15778800
		warn: month is not 30 days, it is a year in seconds / 12, the leap years will be respected also:
		>>>> float(str2seconds("1month")) / 60 / 60 / 24
		30.4375
		>>>> float(str2seconds("1year")) / 60 / 60 / 24
		365.25

		NOTE(review): the rewritten expression is passed to eval(); the value
		comes from the configuration, which is presumably trusted — confirm it
		can never carry user-controlled input.

		@returns number (calculated seconds from expression "val")
		"""
		# numbers pass through unchanged:
		if isinstance(val, (int, float, complex)):
			return val
		# replace together standing abbreviations, example '1d12h' -> '1d 12h':
		val = MyTime._str2sec_prep.sub(r" \1", val)
		# replace abbreviation with expression:
		for rexp, rpl in MyTime._str2sec_parts:
			val = rexp.sub(rpl, val)
		val = MyTime._str2sec_fini.sub(r"\1+\2", val)
		return eval(val)
|
||||
|
||||
class seconds2str():
	"""Converts seconds to string on demand (if string representation needed).

	Rendering rounds the coarser the larger the value is and emits at most
	three unit groups.

	Ex: seconds2str(86400*390) = 1y 3w 4d
	    seconds2str(86400*368) = 1y 3d
	    seconds2str(86400*2+3600*7+60*15) = 2d 7h 15m
	    seconds2str(86400*2+3599) = 2d 1h
	    seconds2str(3600-5) = 1h
	    seconds2str(3600-10) = 59m 50s
	    seconds2str(59) = 59s
	"""
	def __init__(self, sec):
		## raw value in seconds (int or float):
		self.sec = sec
	def __str__(self):
		"""Render the stored seconds as a compact human-readable duration.

		(Removed an unused local group counter `c = 3` and dead
		commented-out timedelta code from the original.)
		"""
		s = self.sec
		# automatic accuracy: round by large values (and maximally 3 groups)
		if s >= 31536000: # a year as 365*24*60*60 (don't need to consider leap year by this accuracy)
			s = int(round(float(s)/86400)) # round by a day
			r = str(s//365) + 'y '; s %= 365
			if s >= 7:
				r += str(s//7) + 'w '; s %= 7
			if s:
				r += str(s) + 'd '
			return r[:-1]
		if s >= 604800: # a week as 24*60*60*7
			s = int(round(float(s)/3600)) # round by a hour
			r = str(s//168) + 'w '; s %= 168
			if s >= 24:
				r += str(s//24) + 'd '; s %= 24
			if s:
				r += str(s) + 'h '
			return r[:-1]
		if s >= 86400: # a day as 24*60*60
			s = int(round(float(s)/60)) # round by a minute
			r = str(s//1440) + 'd '; s %= 1440
			if s >= 60:
				r += str(s//60) + 'h '; s %= 60
			if s:
				r += str(s) + 'm '
			return r[:-1]
		if s >= 3595: # a hour as 60*60 (- 5 seconds)
			s = int(round(float(s)/10)) # round by 10 seconds
			r = str(s//360) + 'h '; s %= 360
			if s >= 6: # a minute
				r += str(s//6) + 'm '; s %= 6
			return r[:-1]
		r = ''
		if s >= 60: # a minute
			r += str(s//60) + 'm '; s %= 60
		if s: # remaining seconds
			r += str(s) + 's '
		elif not self.sec: # 0s
			r = '0 '
		return r[:-1]
	def __repr__(self):
		return self.__str__()
|
||||
529
fail2ban-master/fail2ban/server/observer.py
Normal file
529
fail2ban-master/fail2ban/server/observer.py
Normal file
@@ -0,0 +1,529 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Serg G. Brester (sebres)
|
||||
#
|
||||
# This module was written as part of ban time increment feature.
|
||||
|
||||
__author__ = "Serg G. Brester (sebres)"
|
||||
__copyright__ = "Copyright (c) 2014 Serg G. Brester"
|
||||
__license__ = "GPL"
|
||||
|
||||
import threading
|
||||
from .jailthread import JailThread
|
||||
from .failmanager import FailManagerEmpty
|
||||
import os, logging, time, datetime, math, json, random
|
||||
import sys
|
||||
from ..helpers import getLogger
|
||||
from .mytime import MyTime
|
||||
from .utils import Utils
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
class ObserverThread(JailThread):
|
||||
"""Handles observing a database, managing bad ips and ban increment.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
||||
Attributes
|
||||
----------
|
||||
daemon
|
||||
ident
|
||||
name
|
||||
status
|
||||
active : bool
|
||||
Control the state of the thread.
|
||||
idle : bool
|
||||
Control the idle state of the thread.
|
||||
sleeptime : int
|
||||
The time the thread sleeps for in the loop.
|
||||
"""
|
||||
|
||||
# observer is event driven and it sleep organized incremental, so sleep intervals can be shortly:
|
||||
DEFAULT_SLEEP_INTERVAL = Utils.DEFAULT_SLEEP_INTERVAL / 10
|
||||
|
||||
def __init__(self):
|
||||
# init thread
|
||||
super(ObserverThread, self).__init__(name='f2b/observer')
|
||||
# before started - idle:
|
||||
self.idle = True
|
||||
## Event queue
|
||||
self._queue_lock = threading.RLock()
|
||||
self._queue = []
|
||||
## Event, be notified if anything added to event queue
|
||||
self._notify = threading.Event()
|
||||
## Sleep for max 60 seconds, it possible to specify infinite to always sleep up to notifying via event,
|
||||
## but so we can later do some service "events" occurred infrequently directly in main loop of observer (not using queue)
|
||||
self.sleeptime = 60
|
||||
#
|
||||
self._timers = {}
|
||||
self._paused = False
|
||||
self.__db = None
|
||||
self.__db_purge_interval = 60*60
|
||||
# observer is a not main thread:
|
||||
self.daemon = True
|
||||
|
||||
def __getitem__(self, i):
|
||||
try:
|
||||
return self._queue[i]
|
||||
except KeyError:
|
||||
raise KeyError("Invalid event index : %s" % i)
|
||||
|
||||
def __delitem__(self, i):
|
||||
try:
|
||||
del self._queue[i]
|
||||
except KeyError:
|
||||
raise KeyError("Invalid event index: %s" % i)
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._queue)
|
||||
|
||||
def __len__(self):
|
||||
return len(self._queue)
|
||||
|
||||
def __eq__(self, other): # Required for Threading
|
||||
return False
|
||||
|
||||
def __hash__(self): # Required for Threading
|
||||
return id(self)
|
||||
|
||||
def add_named_timer(self, name, starttime, *event):
|
||||
"""Add a named timer event to queue will start (and wake) in 'starttime' seconds
|
||||
|
||||
Previous timer event with same name will be canceled and trigger self into
|
||||
queue after new 'starttime' value
|
||||
"""
|
||||
t = self._timers.get(name, None)
|
||||
if t is not None:
|
||||
t.cancel()
|
||||
t = threading.Timer(starttime, self.add, event)
|
||||
self._timers[name] = t
|
||||
t.start()
|
||||
|
||||
def add_timer(self, starttime, *event):
|
||||
"""Add a timer event to queue will start (and wake) in 'starttime' seconds
|
||||
"""
|
||||
# in testing we should wait (looping) for the possible time drifts:
|
||||
if MyTime.myTime is not None and starttime:
|
||||
# test time after short sleep:
|
||||
t = threading.Timer(Utils.DEFAULT_SLEEP_INTERVAL, self._delayedEvent,
|
||||
(MyTime.time() + starttime, time.time() + starttime, event)
|
||||
)
|
||||
t.start()
|
||||
return
|
||||
# add timer event:
|
||||
t = threading.Timer(starttime, self.add, event)
|
||||
t.start()
|
||||
|
||||
def _delayedEvent(self, endMyTime, endTime, event):
|
||||
if MyTime.time() >= endMyTime or time.time() >= endTime:
|
||||
self.add_timer(0, *event)
|
||||
return
|
||||
# repeat after short sleep:
|
||||
t = threading.Timer(Utils.DEFAULT_SLEEP_INTERVAL, self._delayedEvent,
|
||||
(endMyTime, endTime, event)
|
||||
)
|
||||
t.start()
|
||||
|
||||
def pulse_notify(self):
|
||||
"""Notify wakeup (sets /and resets/ notify event)
|
||||
"""
|
||||
if not self._paused:
|
||||
n = self._notify
|
||||
if n:
|
||||
n.set()
|
||||
#n.clear()
|
||||
|
||||
def add(self, *event):
|
||||
"""Add a event to queue and notify thread to wake up.
|
||||
"""
|
||||
## lock and add new event to queue:
|
||||
with self._queue_lock:
|
||||
self._queue.append(event)
|
||||
self.pulse_notify()
|
||||
|
||||
def add_wn(self, *event):
|
||||
"""Add a event to queue without notifying thread to wake up.
|
||||
"""
|
||||
## lock and add new event to queue:
|
||||
with self._queue_lock:
|
||||
self._queue.append(event)
|
||||
|
||||
def call_lambda(self, l, *args):
|
||||
l(*args)
|
||||
|
||||
def run(self):
	"""Main loop for Threading.

	This function is the main loop of the thread: it drains the event
	queue, dispatching each event to its handler, then sleeps on the
	notify event until new work arrives or the observer is stopped.

	Returns
	-------
	bool
		True when the thread exits nicely.
	"""
	logSys.info("Observer start...")
	## first time create named timer to purge database each hour (clean old entries) ...
	self.add_named_timer('DB_PURGE', self.__db_purge_interval, 'db_purge')
	## Mapping of all possible event types of observer:
	## (event name -> bound handler; unknown names fall back to getattr below)
	__meth = {
		# universal lambda:
		'call': self.call_lambda,
		# system and service events:
		'db_set': self.db_set,
		'db_purge': self.db_purge,
		# service events of observer self:
		'is_alive' : self.isAlive,
		'is_active': self.isActive,
		'start': self.start,
		'stop': self.stop,
		'nop': lambda:(),
		'shutdown': lambda:()
	}
	try:
		## check it self with sending is_alive event
		self.add('is_alive')
		## if we should stop - break a main loop
		while self.active:
			self.idle = False
			## check events available and execute all events from queue
			while not self._paused:
				## lock, check and pop one from begin of queue:
				try:
					ev = None
					with self._queue_lock:
						if len(self._queue):
							ev = self._queue.pop(0)
					if ev is None:
						break
					## retrieve method by name (first element may also be a callable directly)
					meth = ev[0]
					if not callable(ev[0]): meth = __meth.get(meth) or getattr(self, meth)
					## execute it with rest of event as variable arguments
					meth(*ev[1:])
				except Exception as e:
					# a failing event must not kill the observer loop - log and continue:
					#logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
					logSys.error('%s', e, exc_info=True)
			## going sleep, wait for events (in queue)
			n = self._notify
			if n:
				self.idle = True
				n.wait(self.sleeptime)
				## wake up - reset signal now (we don't need it so long as we read from queue)
				n.clear()
				if self._paused:
					continue
			else:
				## notify event deleted (shutdown) - just sleep a little bit (waiting for shutdown events, prevent high cpu usage)
				time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
				## stop by shutdown and empty queue :
				if not self.is_full:
					break
		## end of main loop - exit
		logSys.info("Observer stopped, %s events remaining.", len(self._queue))
		self._notify = None
		#print("Observer stopped, %s events remaining." % len(self._queue))
	except Exception as e:
		logSys.error('Observer stopped after error: %s', e, exc_info=True)
		#print("Observer stopped with error: %s" % str(e))
	# clear all events - exit, for possible calls of wait_empty:
	with self._queue_lock:
		self._queue = []
	self.idle = True
	return True
|
||||
|
||||
def isAlive(self):
	"""Handler of the 'is_alive' event: confirm the observer responds."""
	return True
|
||||
|
||||
def isActive(self, fromStr=None):
	"""Handler of the 'is_active' event: report whether the loop runs.

	Parameters
	----------
	fromStr : str, optional
		Caller tag, kept for diagnostics (currently unused).
	"""
	return self.active
|
||||
|
||||
def start(self):
	# Handler of the 'start' event: start the underlying thread once.
	# Guarded by the queue lock so concurrent start requests cannot call
	# threading.Thread.start() twice.
	with self._queue_lock:
		if not self.active:
			super(ObserverThread, self).start()
|
||||
|
||||
def stop(self, wtime=5, forceQuit=True):
	"""Stop the observer, trying to drain the queue first.

	Parameters
	----------
	wtime : int
		Maximum seconds to wait for the queue to empty.
	forceQuit : bool
		If True, stop even when events remain after 'wtime'.

	Returns
	-------
	bool
		True if the observer was stopped (or already stopped).
	"""
	if self.active and self._notify:
		logSys.info("Observer stop ... try to end queue %s seconds", wtime)
		#print("Observer stop ....")
		# just add shutdown job to make possible wait later until full (events remaining)
		with self._queue_lock:
			self.add_wn('shutdown')
			#don't pulse - just set, because we will delete it hereafter (sometimes not wakeup)
			n = self._notify
			self._notify.set()
			#self.pulse_notify()
			self._notify = None
		# wait max wtime seconds until full (events remaining)
		if self.wait_empty(wtime) or forceQuit:
			n.clear()
			self.active = False; # leave outer (active) loop
			self._paused = True; # leave inner (queue) loop
			self.__db = None
		else:
			# queue not drained and no force quit - restore notify and report state:
			self._notify = n
			return self.wait_idle(min(wtime, 0.5)) and not self.is_full
	return True
|
||||
|
||||
@property
def is_full(self):
	"""True while there are unprocessed events in the queue."""
	with self._queue_lock:
		return bool(self._queue)
|
||||
|
||||
def wait_empty(self, sleeptime=None):
	"""Wait observer is running and returns if observer has no more events (queue is empty)

	If 'sleeptime' is given, waits at most that many seconds; otherwise
	waits until the queue is empty.
	"""
	time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
	if sleeptime is not None:
		e = MyTime.time() + sleeptime
	# enqueue a no-operation to be sure all real jobs are executed once nop leaves the queue:
	if self._notify is not None:
		self.add_wn('nop')
		if self.is_full and self.idle:
			self.pulse_notify()
	while self.is_full:
		if sleeptime is not None and MyTime.time() > e:
			break
		time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
	# wait idle to be sure the last queue element is processed (because pop event before processing it) :
	self.wait_idle(0.001)
	return not self.is_full
|
||||
|
||||
|
||||
def wait_idle(self, sleeptime=None):
	"""Wait observer is running and returns if observer idle (observer sleeps)

	If 'sleeptime' is given, waits at most that many seconds.
	"""
	time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
	if self.idle:
		return True
	if sleeptime is not None:
		e = MyTime.time() + sleeptime
	while not self.idle:
		if sleeptime is not None and MyTime.time() > e:
			break
		time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
	return self.idle
|
||||
|
||||
@property
def paused(self):
	"""Whether event processing is currently suspended."""
	return self._paused

@paused.setter
def paused(self, pause):
	"""Suspend/resume event processing; wakes the loop on state change."""
	if self._paused != pause:
		self._paused = pause
		# wake the loop so it notices the new pause state immediately:
		self.pulse_notify()
|
||||
|
||||
|
||||
@property
def status(self):
	"""Status of observer to be implemented. [TODO]"""
	return ('', '')
|
||||
|
||||
## -----------------------------------------
|
||||
## [Async] database service functionality ...
|
||||
## -----------------------------------------
|
||||
|
||||
def db_set(self, db):
	# Handler of the 'db_set' event: bind the fail2ban database instance
	# used by db_purge and the ban-time increment logic.
	self.__db = db
|
||||
|
||||
def db_purge(self):
	"""Handler of the periodic 'DB_PURGE' timer: clean old database entries
	and re-arm the timer for the next interval.
	"""
	logSys.debug("Purge database event occurred")
	if self.__db is not None:
		self.__db.purge()
	# trigger timer again ...
	self.add_named_timer('DB_PURGE', self.__db_purge_interval, 'db_purge')
|
||||
|
||||
## -----------------------------------------
|
||||
## [Async] ban time increment functionality ...
|
||||
## -----------------------------------------
|
||||
|
||||
def failureFound(self, jail, ticket):
	""" Notify observer a failure for ip was found

	Observer will check ip was known (bad) and possibly increase the retry
	count of the ticket, so repeat offenders reach maxretry faster.
	"""
	# check jail active (and increment mode enabled):
	if not jail.isAlive() or not jail.getBanTimeExtra("increment"):
		return
	ip = ticket.getID()
	unixTime = ticket.getTime()
	logSys.debug("[%s] Observer: failure found %s", jail.name, ip)
	# increase retry count for known (bad) ip, corresponding banCount of it (one try will count than 2, 3, 5, 9 ...) :
	banCount = 0
	retryCount = 1
	timeOfBan = None
	try:
		maxRetry = jail.filter.failManager.getMaxRetry()
		db = jail.database
		if db is not None:
			for banCount, timeOfBan, lastBanTime in db.getBan(ip, jail):
				banCount = max(banCount, ticket.getBanCount())
				# exponential weighting of one failure, capped at 2**20 to avoid overflow:
				retryCount = ((1 << (banCount if banCount < 20 else 20))/2 + 1)
				# if lastBanTime == -1 or timeOfBan + lastBanTime * 2 > MyTime.time():
				# 	retryCount = maxRetry
				break
			retryCount = min(retryCount, maxRetry)
			# check this ticket already known (line was already processed and in the database and will be restored from there):
			if timeOfBan is not None and unixTime <= timeOfBan:
				logSys.debug("[%s] Ignore failure %s before last ban %s < %s, restored",
					jail.name, ip, unixTime, timeOfBan)
				return
		# for not increased failures observer should not add it to fail manager, because was already added by filter self
		if retryCount <= 1:
			return
		# retry counter was increased - add it again:
		logSys.info("[%s] Found %s, bad - %s, %s # -> %s%s", jail.name, ip,
			MyTime.time2str(unixTime), banCount, retryCount,
			(', Ban' if retryCount >= maxRetry else ''))
		# retryCount-1, because a ticket was already once incremented by filter self
		retryCount = jail.filter.failManager.addFailure(ticket, retryCount - 1, True)
		ticket.setBanCount(banCount)
		# after observe we have increased attempt count, compare it >= maxretry ...
		if retryCount >= maxRetry:
			# perform the banning of the IP now (again)
			jail.filter.performBan(ip)
	except Exception as e:
		logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
|
||||
class BanTimeIncr:
	"""Value object passed to the jail's 'evformula' ban-time expression.

	Exposes the current ban time and ban count under the attribute names
	the formula expects (Time / Count).
	"""
	def __init__(self, banTime, banCount):
		self.Time = banTime
		self.Count = banCount
|
||||
|
||||
def calcBanTime(self, jail, banTime, banCount):
	# Evaluate the jail's configured ban-time formula ('evformula') for the
	# given current ban time and ban count; returns the new ban time.
	be = jail.getBanTimeExtra()
	return be['evformula'](self.BanTimeIncr(banTime, banCount))
|
||||
|
||||
def incrBanTime(self, jail, banTime, ticket):
	"""Check for IP address to increment ban time (if was already banned).

	Looks the IP up in the jail's database; when a previous ban is found,
	evaluates the jail's 'evformula' to prolong the ban and updates the
	ticket's ban count/time accordingly.

	Returns
	-------
	float
		new ban time.
	"""
	# check jail active :
	if not jail.isAlive() or not jail.database:
		return banTime
	be = jail.getBanTimeExtra()
	ip = ticket.getID()
	orgBanTime = banTime
	# check ip was already banned (increment time of ban):
	try:
		if banTime > 0 and be.get('increment', False):
			# search IP in database and increase time if found:
			for banCount, timeOfBan, lastBanTime in \
				jail.database.getBan(ip, jail, overalljails=be.get('overalljails', False)) \
			:
				# increment count in ticket (if still not increased from banmanager, test-cases?):
				if banCount >= ticket.getBanCount():
					ticket.setBanCount(banCount+1)
				logSys.debug('IP %s was already banned: %s #, %s', ip, banCount, timeOfBan)
				# calculate new ban time
				if banCount > 0:
					banTime = be['evformula'](self.BanTimeIncr(banTime, banCount))
				ticket.setBanTime(banTime)
				# check current ticket time to prevent increasing for twice read tickets (restored from log file besides database after restart)
				if ticket.getTime() > timeOfBan:
					# use lazy %-args (consistent with the rest of the file, avoids
					# formatting when INFO is disabled):
					logSys.info('[%s] IP %s is bad: %s # last %s - incr %s to %s', jail.name, ip, banCount,
						MyTime.time2str(timeOfBan),
						MyTime.seconds2str(orgBanTime), MyTime.seconds2str(banTime))
				else:
					ticket.restored = True
				break
	except Exception as e:
		logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
	return banTime
|
||||
|
||||
def banFound(self, ticket, jail, btime):
	""" Notify observer a ban occurred for ip

	Observer will check ip was known (bad) and possibly increase/prolong a ban time
	Secondary we will actualize the bans and bips (bad ip) in database
	"""
	if ticket.restored: # pragma: no cover (normally not restored tickets only)
		return
	try:
		oldbtime = btime
		ip = ticket.getID()
		logSys.debug("[%s] Observer: ban found %s, %s", jail.name, ip, btime)
		# if not permanent and ban time was not set - check time should be increased:
		if btime != -1 and ticket.getBanTime() is None:
			btime = self.incrBanTime(jail, btime, ticket)
			# if we should prolong ban time:
			if btime == -1 or btime > oldbtime:
				ticket.setBanTime(btime)
		# if not permanent
		if btime != -1:
			bendtime = ticket.getTime() + btime
			logtime = (MyTime.seconds2str(btime), MyTime.time2str(bendtime))
			# check ban is not too old :
			if bendtime < MyTime.time():
				logSys.debug('Ignore old bantime %s', logtime[1])
				return False
		else:
			logtime = ('permanent', 'infinite')
		# if ban time was prolonged - log again with new ban time:
		if btime != oldbtime:
			logSys.notice("[%s] Increase Ban %s (%d # %s -> %s)", jail.name,
				ip, ticket.getBanCount(), *logtime)
			# delayed prolonging ticket via actions that expected this (not later than 10 sec):
			logSys.log(5, "[%s] Observer: prolong %s in %s", jail.name, ip, (btime, oldbtime))
			self.add_timer(min(10, max(0, btime - oldbtime - 5)), self.prolongBan, ticket, jail)
		# add ticket to database, but only if was not restored (not already read from database):
		if jail.database is not None and not ticket.restored:
			# add to database always only after ban time was calculated and not yet already banned:
			jail.database.addBan(jail, ticket)
	except Exception as e:
		logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
def prolongBan(self, ticket, jail):
	"""Prolong a running ban via the jail's actions.

	Delayed timer callback scheduled from banFound() after the ban time
	of an already-banned ticket was increased; forwards the prolonged
	ticket to the actions that expect it.
	"""
	try:
		btime = ticket.getBanTime()
		ip = ticket.getID()
		logSys.debug("[%s] Observer: prolong %s, %s", jail.name, ip, btime)
		# prolong ticket via actions that expected this:
		jail.actions._prolongBan(ticket)
	except Exception as e:
		logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
# Global observer initial created in server (could be later rewritten via singleton)
|
||||
class _Observers:
|
||||
def __init__(self):
|
||||
self.Main = None
|
||||
|
||||
Observers = _Observers()
|
||||
957
fail2ban-master/fail2ban/server/server.py
Normal file
957
fail2ban-master/fail2ban/server/server.py
Normal file
@@ -0,0 +1,957 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import threading
|
||||
from threading import Lock, RLock
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import stat
|
||||
import sys
|
||||
|
||||
from .observer import Observers, ObserverThread
|
||||
from .jails import Jails
|
||||
from .filter import DNSUtils, FileFilter, JournalFilter
|
||||
from .transmitter import Transmitter
|
||||
from .asyncserver import AsyncServer, AsyncServerException
|
||||
from .. import version
|
||||
from ..helpers import getLogger, _as_bool, extractOptions, str2LogLevel, \
|
||||
getVerbosityFormat, excepthook, prctl_set_th_name
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
DEF_SYSLOGSOCKET = "auto"
|
||||
DEF_LOGLEVEL = "INFO"
|
||||
DEF_LOGTARGET = "STDOUT"
|
||||
|
||||
try:
|
||||
from .database import Fail2BanDb
|
||||
except ImportError: # pragma: no cover
|
||||
# Dont print error here, as database may not even be used
|
||||
Fail2BanDb = None
|
||||
|
||||
|
||||
def _thread_name():
|
||||
return threading.current_thread().__class__.__name__
|
||||
|
||||
def _make_file_path(name):
|
||||
"""Creates path of file (last level only) on demand"""
|
||||
name = os.path.dirname(name)
|
||||
# only if it is absolute (e. g. important for socket, so if unix path):
|
||||
if os.path.isabs(name):
|
||||
# be sure path exists (create last level of directory on demand):
|
||||
try:
|
||||
os.mkdir(name)
|
||||
except (OSError, FileExistsError) as e:
|
||||
if e.errno != 17: # pragma: no cover - not EEXIST is not covered
|
||||
raise
|
||||
|
||||
|
||||
class Server:
|
||||
|
||||
def __init__(self, daemon=False):
	"""Initialize the server (jails, transmitter, logging state).

	Parameters
	----------
	daemon : bool
		If True, start() will daemonize the process before serving.
	"""
	# serializes (re)configuration of logging:
	self.__loggingLock = Lock()
	# reentrant lock guarding jail add/del/start/stop and reload:
	self.__lock = RLock()
	self.__jails = Jails()
	self.__db = None
	self.__daemon = daemon
	self.__transm = Transmitter(self)
	# jails currently being reloaded (name -> jail instance or None):
	self.__reload_state = {}
	#self.__asyncServer = AsyncServer(self.__transm)
	self.__asyncServer = None
	self.__logLevel = None
	self.__logTarget = None
	self.__verbose = None
	self.__syslogSocket = None
	# platform-specific syslog socket paths used for "auto" detection:
	self.__autoSyslogSocketPaths = {
		'Darwin': '/var/run/syslog',
		'FreeBSD': '/var/run/log',
		'Linux': '/dev/log',
	}
	# original signal handlers saved by _rebindSignal (restored in quit()):
	self.__prev_signals = {}
|
||||
|
||||
def __sigTERMhandler(self, signum, frame): # pragma: no cover - indirect tested
	# SIGTERM/SIGINT handler: shut the server down cleanly.
	logSys.debug("Caught signal %d. Exiting", signum)
	self.quit()
|
||||
|
||||
def __sigUSR1handler(self, signum, fname): # pragma: no cover - indirect tested
	# SIGUSR1 handler: reopen/flush log files (e.g. after logrotate).
	# NOTE(review): second parameter is the interrupted stack frame, despite
	# being named 'fname'.
	logSys.debug("Caught signal %d. Flushing logs", signum)
	self.flushLogs()
|
||||
|
||||
def _rebindSignal(self, s, new):
	"""Bind new signal handler while storing old one in _prev_signals
	(so quit() can restore the original handlers later)."""
	self.__prev_signals[s] = signal.getsignal(s)
	signal.signal(s, new)
|
||||
|
||||
def start(self, sock, pidfile, force=False, observer=True, conf={}):
	"""Start the server: daemonize (optional), configure logging, install
	signal handlers, write the PID file, start the observer thread and
	serve the client socket until shutdown.

	Parameters
	----------
	sock : str
		Path of the unix command socket.
	pidfile : str
		Path of the PID file to create.
	force : bool
		Force binding the socket even if it already exists.
	observer : bool
		Whether to create/start the global observer thread.
	conf : dict
		Optional configuration overrides (loglevel, logtarget, ...).
		NOTE(review): mutable default - safe only because it is never
		mutated here.
	"""
	# First set the mask to only allow access to owner
	os.umask(0o077)
	# Second daemonize before logging etc, because it will close all handles:
	if self.__daemon: # pragma: no cover
		logSys.info("Starting in daemon mode")
		ret = self.__createDaemon()
		# If forked parent - return here (parent process will configure server later):
		if ret is None:
			return False
		# If error:
		if not ret[0]:
			# NOTE(review): 'err' is a tuple here (comma, not %-formatting) -
			# logged and raised as-is; confirm whether this is intended.
			err = "Could not create daemon %s", ret[1:]
			logSys.error(err)
			raise ServerInitializationError(err)
		# We are daemon.

	# replace main thread (and process) name to identify server (for top/ps/pstree or diagnostic):
	prctl_set_th_name(conf.get("pname", "fail2ban-server"))

	# Set all logging parameters (or use default if not specified):
	self.__verbose = conf.get("verbose", None)
	self.setSyslogSocket(conf.get("syslogsocket",
		self.__syslogSocket if self.__syslogSocket is not None else DEF_SYSLOGSOCKET))
	self.setLogLevel(conf.get("loglevel",
		self.__logLevel if self.__logLevel is not None else DEF_LOGLEVEL))
	self.setLogTarget(conf.get("logtarget",
		self.__logTarget if self.__logTarget is not None else DEF_LOGTARGET))

	logSys.info("-"*50)
	logSys.info("Starting Fail2ban v%s", version.version)

	if self.__daemon: # pragma: no cover
		logSys.info("Daemon started")

	# Install signal handlers
	if _thread_name() == '_MainThread':
		for s in (signal.SIGTERM, signal.SIGINT):
			self._rebindSignal(s, self.__sigTERMhandler)
		self._rebindSignal(signal.SIGUSR1, self.__sigUSR1handler)

	# Ensure unhandled exceptions are logged
	sys.excepthook = excepthook

	# Creates a PID file.
	try:
		logSys.debug("Creating PID file %s", pidfile)
		_make_file_path(pidfile)
		pidFile = open(pidfile, 'w')
		pidFile.write("%s\n" % os.getpid())
		pidFile.close()
	except (OSError, IOError) as e: # pragma: no cover
		logSys.error("Unable to create PID file: %s", e)

	# Create observers and start it:
	if observer:
		if Observers.Main is None:
			Observers.Main = ObserverThread()
		Observers.Main.start()

	# Start the communication (blocks until the async server stops):
	logSys.debug("Starting communication")
	try:
		_make_file_path(sock)
		self.__asyncServer = AsyncServer(self.__transm)
		self.__asyncServer.onstart = conf.get('onstart')
		self.__asyncServer.start(sock, force)
	except AsyncServerException as e:
		logSys.error("Could not start server: %s", e)

	# Stop (if not yet already executed):
	self.quit()

	# Removes the PID file.
	try:
		logSys.debug("Remove PID file %s", pidfile)
		os.remove(pidfile)
	except (OSError, IOError) as e: # pragma: no cover
		logSys.error("Unable to remove PID file: %s", e)
|
||||
|
||||
def quit(self):
	"""Shut the server down: stop communication, restore signals, stop the
	observer and all jails, close the database, stop the async server.
	Safe to call more than once (subsequent calls are no-ops).
	"""
	# Prevent to call quit twice (rebinds the method on the instance):
	self.quit = lambda: False

	logSys.info("Shutdown in progress...")

	# Stop communication first because if jail's unban action
	# tries to communicate via fail2ban-client we get a lockup
	# among threads. So the simplest resolution is to stop all
	# communications first (which should be ok anyways since we
	# are exiting)
	# See https://github.com/fail2ban/fail2ban/issues/7
	if self.__asyncServer is not None:
		self.__asyncServer.stop_communication()

	# Restore default signal handlers:
	if _thread_name() == '_MainThread':
		for s, sh in self.__prev_signals.items():
			signal.signal(s, sh)

	# Give observer a small chance to complete its work before exit
	obsMain = Observers.Main
	if obsMain is not None:
		if obsMain.stop(forceQuit=False):
			obsMain = None
		Observers.Main = None

	# Now stop all the jails
	self.stopAllJail()

	# Stop observer ultimately (forced, if it did not finish above)
	if obsMain is not None:
		obsMain.stop()

	# Explicit close database (server can leave in a thread,
	# so delayed GC can prevent committing changes)
	if self.__db:
		self.__db.close()
		self.__db = None

	# Stop async and exit
	if self.__asyncServer is not None:
		self.__asyncServer.stop()
		self.__asyncServer = None
	logSys.info("Exiting Fail2ban")
|
||||
|
||||
|
||||
def addJail(self, name, backend):
	"""Add (or, during reload, reuse/restart) the jail 'name' with 'backend'."""
	addflg = True
	if self.__reload_state.get(name) and self.__jails.exists(name):
		jail = self.__jails[name]
		# if backend switch - restart instead of reload:
		if jail.backend == backend:
			addflg = False
			logSys.info("Reload jail %r", name)
			# prevent to reload the same jail twice (temporary keep it in state, needed to commit reload):
			self.__reload_state[name] = None
		else:
			logSys.info("Restart jail %r (reason: %r != %r)", name, jail.backend, backend)
			self.delJail(name, stop=True)
			# prevent to start the same jail twice (no reload more - restart):
			del self.__reload_state[name]
	if addflg:
		self.__jails.add(name, backend, self.__db)
		if self.__db is not None:
			self.__db.addJail(self.__jails[name])
|
||||
|
||||
def delJail(self, name, stop=True, join=True):
	"""Stop and/or remove the jail 'name'.

	With join=False the jail is only signalled to stop (phase 1 of a
	two-phase shutdown); with join=True it is awaited and deleted.
	"""
	jail = self.__jails[name]
	if join or jail.isAlive():
		jail.stop(stop=stop, join=join)
	if join:
		if self.__db is not None:
			self.__db.delJail(jail)
		del self.__jails[name]
|
||||
|
||||
def startJail(self, name):
	"""Start jail 'name' (or commit its pending reload) and un-idle it."""
	with self.__lock:
		jail = self.__jails[name]
		if not jail.isAlive():
			jail.start()
		elif name in self.__reload_state:
			# jail was alive through a reload - just commit the reload state:
			logSys.info("Jail %r reloaded", name)
			del self.__reload_state[name]
		if jail.idle:
			jail.idle = False
|
||||
|
||||
def stopJail(self, name):
	# Stop and remove a single jail (serialized with other jail operations).
	with self.__lock:
		self.delJail(name, stop=True)
|
||||
|
||||
def stopAllJail(self):
	"""Stop all jails in two phases: signal all first, then join/delete."""
	logSys.info("Stopping all jails")
	with self.__lock:
		# 1st stop all jails (signal and stop actions/filter thread):
		for name in list(self.__jails.keys()):
			self.delJail(name, stop=True, join=False)
		# 2nd wait for end and delete jails:
		for name in list(self.__jails.keys()):
			self.delJail(name, stop=False, join=True)
|
||||
|
||||
def clearCaches(self):
	# we need to clear caches, to be able to recognize new IPs/families etc:
	DNSUtils.CACHE_nameToIp.clear()
	DNSUtils.CACHE_ipToName.clear()
|
||||
|
||||
def reloadJails(self, name, opts, begin):
	"""Begin or commit a reload of one jail ('name') or all jails ('--all').

	Parameters
	----------
	name : str
		Jail name or '--all'.
	opts : sequence
		Reload options: '--unban', '--restart', '--if-exists'.
	begin : bool
		True to begin the reload (mark jails, reset filters/actions),
		False to commit it (remove untouched jails, finish reload).
	"""
	if begin:
		# begin reload:
		if self.__reload_state and (name == '--all' or self.__reload_state.get(name)): # pragma: no cover
			raise ValueError('Reload already in progress')
		# lazy %-args instead of eager string concatenation:
		logSys.info("Reload %s", ("jail %s" % name) if name != '--all' else "all jails")
		with self.__lock:
			# if single jail:
			if name != '--all':
				jail = None
				# test jail exists (throws exception if not):
				if "--if-exists" not in opts or self.__jails.exists(name):
					jail = self.__jails[name]
				if jail:
					# first unban all ips (will be not restored after (re)start):
					if "--unban" in opts:
						self.setUnbanIP(name)
					# stop if expected:
					if "--restart" in opts:
						self.stopJail(name)
			else:
				# invalidate caches by reload
				self.clearCaches()
				# first unban all ips (will be not restored after (re)start):
				if "--unban" in opts:
					self.setUnbanIP()
				# stop if expected:
				if "--restart" in opts:
					self.stopAllJail()
			# first set all affected jail(s) to idle and reset filter regex and other lists/dicts:
			for jn, jail in self.__jails.items():
				if name == '--all' or jn == name:
					jail.idle = True
					self.__reload_state[jn] = jail
					jail.filter.reload(begin=True)
					jail.actions.reload(begin=True)
	else:
		# end reload, all affected (or new) jails have already all new parameters (via stream) and (re)started:
		with self.__lock:
			deljails = []
			for jn, jail in self.__jails.items():
				# still in reload state:
				if jn in self.__reload_state:
					# remove jails that are not reloaded (untouched, so not in new configuration)
					deljails.append(jn)
				else:
					# commit (reload was finished):
					jail.filter.reload(begin=False)
					jail.actions.reload(begin=False)
			for jn in deljails:
				self.delJail(jn)
			self.__reload_state = {}
		logSys.info("Reload finished.")
|
||||
|
||||
def setIdleJail(self, name, value):
	# pause (True) / resume (False) processing of the given jail
	self.__jails[name].idle = value
	return True

def getIdleJail(self, name):
	# whether the given jail is currently idle (paused)
	return self.__jails[name].idle

# Filter
def setIgnoreSelf(self, name, value):
	# enable/disable ignoring of own IP addresses for the jail's filter
	self.__jails[name].filter.ignoreSelf = _as_bool(value)

def getIgnoreSelf(self, name):
	return self.__jails[name].filter.ignoreSelf

def addIgnoreIP(self, name, ip):
	# add an IP/CIDR/DNS entry to the jail's ignore list
	self.__jails[name].filter.addIgnoreIP(ip)

def delIgnoreIP(self, name, ip):
	self.__jails[name].filter.delIgnoreIP(ip)

def getIgnoreIP(self, name):
	return self.__jails[name].filter.getIgnoreIP()

def addLogPath(self, name, fileName, tail=False):
	# add a log file to monitor; only meaningful for file-based (polling) backends
	filter_ = self.__jails[name].filter
	if isinstance(filter_, FileFilter):
		filter_.addLogPath(fileName, tail)

def delLogPath(self, name, fileName):
	filter_ = self.__jails[name].filter
	if isinstance(filter_, FileFilter):
		filter_.delLogPath(fileName)

def getLogPath(self, name):
	# list monitored log files; empty for non-file backends (e.g. systemd)
	filter_ = self.__jails[name].filter
	if isinstance(filter_, FileFilter):
		return filter_.getLogPaths()
	else: # pragma: systemd no cover
		logSys.debug("Jail %s is not a FileFilter instance" % name)
		return []
|
||||
|
||||
def addJournalMatch(self, name, match): # pragma: systemd no cover
	# add a systemd-journal match expression; only for journal backends
	filter_ = self.__jails[name].filter
	if isinstance(filter_, JournalFilter):
		filter_.addJournalMatch(match)

def delJournalMatch(self, name, match): # pragma: systemd no cover
	filter_ = self.__jails[name].filter
	if isinstance(filter_, JournalFilter):
		filter_.delJournalMatch(match)

def getJournalMatch(self, name): # pragma: systemd no cover
	# list journal match expressions; empty for non-journal backends
	filter_ = self.__jails[name].filter
	if isinstance(filter_, JournalFilter):
		return filter_.getJournalMatch()
	else:
		logSys.debug("Jail %s is not a JournalFilter instance" % name)
		return []

def setLogEncoding(self, name, encoding):
	# set the character encoding used to decode the jail's log lines
	filter_ = self.__jails[name].filter
	filter_.setLogEncoding(encoding)

def getLogEncoding(self, name):
	filter_ = self.__jails[name].filter
	return filter_.getLogEncoding()

def setFindTime(self, name, value):
	# time window within which 'maxretry' failures trigger a ban
	self.__jails[name].filter.setFindTime(value)

def getFindTime(self, name):
	return self.__jails[name].filter.getFindTime()

def setDatePattern(self, name, pattern):
	# pattern used to locate/parse timestamps in log lines
	self.__jails[name].filter.setDatePattern(pattern)

def getDatePattern(self, name):
	return self.__jails[name].filter.getDatePattern()

def setLogTimeZone(self, name, tz):
	# explicit time zone for naive log timestamps
	self.__jails[name].filter.setLogTimeZone(tz)

def getLogTimeZone(self, name):
	return self.__jails[name].filter.getLogTimeZone()
|
||||
|
||||
def setIgnoreCommand(self, name, value):
	# external command deciding whether a found IP should be ignored
	self.__jails[name].filter.ignoreCommand = value

def getIgnoreCommand(self, name):
	return self.__jails[name].filter.ignoreCommand

def setIgnoreCache(self, name, value):
	# parse "key[, opt=...]" style value into cache options for the ignore check
	value, options = extractOptions("cache["+value+"]")
	self.__jails[name].filter.ignoreCache = options

def getIgnoreCache(self, name):
	return self.__jails[name].filter.ignoreCache

def setPrefRegex(self, name, value):
	# common prefix regex applied before each failregex
	flt = self.__jails[name].filter
	logSys.debug(" prefregex: %r", value)
	flt.prefRegex = value

def getPrefRegex(self, name):
	return self.__jails[name].filter.prefRegex

def addFailRegex(self, name, value, multiple=False):
	# add one failregex, or several when multiple=True (value is iterable)
	flt = self.__jails[name].filter
	if not multiple: value = (value,)
	for value in value:
		logSys.debug(" failregex: %r", value)
		flt.addFailRegex(value)

def delFailRegex(self, name, index=None):
	# remove failregex at index (or all, depending on filter semantics)
	self.__jails[name].filter.delFailRegex(index)

def getFailRegex(self, name):
	return self.__jails[name].filter.getFailRegex()

def addIgnoreRegex(self, name, value, multiple=False):
	# add one ignoreregex, or several when multiple=True (value is iterable)
	flt = self.__jails[name].filter
	if not multiple: value = (value,)
	for value in value:
		logSys.debug(" ignoreregex: %r", value)
		flt.addIgnoreRegex(value)

def delIgnoreRegex(self, name, index):
	self.__jails[name].filter.delIgnoreRegex(index)

def getIgnoreRegex(self, name):
	return self.__jails[name].filter.getIgnoreRegex()
|
||||
|
||||
def setUseDns(self, name, value):
	# DNS usage mode of the jail's filter (yes/no/warn/raw)
	self.__jails[name].filter.setUseDns(value)

def getUseDns(self, name):
	return self.__jails[name].filter.getUseDns()

def setMaxMatches(self, name, value):
	# max number of matched log lines stored per ticket
	self.__jails[name].filter.failManager.maxMatches = value

def getMaxMatches(self, name):
	return self.__jails[name].filter.failManager.maxMatches

def setMaxRetry(self, name, value):
	# failures within findtime needed to trigger a ban
	self.__jails[name].filter.setMaxRetry(value)

def getMaxRetry(self, name):
	return self.__jails[name].filter.getMaxRetry()

def setMaxLines(self, name, value):
	# buffered lines for multi-line failregex matching
	self.__jails[name].filter.setMaxLines(value)

def getMaxLines(self, name):
	return self.__jails[name].filter.getMaxLines()

# Action
def addAction(self, name, value, *args):
	## create (or reload) jail action:
	self.__jails[name].actions.add(value, *args,
		reload=name in self.__reload_state)

def getActions(self, name):
	return self.__jails[name].actions

def delAction(self, name, value):
	del self.__jails[name].actions[value]

def getAction(self, name, value):
	return self.__jails[name].actions[value]

def setBanTime(self, name, value):
	# default ban duration for the jail
	self.__jails[name].actions.setBanTime(value)

def addAttemptIP(self, name, *args):
	# register failure attempt(s) for an IP directly with the filter
	return self.__jails[name].filter.addAttempt(*args)

def setBanIP(self, name, value):
	# ban the given IP(s) in the jail immediately
	return self.__jails[name].actions.addBannedIP(value)
|
||||
|
||||
def setUnbanIP(self, name=None, value=None, ifexists=True):
	"""Remove a ban from one jail or from all jails.

	Parameters
	----------
	name : str, optional
		Jail name; if None the unban is applied across all jails.
	value : optional
		The entry (IP / ticket id) to unban; if None, all bans of the
		affected jail(s) are removed.
	ifexists : bool
		If True a missing entry is tolerated; forced to True when
		operating across all jails (a miss in some jails is expected).

	Returns
	-------
	int
		Accumulated result of Actions.removeBannedIP over the affected
		jails (count of removed bans).
	"""
	if name is not None:
		# single jail:
		jails = [self.__jails[name]]
	else:
		# in all jails:
		jails = list(self.__jails.values())
	# unban given or all (if value is None):
	cnt = 0
	ifexists |= (name is None)
	for jail in jails:
		cnt += jail.actions.removeBannedIP(value, ifexists=ifexists)
	return cnt
|
||||
|
||||
def banned(self, name=None, ids=None):
	"""Query ban state of the given ids in one jail or across all jails.

	Parameters
	----------
	name : str, optional
		Jail name; if None, every jail is queried.
	ids : list, optional
		Entries (IPs / ticket ids) to check.

	Returns
	-------
	list
		- name given: the banned entries of that jail (returned directly);
		- name is None and ids given: one list per id, containing the names
		  of the jails in which that id is banned;
		- otherwise: one {jail.name: banned-entries} dict per jail.
	"""
	if name is not None:
		# single jail:
		jails = [self.__jails[name]]
	else:
		# in all jails:
		jails = list(self.__jails.values())
	# check banned ids:
	res = []
	if name is None and ids:
		# per-id view: which jails hold a ban for each id
		for ip in ids:
			ret = []
			for jail in jails:
				if jail.actions.getBanned([ip]):
					ret.append(jail.name)
			res.append(ret)
	else:
		for jail in jails:
			ret = jail.actions.getBanned(ids)
			if name is not None:
				# single jail requested - return its result directly
				# (fix: removed unreachable `res.append(ret)` that followed
				# this return in the original):
				return ret
			res.append({jail.name: ret})
	return res
|
||||
|
||||
def getBanTime(self, name):
|
||||
return self.__jails[name].actions.getBanTime()
|
||||
|
||||
def getBanList(self, name, withTime=False):
	"""Returns the list of banned IP addresses for a jail.

	Parameters
	----------
	name : str
		The name of a jail.
	withTime : bool
		If True, pass-through flag asking Actions.getBanList to include
		ban-time information with each entry (exact format is defined by
		Actions.getBanList — confirm there).

	Returns
	-------
	list
		The list of banned IP addresses.
	"""
	return self.__jails[name].actions.getBanList(withTime)
|
||||
|
||||
def setBanTimeExtra(self, name, opt, value):
|
||||
self.__jails[name].setBanTimeExtra(opt, value)
|
||||
|
||||
def getBanTimeExtra(self, name, opt):
|
||||
return self.__jails[name].getBanTimeExtra(opt)
|
||||
|
||||
def isStarted(self):
|
||||
return self.__asyncServer is not None and self.__asyncServer.isActive()
|
||||
|
||||
def isAlive(self, jailnum=None):
	"""Return 1 if every jail is alive (and, when jailnum is given,
	the number of jails equals jailnum), else 0."""
	if jailnum is not None and len(self.__jails) != jailnum:
		return 0
	return int(all(j.isAlive() for j in list(self.__jails.values())))
|
||||
|
||||
# Status
|
||||
def status(self, name="", flavor="basic"):
	"""Return server status: jail count and jail list, optionally with
	per-jail statuses.

	Parameters
	----------
	name : str
		Empty for the summary only, or '--all' to include every jail's
		status.
	flavor : str
		Status flavor forwarded to Jail.status; "stats" returns only the
		per-jail status dict.

	Returns
	-------
	list or dict
		List of (label, value) pairs, or a dict for flavor "stats".
	"""
	# `with` guarantees the lock is released even if a jail status raises
	# (replaces manual acquire/try/finally/release):
	with self.__lock:
		jails = sorted(self.__jails.items())
		if flavor != "stats":
			jailList = [n for n, j in jails]
			ret = [
				("Number of jail", len(jailList)),
				("Jail list", ", ".join(jailList))
			]
		if name == '--all':
			jstat = dict(jails)
			for n, j in jails:
				jstat[n] = j.status(flavor=flavor)
			if flavor == "stats":
				return jstat
			ret.append(jstat)
		# NOTE(review): with flavor == "stats" and name != '--all', `ret`
		# is unbound here — presumably callers always pass '--all' for
		# stats; confirm at the call sites.
		return ret
|
||||
|
||||
def statusJail(self, name, flavor="basic"):
|
||||
return self.__jails[name].status(flavor=flavor)
|
||||
|
||||
# Logging
|
||||
|
||||
##
|
||||
# Set the logging level.
|
||||
#
|
||||
# CRITICAL
|
||||
# ERROR
|
||||
# WARNING
|
||||
# NOTICE
|
||||
# INFO
|
||||
# DEBUG
|
||||
# @param value the level
|
||||
|
||||
def setLogLevel(self, value):
|
||||
value = value.upper()
|
||||
with self.__loggingLock:
|
||||
if self.__logLevel == value:
|
||||
return
|
||||
ll = str2LogLevel(value)
|
||||
# don't change real log-level if running from the test cases:
|
||||
getLogger("fail2ban").setLevel(
|
||||
ll if DEF_LOGTARGET != "INHERITED" or ll < logging.DEBUG else DEF_LOGLEVEL)
|
||||
self.__logLevel = value
|
||||
|
||||
##
|
||||
# Get the logging level.
|
||||
#
|
||||
# @see setLogLevel
|
||||
# @return the log level
|
||||
|
||||
def getLogLevel(self):
|
||||
with self.__loggingLock:
|
||||
return self.__logLevel
|
||||
|
||||
##
|
||||
# Sets the logging target.
|
||||
#
|
||||
# target can be a file, SYSLOG, STDOUT or STDERR.
|
||||
# @param target the logging target
|
||||
|
||||
def setLogTarget(self, target):
|
||||
# check reserved targets in uppercase, don't change target, because it can be file:
|
||||
target, logOptions = extractOptions(target)
|
||||
systarget = target.upper()
|
||||
with self.__loggingLock:
|
||||
# don't set new handlers if already the same
|
||||
# or if "INHERITED" (foreground worker of the test cases, to prevent stop logging):
|
||||
if self.__logTarget == target:
|
||||
return True
|
||||
if systarget == "INHERITED":
|
||||
self.__logTarget = target
|
||||
return True
|
||||
padding = logOptions.get('padding')
|
||||
# set a format which is simpler for console use
|
||||
if systarget == "SYSTEMD-JOURNAL":
|
||||
from systemd.journal import JournalHandler
|
||||
hdlr = JournalHandler(SYSLOG_IDENTIFIER='fail2ban')
|
||||
elif systarget == "SYSLOG":
|
||||
facility = logOptions.get('facility', 'DAEMON').upper()
|
||||
# backwards compatibility - default no padding for syslog handler:
|
||||
if padding is None: padding = '0'
|
||||
try:
|
||||
facility = getattr(logging.handlers.SysLogHandler, 'LOG_' + facility)
|
||||
except AttributeError: # pragma: no cover
|
||||
logSys.error("Unable to set facility %r, using 'DAEMON'", logOptions.get('facility'))
|
||||
facility = logging.handlers.SysLogHandler.LOG_DAEMON
|
||||
if self.__syslogSocket == "auto":
|
||||
import platform
|
||||
self.__syslogSocket = self.__autoSyslogSocketPaths.get(
|
||||
platform.system())
|
||||
if self.__syslogSocket is not None\
|
||||
and os.path.exists(self.__syslogSocket)\
|
||||
and stat.S_ISSOCK(os.stat(
|
||||
self.__syslogSocket).st_mode):
|
||||
hdlr = logging.handlers.SysLogHandler(
|
||||
self.__syslogSocket, facility=facility)
|
||||
else:
|
||||
logSys.error(
|
||||
"Syslog socket file: %s does not exists"
|
||||
" or is not a socket" % self.__syslogSocket)
|
||||
return False
|
||||
elif systarget in ("STDOUT", "SYSOUT"):
|
||||
hdlr = logging.StreamHandler(sys.stdout)
|
||||
elif systarget == "STDERR":
|
||||
hdlr = logging.StreamHandler(sys.stderr)
|
||||
else:
|
||||
# Target should be a file
|
||||
try:
|
||||
open(target, "a").close()
|
||||
hdlr = logging.handlers.RotatingFileHandler(target)
|
||||
except IOError:
|
||||
logSys.error("Unable to log to %r", target)
|
||||
logSys.info("Logging to previous target %r", self.__logTarget)
|
||||
return False
|
||||
# Removes previous handlers -- in reverse order since removeHandler
|
||||
# alter the list in-place and that can confuses the iterable
|
||||
logger = getLogger("fail2ban")
|
||||
for handler in logger.handlers[::-1]:
|
||||
# Remove the handler.
|
||||
logger.removeHandler(handler)
|
||||
# And try to close -- it might be closed already
|
||||
handler.flush()
|
||||
handler.close()
|
||||
# detailed format by deep log levels (as DEBUG=10):
|
||||
if logger.getEffectiveLevel() <= logging.DEBUG: # pragma: no cover
|
||||
if self.__verbose is None:
|
||||
self.__verbose = logging.DEBUG - logger.getEffectiveLevel() + 1
|
||||
# If handler don't already add date to the message:
|
||||
addtime = logOptions.get('datetime')
|
||||
if addtime is not None:
|
||||
addtime = _as_bool(addtime)
|
||||
else:
|
||||
addtime = systarget not in ("SYSLOG", "SYSOUT")
|
||||
if padding is not None:
|
||||
padding = _as_bool(padding)
|
||||
else:
|
||||
padding = True
|
||||
# If log-format is redefined in options:
|
||||
if logOptions.get('format', '') != '':
|
||||
fmt = logOptions.get('format')
|
||||
else:
|
||||
# verbose log-format:
|
||||
verbose = 0
|
||||
if self.__verbose is not None and self.__verbose > 2: # pragma: no cover
|
||||
verbose = self.__verbose-1
|
||||
fmt = getVerbosityFormat(verbose, addtime=addtime, padding=padding)
|
||||
# tell the handler to use this format
|
||||
if target != "SYSTEMD-JOURNAL":
|
||||
hdlr.setFormatter(logging.Formatter(fmt))
|
||||
logger.addHandler(hdlr)
|
||||
# Does not display this message at startup.
|
||||
if self.__logTarget is not None:
|
||||
logSys.info("Start Fail2ban v%s", version.version)
|
||||
logSys.info(
|
||||
"Changed logging target to %s for Fail2ban v%s"
|
||||
% ((target
|
||||
if target != "SYSLOG"
|
||||
else "%s (%s)"
|
||||
% (target, self.__syslogSocket)),
|
||||
version.version))
|
||||
# Sets the logging target.
|
||||
self.__logTarget = target
|
||||
return True
|
||||
|
||||
##
|
||||
# Sets the syslog socket.
|
||||
#
|
||||
# syslogsocket is the full path to the syslog socket
|
||||
# @param syslogsocket the syslog socket path
|
||||
def setSyslogSocket(self, syslogsocket):
|
||||
with self.__loggingLock:
|
||||
if self.__syslogSocket == syslogsocket:
|
||||
return True
|
||||
self.__syslogSocket = syslogsocket
|
||||
# Conditionally reload, logtarget depends on socket path when SYSLOG
|
||||
return self.__logTarget != "SYSLOG"\
|
||||
or self.setLogTarget(self.__logTarget)
|
||||
|
||||
def getLogTarget(self):
|
||||
with self.__loggingLock:
|
||||
return self.__logTarget
|
||||
|
||||
def getSyslogSocket(self):
|
||||
with self.__loggingLock:
|
||||
return self.__syslogSocket
|
||||
|
||||
def flushLogs(self):
	"""Flush (or roll over) all "fail2ban" log handlers.

	For file-based log targets the handlers are rolled over, falling back
	to a plain flush for handlers without a doRollover method; for
	stream/syslog/journal targets the handlers are only flushed.

	Returns
	-------
	str
		"rolled over" or "flushed", describing what was done.
	"""
	if self.__logTarget not in ['STDERR', 'STDOUT', 'SYSLOG', 'SYSTEMD-JOURNAL']:
		for handler in getLogger("fail2ban").handlers:
			try:
				handler.doRollover()
				logSys.info("rollover performed on %s" % self.__logTarget)
			except AttributeError:
				# handler is not a rotating file handler - just flush it:
				handler.flush()
				logSys.info("flush performed on %s" % self.__logTarget)
		return "rolled over"
	else:
		for handler in getLogger("fail2ban").handlers:
			handler.flush()
			logSys.info("flush performed on %s" % self.__logTarget)
		return "flushed"
|
||||
|
||||
@staticmethod
|
||||
def setIPv6IsAllowed(value):
|
||||
value = _as_bool(value) if value != 'auto' else None
|
||||
return DNSUtils.setIPv6IsAllowed(value)
|
||||
|
||||
def setThreadOptions(self, value):
	"""Apply thread-related options from the given mapping.

	Currently only 'stacksize' (in KiB) is supported; any other key
	raises KeyError.
	"""
	for opt, v in value.items():
		if opt != 'stacksize': # pragma: no cover
			raise KeyError("unknown option %r" % opt)
		threading.stack_size(int(v) * 1024)
|
||||
|
||||
def getThreadOptions(self):
	"""Return the current thread options ('stacksize' in KiB)."""
	stacksize_kib = threading.stack_size() // 1024
	return {'stacksize': stacksize_kib}
|
||||
|
||||
def setDatabase(self, filename):
	"""Set (or disable) the persistence database.

	Parameters
	----------
	filename : str
		Path of the sqlite database file, or "none" (case-insensitive)
		to run without persistence.

	Raises
	------
	RuntimeError
		If jails already exist — the database cannot be switched then.
	"""
	# if not changed - nothing to do
	if self.__db and self.__db.filename == filename:
		return
	if not self.__db and filename.lower() == 'none':
		return
	if len(self.__jails) != 0:
		raise RuntimeError(
			"Cannot change database when there are jails present")
	if filename.lower() == "none":
		self.__db = None
	else:
		if Fail2BanDb is not None:
			_make_file_path(filename)
			self.__db = Fail2BanDb(filename)
			# start from a clean jail table in the new database
			# (NOTE(review): presumably jails re-register on creation —
			# confirm against Fail2BanDb usage):
			self.__db.delAllJails()
		else: # pragma: no cover
			logSys.error(
				"Unable to import fail2ban database module as sqlite "
				"is not available.")
	# propagate the new database to the observer, if one is running:
	if Observers.Main is not None:
		Observers.Main.db_set(self.__db)
|
||||
|
||||
def getDatabase(self):
|
||||
return self.__db
|
||||
|
||||
@staticmethod
|
||||
def __get_fdlist():
|
||||
"""Generate a list of open file descriptors.
|
||||
|
||||
This wouldn't work on some platforms, or if proc/fdescfs not mounted, or a chroot environment,
|
||||
then it'd raise a FileExistsError.
|
||||
"""
|
||||
for path in (
|
||||
'/proc/self/fd', # Linux, Cygwin and NetBSD
|
||||
'/proc/fd', # MacOS and FreeBSD
|
||||
):
|
||||
if os.path.exists(path):
|
||||
def fdlist():
|
||||
for name in os.listdir(path):
|
||||
if name.isdigit():
|
||||
yield int(name)
|
||||
return fdlist()
|
||||
# other platform or unmounted, chroot etc:
|
||||
raise FileExistsError("fd-list not found")
|
||||
|
||||
def __createDaemon(self): # pragma: no cover
|
||||
""" Detach a process from the controlling terminal and run it in the
|
||||
background as a daemon.
|
||||
|
||||
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
|
||||
"""
|
||||
|
||||
# When the first child terminates, all processes in the second child
|
||||
# are sent a SIGHUP, so it's ignored.
|
||||
|
||||
# We need to set this in the parent process, so it gets inherited by the
|
||||
# child process, and this makes sure that it is effect even if the parent
|
||||
# terminates quickly.
|
||||
self._rebindSignal(signal.SIGHUP, signal.SIG_IGN)
|
||||
|
||||
try:
|
||||
# Fork a child process so the parent can exit. This will return control
|
||||
# to the command line or shell. This is required so that the new process
|
||||
# is guaranteed not to be a process group leader. We have this guarantee
|
||||
# because the process GID of the parent is inherited by the child, but
|
||||
# the child gets a new PID, making it impossible for its PID to equal its
|
||||
# PGID.
|
||||
pid = os.fork()
|
||||
except OSError as e:
|
||||
return (False, (e.errno, e.strerror)) # ERROR (return a tuple)
|
||||
|
||||
if pid == 0: # The first child.
|
||||
|
||||
# Next we call os.setsid() to become the session leader of this new
|
||||
# session. The process also becomes the process group leader of the
|
||||
# new process group. Since a controlling terminal is associated with a
|
||||
# session, and this new session has not yet acquired a controlling
|
||||
# terminal our process now has no controlling terminal. This shouldn't
|
||||
# fail, since we're guaranteed that the child is not a process group
|
||||
# leader.
|
||||
os.setsid()
|
||||
|
||||
try:
|
||||
# Fork a second child to prevent zombies. Since the first child is
|
||||
# a session leader without a controlling terminal, it's possible for
|
||||
# it to acquire one by opening a terminal in the future. This second
|
||||
# fork guarantees that the child is no longer a session leader, thus
|
||||
# preventing the daemon from ever acquiring a controlling terminal.
|
||||
pid = os.fork() # Fork a second child.
|
||||
except OSError as e:
|
||||
return (False, (e.errno, e.strerror)) # ERROR (return a tuple)
|
||||
|
||||
if (pid == 0): # The second child.
|
||||
# Ensure that the daemon doesn't keep any directory in use. Failure
|
||||
# to do this could make a filesystem unmountable.
|
||||
os.chdir("/")
|
||||
else:
|
||||
os._exit(0) # Exit parent (the first child) of the second child.
|
||||
else:
|
||||
# Signal to exit, parent of the first child.
|
||||
return None
|
||||
|
||||
# Close all open files. Try to obtain the range of open descriptors directly.
|
||||
# As a fallback try the system configuration variable, SC_OPEN_MAX,
|
||||
# for the maximum number of open files to close. If it doesn't exist, use
|
||||
# the default value (configurable).
|
||||
try:
|
||||
fdlist = self.__get_fdlist()
|
||||
for fd in fdlist:
|
||||
try:
|
||||
os.close(fd)
|
||||
except OSError: # ERROR (ignore)
|
||||
pass
|
||||
except:
|
||||
try:
|
||||
maxfd = os.sysconf("SC_OPEN_MAX")
|
||||
except (AttributeError, ValueError):
|
||||
maxfd = 256 # default maximum
|
||||
os.closerange(0, maxfd)
|
||||
|
||||
# Redirect the standard file descriptors to /dev/null.
|
||||
os.open("/dev/null", os.O_RDONLY) # standard input (0)
|
||||
os.open("/dev/null", os.O_RDWR) # standard output (1)
|
||||
os.open("/dev/null", os.O_RDWR) # standard error (2)
|
||||
return (True,)
|
||||
|
||||
|
||||
class ServerInitializationError(Exception):
	"""Raised when the server fails to initialize."""
	pass
|
||||
392
fail2ban-master/fail2ban/server/strptime.py
Normal file
392
fail2ban-master/fail2ban/server/strptime.py
Normal file
@@ -0,0 +1,392 @@
|
||||
# emacs: -*- mode: python; coding: utf-8; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
import re
|
||||
import time
|
||||
import calendar
|
||||
import datetime
|
||||
from _strptime import LocaleTime, TimeRE, _calc_julian_from_U_or_W
|
||||
|
||||
from .mytime import MyTime
|
||||
|
||||
locale_time = LocaleTime()
|
||||
|
||||
TZ_ABBR_RE = r"[A-Z](?:[A-Z]{2,4})?"
|
||||
FIXED_OFFSET_TZ_RE = re.compile(r"(%s)?([+-][01]\d(?::?\d{2})?)?$" % (TZ_ABBR_RE,))
|
||||
|
||||
timeRE = TimeRE()
|
||||
|
||||
# %k - one- or two-digit number giving the hour of the day (0-23) on a 24-hour clock,
|
||||
# (corresponds %H, but allows space if not zero-padded).
|
||||
# %l - one- or two-digit number giving the hour of the day (12-11) on a 12-hour clock,
|
||||
# (corresponds %I, but allows space if not zero-padded).
|
||||
timeRE['k'] = r" ?(?P<H>[0-2]?\d)"
|
||||
timeRE['l'] = r" ?(?P<I>1?\d)"
|
||||
|
||||
# TODO: because python currently does not support mixing of case-sensitive with case-insensitive matching,
|
||||
# check how TZ (in uppercase) can be combined with %a/%b etc. (that are currently case-insensitive),
|
||||
# to avoid invalid date-time recognition in strings like '11-Aug-2013 03:36:11.372 error ...'
|
||||
# with wrong TZ "error", which is at least not backwards compatible.
|
||||
# Hence %z currently match literal Z|UTC|GMT only (and offset-based), and %Exz - all zone abbreviations.
|
||||
timeRE['Z'] = r"(?P<Z>Z|[A-Z]{3,5})"
|
||||
timeRE['z'] = r"(?P<z>Z|UTC|GMT|[+-][01]\d(?::?\d{2})?)"
|
||||
|
||||
# Note: this extended tokens supported zone abbreviations, but it can parse 1 or 3-5 char(s) in lowercase,
|
||||
# see todo above. Don't use them in default date-patterns (if not anchored, few precise resp. optional).
|
||||
timeRE['ExZ'] = r"(?P<Z>%s)" % (TZ_ABBR_RE,)
|
||||
timeRE['Exz'] = r"(?P<z>(?:%s)?[+-][01]\d(?::?\d{2})?|%s)" % (TZ_ABBR_RE, TZ_ABBR_RE)
|
||||
|
||||
# overwrite default patterns, since they can be non-optimal:
|
||||
timeRE['d'] = r"(?P<d>[1-2]\d|[0 ]?[1-9]|3[0-1])"
|
||||
timeRE['m'] = r"(?P<m>0?[1-9]|1[0-2])"
|
||||
timeRE['Y'] = r"(?P<Y>\d{4})"
|
||||
timeRE['H'] = r"(?P<H>[0-1]?\d|2[0-3])"
|
||||
timeRE['M'] = r"(?P<M>[0-5]?\d)"
|
||||
timeRE['S'] = r"(?P<S>[0-5]?\d|6[0-1])"
|
||||
|
||||
# Extend built-in TimeRE with some exact patterns
|
||||
# exact two-digit patterns:
|
||||
timeRE['Exd'] = r"(?P<d>[1-2]\d|0[1-9]|3[0-1])"
|
||||
timeRE['Exm'] = r"(?P<m>0[1-9]|1[0-2])"
|
||||
timeRE['ExH'] = r"(?P<H>[0-1]\d|2[0-3])"
|
||||
timeRE['Exk'] = r" ?(?P<H>[0-1]?\d|2[0-3])"
|
||||
timeRE['Exl'] = r" ?(?P<I>1[0-2]|\d)"
|
||||
timeRE['ExM'] = r"(?P<M>[0-5]\d)"
|
||||
timeRE['ExS'] = r"(?P<S>[0-5]\d|6[0-1])"
|
||||
|
||||
def _updateTimeRE():
|
||||
def _getYearCentRE(cent=(0,3), distance=3, now=(MyTime.now(), MyTime.alternateNow)):
|
||||
""" Build century regex for last year and the next years (distance).
|
||||
|
||||
Thereby respect possible run in the test-cases (alternate date used there)
|
||||
"""
|
||||
cent = lambda year, f=cent[0], t=cent[1]: str(year)[f:t]
|
||||
def grp(exprset):
|
||||
c = None
|
||||
if len(exprset) > 1:
|
||||
for i in exprset:
|
||||
if c is None or i[0:-1] == c:
|
||||
c = i[0:-1]
|
||||
else:
|
||||
c = None
|
||||
break
|
||||
if not c:
|
||||
for i in exprset:
|
||||
if c is None or i[0] == c:
|
||||
c = i[0]
|
||||
else:
|
||||
c = None
|
||||
break
|
||||
if c:
|
||||
return "%s%s" % (c, grp([i[len(c):] for i in exprset]))
|
||||
return ("(?:%s)" % "|".join(exprset) if len(exprset[0]) > 1 else "[%s]" % "".join(exprset)) \
|
||||
if len(exprset) > 1 else "".join(exprset)
|
||||
exprset = set( cent(now[0].year + i) for i in (-1, distance) )
|
||||
if len(now) > 1 and now[1]:
|
||||
exprset |= set( cent(now[1].year + i) for i in range(-1, now[0].year-now[1].year+1, distance) )
|
||||
return grp(sorted(list(exprset)))
|
||||
|
||||
# more precise year patterns, within same century of last year and
|
||||
# the next 3 years (for possible long uptime of fail2ban); thereby
|
||||
# consider possible run in the test-cases (alternate date used there),
|
||||
# so accept years: 20xx (from test-date or 2001 up to current century)
|
||||
timeRE['ExY'] = r"(?P<Y>%s\d)" % _getYearCentRE(cent=(0,3), distance=3,
|
||||
now=(datetime.datetime.now(), datetime.datetime.fromtimestamp(
|
||||
min(MyTime.alternateNowTime or 978393600, 978393600))
|
||||
)
|
||||
)
|
||||
timeRE['Exy'] = r"(?P<y>\d{2})"
|
||||
|
||||
_updateTimeRE()
|
||||
|
||||
def getTimePatternRE():
|
||||
keys = list(timeRE.keys())
|
||||
patt = (r"%%(%%|%s|[%s])" % (
|
||||
"|".join([k for k in keys if len(k) > 1]),
|
||||
"".join([k for k in keys if len(k) == 1]),
|
||||
))
|
||||
names = {
|
||||
'a': "DAY", 'A': "DAYNAME", 'b': "MON", 'B': "MONTH", 'd': "Day",
|
||||
'H': "24hour", 'I': "12hour", 'j': "Yearday", 'm': "Month",
|
||||
'M': "Minute", 'p': "AMPM", 'S': "Second", 'U': "Yearweek",
|
||||
'w': "Weekday", 'W': "Yearweek", 'y': 'Year2', 'Y': "Year", '%': "%",
|
||||
'z': "Zone offset", 'f': "Microseconds", 'Z': "Zone name",
|
||||
}
|
||||
for key in set(keys) - set(names): # may not have them all...
|
||||
if key.startswith('Ex'):
|
||||
kn = names.get(key[2:])
|
||||
if kn:
|
||||
names[key] = "Ex" + kn
|
||||
continue
|
||||
names[key] = "%%%s" % key
|
||||
return (patt, names)
|
||||
|
||||
|
||||
def validateTimeZone(tz):
	"""Validate a timezone and convert it to offset if it can (offset-based TZ).

	For now this accepts the UTC[+-]hhmm format (UTC has aliases GMT/Z and optional).
	Additionally it accepts all zone abbreviations mentioned below in TZ_STR.
	Note that currently this zone abbreviations are offset-based and used fixed
	offset without automatically DST-switch (if CET used then no automatically CEST-switch).

	In the future, it may be extended for named time zones (such as Europe/Paris)
	present on the system, if a suitable tz library is present (pytz).

	Returns
	-------
	int or None
		Offset in minutes, or None if tz is None.

	Raises
	------
	ValueError
		If the string does not match the fixed-offset pattern.
	"""
	if tz is None:
		return None
	m = FIXED_OFFSET_TZ_RE.match(tz)
	if m is None:
		raise ValueError("Unknown or unsupported time zone: %r" % tz)
	# groups() is the (abbreviation, offset) pair expected by zone2offset:
	tz = m.groups()
	return zone2offset(tz, 0)
|
||||
|
||||
def zone2offset(tz, dt):
	"""Return the proper offset, in minutes according to given timezone at a given time.

	Parameters
	----------
	tz: symbolic timezone or offset (for now only TZA?([+-]hh:?mm?)? is supported,
		as value are accepted:
			int offset;
			string in form like 'CET+0100' or 'UTC' or '-0400';
			tuple (or list) in form (zone name, zone offset);
	dt: datetime instance for offset computation (currently unused)

	Returns
	-------
	int
		Offset in minutes (abbreviation offset from TZ_ABBR_OFFS plus
		any explicit numeric offset).
	"""
	if isinstance(tz, int):
		# already an offset in minutes:
		return tz
	if isinstance(tz, str):
		# parse the string form (delegates back via validateTimeZone):
		return validateTimeZone(tz)
	tz, tzo = tz
	if tzo is None or tzo == '': # without offset
		return TZ_ABBR_OFFS[tz]
	if len(tzo) <= 3: # short tzo (hh only)
		# [+-]hh --> [+-]hh*60
		return TZ_ABBR_OFFS[tz] + int(tzo)*60
	if tzo[3] != ':':
		# [+-]hhmm --> [+-]1 * (hh*60 + mm)
		return TZ_ABBR_OFFS[tz] + (-1 if tzo[0] == '-' else 1) * (int(tzo[1:3])*60 + int(tzo[3:5]))
	else:
		# [+-]hh:mm --> [+-]1 * (hh*60 + mm)
		return TZ_ABBR_OFFS[tz] + (-1 if tzo[0] == '-' else 1) * (int(tzo[1:3])*60 + int(tzo[4:6]))
|
||||
|
||||
def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
|
||||
"""Return time from dictionary of strptime fields
|
||||
|
||||
This is tweaked from python built-in _strptime.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
found_dict : dict
|
||||
Dictionary where keys represent the strptime fields, and values the
|
||||
respective value.
|
||||
default_tz : default timezone to apply if nothing relevant is in found_dict
|
||||
(may be a non-fixed one in the future)
|
||||
Returns
|
||||
-------
|
||||
float
|
||||
Unix time stamp.
|
||||
"""
|
||||
|
||||
now = \
|
||||
year = month = day = tzoffset = \
|
||||
weekday = julian = week_of_year = None
|
||||
hour = minute = second = fraction = 0
|
||||
for key, val in found_dict.items():
|
||||
if val is None: continue
|
||||
# Directives not explicitly handled below:
|
||||
# c, x, X
|
||||
# handled by making out of other directives
|
||||
# U, W
|
||||
# worthless without day of the week
|
||||
if key == 'y':
|
||||
year = int(val)
|
||||
# Fail2ban year should be always in the current century (>= 2000)
|
||||
if year <= 2000:
|
||||
year += 2000
|
||||
elif key == 'Y':
|
||||
year = int(val)
|
||||
elif key == 'm':
|
||||
month = int(val)
|
||||
elif key == 'B':
|
||||
month = locale_time.f_month.index(val.lower())
|
||||
elif key == 'b':
|
||||
month = locale_time.a_month.index(val.lower())
|
||||
elif key == 'd':
|
||||
day = int(val)
|
||||
elif key == 'H':
|
||||
hour = int(val)
|
||||
elif key == 'I':
|
||||
hour = int(val)
|
||||
ampm = found_dict.get('p', '').lower()
|
||||
# If there was no AM/PM indicator, we'll treat this like AM
|
||||
if ampm in ('', locale_time.am_pm[0]):
|
||||
# We're in AM so the hour is correct unless we're
|
||||
# looking at 12 midnight.
|
||||
# 12 midnight == 12 AM == hour 0
|
||||
if hour == 12:
|
||||
hour = 0
|
||||
elif ampm == locale_time.am_pm[1]:
|
||||
# We're in PM so we need to add 12 to the hour unless
|
||||
# we're looking at 12 noon.
|
||||
# 12 noon == 12 PM == hour 12
|
||||
if hour != 12:
|
||||
hour += 12
|
||||
elif key == 'M':
|
||||
minute = int(val)
|
||||
elif key == 'S':
|
||||
second = int(val)
|
||||
elif key == 'f':
|
||||
if msec: # pragma: no cover - currently unused
|
||||
s = val
|
||||
# Pad to always return microseconds.
|
||||
s += "0" * (6 - len(s))
|
||||
fraction = int(s)
|
||||
elif key == 'A':
|
||||
weekday = locale_time.f_weekday.index(val.lower())
|
||||
elif key == 'a':
|
||||
weekday = locale_time.a_weekday.index(val.lower())
|
||||
elif key == 'w':
|
||||
weekday = int(val) - 1
|
||||
if weekday < 0: weekday = 6
|
||||
elif key == 'j':
|
||||
julian = int(val)
|
||||
elif key in ('U', 'W'):
|
||||
week_of_year = int(val)
|
||||
# U starts week on Sunday, W - on Monday
|
||||
week_of_year_start = 6 if key == 'U' else 0
|
||||
elif key in ('z', 'Z'):
|
||||
z = val
|
||||
if z in ("Z", "UTC", "GMT"):
|
||||
tzoffset = 0
|
||||
else:
|
||||
tzoffset = zone2offset(z, 0); # currently offset-based only
|
||||
|
||||
# Fail2Ban will assume it's this year
|
||||
assume_year = False
|
||||
if year is None:
|
||||
if not now: now = MyTime.now()
|
||||
year = now.year
|
||||
assume_year = True
|
||||
if month is None or day is None:
|
||||
# If we know the week of the year and what day of that week, we can figure
|
||||
# out the Julian day of the year.
|
||||
if julian is None and week_of_year is not None and weekday is not None:
|
||||
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
|
||||
(week_of_year_start == 0))
|
||||
# Cannot pre-calculate datetime.datetime() since can change in Julian
|
||||
# calculation and thus could have different value for the day of the week
|
||||
# calculation.
|
||||
if julian is not None:
|
||||
datetime_result = datetime.datetime.fromordinal((julian - 1) + datetime.datetime(year, 1, 1).toordinal())
|
||||
year = datetime_result.year
|
||||
month = datetime_result.month
|
||||
day = datetime_result.day
|
||||
|
||||
# Fail2Ban assume today
|
||||
assume_today = False
|
||||
if month is None and day is None:
|
||||
if not now: now = MyTime.now()
|
||||
month = now.month
|
||||
day = now.day
|
||||
assume_today = True
|
||||
|
||||
# Actually create date
|
||||
date_result = datetime.datetime(
|
||||
year, month, day, hour, minute, second, fraction)
|
||||
# Correct timezone if not supplied in the log linge
|
||||
if tzoffset is None and default_tz is not None:
|
||||
tzoffset = zone2offset(default_tz, date_result)
|
||||
# Add timezone info
|
||||
if tzoffset is not None:
|
||||
date_result -= datetime.timedelta(seconds=tzoffset * 60)
|
||||
|
||||
if assume_today:
|
||||
if not now: now = MyTime.now()
|
||||
if date_result > now:
|
||||
# Rollover at midnight, could mean it's yesterday...
|
||||
date_result -= datetime.timedelta(days=1)
|
||||
if assume_year:
|
||||
if not now: now = MyTime.now()
|
||||
if date_result > now + datetime.timedelta(days=1): # ignore by timezone issues (+24h)
|
||||
# assume last year - also reset month and day as it's not yesterday...
|
||||
date_result = date_result.replace(
|
||||
year=year-1, month=month, day=day)
|
||||
|
||||
# make time:
|
||||
if tzoffset is not None:
|
||||
tm = calendar.timegm(date_result.utctimetuple())
|
||||
else:
|
||||
tm = time.mktime(date_result.timetuple())
|
||||
if msec: # pragma: no cover - currently unused
|
||||
tm += fraction/1000000.0
|
||||
return tm
|
||||
|
||||
|
||||
# Timezone-abbreviation map: abbreviation -> UTC offset in minutes.
# Pre-seeded so that an empty or missing abbreviation means "no offset" (0);
# all other entries are filled from TZ_STR by _init_TZ_ABBR() below.
TZ_ABBR_OFFS = {'':0, None:0}

# Each non-empty line: "<offset-in-hours> <abbr> <abbr> ...".
# Offsets may be negative and fractional (e.g. 5.75 for Nepal).
TZ_STR = '''
-12 Y
-11 X NUT SST
-10 W CKT HAST HST TAHT TKT
-9 V AKST GAMT GIT HADT HNY
-8 U AKDT CIST HAY HNP PST PT
-7 T HAP HNR MST PDT
-6 S CST EAST GALT HAR HNC MDT
-5 R CDT COT EASST ECT EST ET HAC HNE PET
-4 Q AST BOT CLT COST EDT FKT GYT HAE HNA PYT
-3 P ADT ART BRT CLST FKST GFT HAA PMST PYST SRT UYT WGT
-2 O BRST FNT PMDT UYST WGST
-1 N AZOT CVT EGT
0 Z EGST GMT UTC WET WT
1 A CET DFT WAT WEDT WEST
2 B CAT CEDT CEST EET SAST WAST
3 C EAT EEDT EEST IDT MSK
4 D AMT AZT GET GST KUYT MSD MUT RET SAMT SCT
5 E AMST AQTT AZST HMT MAWT MVT PKT TFT TJT TMT UZT YEKT
6 F ALMT BIOT BTT IOT KGT NOVT OMST YEKST
7 G CXT DAVT HOVT ICT KRAT NOVST OMSST THA WIB
8 H ACT AWST BDT BNT CAST HKT IRKT KRAST MYT PHT SGT ULAT WITA WST
9 I AWDT IRKST JST KST PWT TLT WDT WIT YAKT
10 K AEST ChST PGT VLAT YAKST YAPT
11 L AEDT LHDT MAGT NCT PONT SBT VLAST VUT
12 M ANAST ANAT FJT GILT MAGST MHT NZST PETST PETT TVT WFT
13 FJST NZDT
11.5 NFT
10.5 ACDT LHST
9.5 ACST
6.5 CCT MMT
5.75 NPT
5.5 SLT
4.5 AFT IRDT
3.5 IRST
-2.5 HAT NDT
-3.5 HNT NST NT
-4.5 HLV VET
-9.5 MART MIT
'''


def _init_TZ_ABBR():
	"""Initialized TZ_ABBR_OFFS dictionary (TZ -> offset in minutes).

	Parses TZ_STR: the first token of each non-empty line is the offset in
	hours (possibly negative/fractional), converted once to whole minutes;
	every following token is an abbreviation mapped to that offset.
	"""
	for tzline in map(str.split, TZ_STR.split('\n')):
		# skip blank lines (str.split of an empty line yields []):
		if not tzline: continue
		tzoffset = int(float(tzline[0]) * 60)
		for tz in tzline[1:]:
			TZ_ABBR_OFFS[tz] = tzoffset

_init_TZ_ABBR()
|
||||
293
fail2ban-master/fail2ban/server/ticket.py
Normal file
293
fail2ban-master/fail2ban/server/ticket.py
Normal file
@@ -0,0 +1,293 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from ..helpers import getLogger
|
||||
from .ipdns import IPAddr
|
||||
from .mytime import MyTime
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Ticket(object):
	"""Value object holding an ID (normally an IP), event time, ban state and extra data.

	Base class for FailTicket/BanTicket; instances are restricted to __slots__.
	"""

	__slots__ = ('_id', '_flags', '_banCount', '_banTime', '_time', '_data', '_retry', '_lastReset')

	# largest representable "end of ban" time, used for permanent bans
	MAX_TIME = 0X7FFFFFFFFFFF ;# 4461763-th year

	# bit values stored in _flags:
	RESTORED = 0x01
	BANNED = 0x08

	def __init__(self, ip=None, time=None, matches=None, data={}, ticket=None):
		"""Ticket constructor

		@param ip the IP address (strings are converted to IPAddr via setID)
		@param time the failure/ban time (defaults to MyTime.time())
		@param matches (log) lines caused the ticket
		@param data extra key/value data; None values are filtered out
		@param ticket optional ticket to copy all slot values from
		"""

		self.setID(ip)
		self._flags = 0;
		self._banCount = 0;
		self._banTime = None;
		self._time = time if time is not None else MyTime.time()
		# NOTE: the shared mutable default "data={}" is only read here, never
		# mutated, so it is safe in this constructor.
		self._data = {'matches': matches or [], 'failures': 0}
		if data is not None:
			for k,v in data.items():
				if v is not None:
					self._data[k] = v
		if ticket:
			# ticket available - copy whole information from ticket:
			self.update(ticket)
			#self.__dict__.update(i for i in ticket.__dict__.iteritems() if i[0] in self.__dict__)

	def __str__(self):
		"""Human-readable summary: class name, id, times, counts and matches."""
		return "%s: ip=%s time=%s bantime=%s bancount=%s #attempts=%d matches=%r" % \
			(self.__class__.__name__.split('.')[-1], self._id, self._time,
			self._banTime, self._banCount,
			self._data['failures'], self._data.get('matches', []))

	def __repr__(self):
		return str(self)

	def __eq__(self, other):
		# equal if id, time (rounded to 2 decimals) and data match;
		# non-ticket operands compare unequal via the AttributeError path
		try:
			return self._id == other._id and \
				round(self._time, 2) == round(other._time, 2) and \
				self._data == other._data
		except AttributeError:
			return False

	def update(self, ticket):
		"""Copy every non-None slot value from another ticket into this one."""
		for n in ticket.__slots__:
			v = getattr(ticket, n, None)
			if v is not None:
				setattr(self, n, v)

	def setID(self, value):
		# guarantee using IPAddr instead of unicode, str for the IP
		if isinstance(value, str):
			value = IPAddr(value)
		self._id = value

	def getID(self):
		return self._id

	def getIP(self):
		# real IP may be carried in data['ip'] when the id is not the IP itself
		return self._data.get('ip', self._id)

	def setTime(self, value):
		self._time = value

	def getTime(self):
		return self._time

	def setBanTime(self, value):
		self._banTime = value

	def getBanTime(self, defaultBT=None):
		"""Return this ticket's ban time, or defaultBT if none is set."""
		return (self._banTime if self._banTime is not None else defaultBT)

	def setBanCount(self, value, always=False):
		# only ever increases the count, unless "always" forces the new value
		if always or value > self._banCount:
			self._banCount = value

	def incrBanCount(self, value=1):
		self._banCount += value

	def getBanCount(self):
		return self._banCount;

	def getEndOfBanTime(self, defaultBT=None):
		"""Return the absolute time the ban ends (MAX_TIME for permanent, bantime == -1)."""
		bantime = (self._banTime if self._banTime is not None else defaultBT)
		# permanent
		if bantime == -1:
			return Ticket.MAX_TIME
		# unban time (end of ban):
		return self._time + bantime

	def isTimedOut(self, time, defaultBT=None):
		"""Return True if the ban expired at given time; never for permanent bans (-1)."""
		bantime = (self._banTime if self._banTime is not None else defaultBT)
		# permanent
		if bantime == -1:
			return False
		# timed out
		return (time > self._time + bantime)

	def setAttempt(self, value):
		self._data['failures'] = value

	def getAttempt(self):
		return self._data['failures']

	def setMatches(self, matches):
		"""Set matched log lines; a falsy value removes the entry entirely."""
		if matches:
			self._data['matches'] = matches
		else:
			try:
				del self._data['matches']
			except KeyError:
				pass

	def getMatches(self):
		# matches may be stored as list/tuple fragments - join each into one string
		return [(line if not isinstance(line, (list, tuple)) else "".join(line)) \
			for line in self._data.get('matches', ())]

	@property
	def restored(self):
		# truthy (non-zero) if the RESTORED flag bit is set
		return self._flags & Ticket.RESTORED
	@restored.setter
	def restored(self, value):
		if value:
			self._flags |= Ticket.RESTORED
		else:
			self._flags &= ~(Ticket.RESTORED)

	@property
	def banned(self):
		# truthy (non-zero) if the BANNED flag bit is set
		return self._flags & Ticket.BANNED
	@banned.setter
	def banned(self, value):
		if value:
			self._flags |= Ticket.BANNED
		else:
			self._flags &= ~(Ticket.BANNED)

	def setData(self, *args, **argv):
		"""Set or merge extra data; None values are always filtered out.

		One positional dict overwrites data; two positionals merge as a single
		key/value pair; more positionals merge as a flat k,v,k,v,... sequence;
		keyword arguments also merge.
		"""
		# if overwrite - set data and filter None values:
		if len(args) == 1:
			# todo: if support >= 2.7 only:
			# self._data = {k:v for k,v in args[0].iteritems() if v is not None}
			self._data = dict([(k,v) for k,v in args[0].items() if v is not None])
		# add k,v list or dict (merge):
		elif len(args) == 2:
			self._data.update((args,))
		elif len(args) > 2:
			self._data.update((k,v) for k,v in zip(*[iter(args)]*2))
		if len(argv):
			self._data.update(argv)
		# filter (delete) None values:
		# todo: if support >= 2.7 only:
		# self._data = {k:v for k,v in self._data.iteritems() if v is not None}
		self._data = dict([(k,v) for k,v in self._data.items() if v is not None])

	def getData(self, key=None, default=None):
		"""Return data: the whole dict (key=None), a dict filtered by a callable
		or an iterable of keys, or the single value stored under a scalar key.
		"""
		# return whole data dict:
		if key is None:
			return self._data
		# return default if not exists:
		if not self._data:
			return default
		if not isinstance(key,(str,type(None),int,float,bool,complex)):
			# return filtered by lambda/function:
			if callable(key):
				# todo: if support >= 2.7 only:
				# return {k:v for k,v in self._data.iteritems() if key(k)}
				return dict([(k,v) for k,v in self._data.items() if key(k)])
			# return filtered by keys:
			if hasattr(key, '__iter__'):
				# todo: if support >= 2.7 only:
				# return {k:v for k,v in self._data.iteritems() if k in key}
				return dict([(k,v) for k,v in self._data.items() if k in key])
		# return single value of data:
		return self._data.get(key, default)

	@property
	def banEpoch(self):
		# lazily stored attribute; 0 when it was never assigned
		return getattr(self, '_banEpoch', 0)
	@banEpoch.setter
	def banEpoch(self, value):
		self._banEpoch = value
||||
|
||||
|
||||
class FailTicket(Ticket):
	"""Ticket specialized for failures: additionally tracks the first failure
	time and a (possibly artificial) retry count."""

	def __init__(self, ip=None, time=None, matches=None, data={}, ticket=None):
		# this class variables:
		self._firstTime = None
		self._retry = 1
		# create/copy using default ticket constructor:
		Ticket.__init__(self, ip, time, matches, data, ticket)
		# init (only when not copied from another FailTicket, whose values
		# were already taken over by the base constructor via update()):
		if not isinstance(ticket, FailTicket):
			self._firstTime = time if time is not None else self.getTime()
			self._retry = self._data.get('failures', 1)

	def setRetry(self, value):
		""" Set artificial retry count, normally equal failures / attempt,
		used in incremental features (BanTimeIncr) to increase retry count for bad IPs
		"""
		self._retry = value
		# a retry implies at least one failure:
		if not self._data['failures']:
			self._data['failures'] = 1
		# resetting retry to 0 also clears failures and matches:
		if not value:
			self._data['failures'] = 0
			self._data['matches'] = []

	def getRetry(self):
		""" Returns failures / attempt count or
		artificial retry count increased for bad IPs
		"""
		return self._retry

	def adjustTime(self, time, maxTime):
		""" Adjust time of ticket and current attempts count considering given maxTime
		as estimation from rate by previous known interval (if it exceeds the findTime)
		"""
		if time > self._time:
			# expand current interval and attempts count (considering maxTime):
			if self._firstTime < time - maxTime:
				# adjust retry calculated as estimation from rate by previous known interval:
				self._retry = int(round(self._retry / float(time - self._firstTime) * maxTime))
				self._firstTime = time - maxTime
			# last time of failure:
			self._time = time

	def inc(self, matches=None, attempt=1, count=1):
		"""Increase retry by count and failures by attempt; append matches if given."""
		self._retry += count
		self._data['failures'] += attempt
		if matches:
			# we should duplicate "matches", because possibly referenced to multiple tickets:
			if self._data['matches']:
				self._data['matches'] = self._data['matches'] + matches
			else:
				self._data['matches'] = matches

	@staticmethod
	def wrap(o):
		"""Convert a ticket to FailTicket in-place by switching its class."""
		o.__class__ = FailTicket
		return o
||||
|
||||
##
# Ban Ticket.
#
# This class extends the Ticket class. It is mainly used by the BanManager.

class BanTicket(FailTicket):
	"""Ticket representing a ban; behaves exactly like FailTicket."""

	@staticmethod
	def wrap(o):
		"""Convert a ticket to BanTicket in-place by switching its class."""
		o.__class__ = BanTicket
		return o
||||
522
fail2ban-master/fail2ban/server/transmitter.py
Normal file
522
fail2ban-master/fail2ban/server/transmitter.py
Normal file
@@ -0,0 +1,522 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import time
|
||||
import json
|
||||
|
||||
from ..helpers import getLogger, logging
|
||||
from .. import version
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Transmitter:
	"""Decodes protocol commands received from the client and dispatches them
	to the Server instance, returning (code, result) acknowledgements."""

	##
	# Constructor.
	#
	# @param The server reference

	def __init__(self, server):
		self.__server = server
		# when non-zero, "set" handlers skip the read-back reply (used during
		# bulk processing: reload / server-stream)
		self.__quiet = 0

	##
	# Proceeds a command.
	#
	# Proceeds an incoming command.
	# @param command The incoming command

	def proceed(self, command):
		"""Execute one command; return (0, result) on success, (1, exception) on failure."""
		# Deserialize object
		logSys.log(5, "Command: %r", command)
		try:
			ret = self.__commandHandler(command)
			ack = 0, ret
		except Exception as e:
			logSys.error("Command %r has failed. Received %r",
				command, e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			ack = 1, e
		return ack

	##
	# Handle an command.
	#
	#

	def __commandHandler(self, command):
		"""Dispatch a top-level command (ping/add/set/get/start/stop/reload/...).

		Raises Exception for unknown commands.
		"""
		name = command[0]
		if name == "ping":
			return "pong"
		elif name == "add":
			name = command[1]
			if name == "--all":
				raise Exception("Reserved name %r" % (name,))
			try:
				backend = command[2]
			except IndexError:
				# backend is optional:
				backend = "auto"
			self.__server.addJail(name, backend)
			return name
		elif name == "multi-set":
			return self.__commandSet(command[1:], True)
		elif name == "set":
			return self.__commandSet(command[1:])
		elif name == "start":
			name = command[1]
			self.__server.startJail(name)
			return None
		elif name == "stop":
			if len(command) == 1:
				# no argument - shut the whole server down:
				self.__server.quit()
			elif command[1] == "--all":
				self.__server.stopAllJail()
			else:
				name = command[1]
				self.__server.stopJail(name)
			return None
		elif name == "reload":
			opts = command[1:3]
			# suppress per-command replies while replaying the command stream:
			self.__quiet = 1
			try:
				self.__server.reloadJails(*opts, begin=True)
				for cmd in command[3]:
					self.__commandHandler(cmd)
			finally:
				self.__quiet = 0
				self.__server.reloadJails(*opts, begin=False)
			return 'OK'
		elif name == "unban" and len(command) >= 2:
			# unban in all jails:
			value = command[1:]
			# if all ips:
			if len(value) == 1 and value[0] == "--all":
				return self.__server.setUnbanIP()
			return self.__server.setUnbanIP(None, value)
		elif name == "banned":
			# check IP is banned in all jails:
			return self.__server.banned(None, command[1:])
		elif name == "echo":
			return command[1:]
		elif name == "server-status":
			logSys.debug("Status: ready")
			return "Server ready"
		elif name == "server-stream":
			# replay a batch of commands quietly:
			self.__quiet = 1
			try:
				for cmd in command[1]:
					self.__commandHandler(cmd)
			finally:
				self.__quiet = 0
			return None
		elif name == "sleep":
			value = command[1]
			time.sleep(float(value))
			return None
		elif name == "flushlogs":
			return self.__server.flushLogs()
		elif name == "get":
			return self.__commandGet(command[1:])
		elif name == "status":
			return self.status(command[1:])
		elif name in ("stats", "statistic", "statistics"):
			return self.__server.status("--all", "stats")
		elif name == "version":
			return version.version
		elif name == "config-error":
			logSys.error(command[1])
			return None
		raise Exception("Invalid command")

	def __commandSet(self, command, multiple=False):
		"""Handle "set <name> ..." commands: server-level settings first
		(loglevel, logtarget, db...), then per-jail settings (command[0] is the
		jail name, command[1] the option). Returns the new value unless quiet.
		"""
		name = command[0]
		# Logging
		if name == "loglevel":
			value = command[1]
			self.__server.setLogLevel(value)
			if self.__quiet: return
			return self.__server.getLogLevel()
		elif name == "logtarget":
			value = command[1]
			if self.__server.setLogTarget(value):
				if self.__quiet: return
				return self.__server.getLogTarget()
			else:
				raise Exception("Failed to change log target")
		elif name == "syslogsocket":
			value = command[1]
			if self.__server.setSyslogSocket(value):
				if self.__quiet: return
				return self.__server.getSyslogSocket()
			else:
				raise Exception("Failed to change syslog socket")
		elif name == "allowipv6":
			value = command[1]
			self.__server.setIPv6IsAllowed(value)
			if self.__quiet: return
			return value
		#Thread
		elif name == "thread":
			value = command[1]
			return self.__server.setThreadOptions(value)
		#Database
		elif name == "dbfile":
			self.__server.setDatabase(command[1])
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				if self.__quiet: return
				return db.filename
		elif name == "dbmaxmatches":
			db = self.__server.getDatabase()
			if db is None:
				logSys.log(logging.MSG, "dbmaxmatches setting was not in effect since no db yet")
				return None
			else:
				db.maxMatches = int(command[1])
				if self.__quiet: return
				return db.maxMatches
		elif name == "dbpurgeage":
			db = self.__server.getDatabase()
			if db is None:
				logSys.log(logging.MSG, "dbpurgeage setting was not in effect since no db yet")
				return None
			else:
				db.purgeage = command[1]
				if self.__quiet: return
				return db.purgeage
		# Jail
		elif command[1] == "idle":
			if command[2] == "on":
				self.__server.setIdleJail(name, True)
			elif command[2] == "off":
				self.__server.setIdleJail(name, False)
			else:
				raise Exception("Invalid idle option, must be 'on' or 'off'")
			if self.__quiet: return
			return self.__server.getIdleJail(name)
		# Filter
		elif command[1] == "ignoreself":
			value = command[2]
			self.__server.setIgnoreSelf(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreSelf(name)
		elif command[1] == "addignoreip":
			for value in command[2:]:
				self.__server.addIgnoreIP(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreIP(name)
		elif command[1] == "delignoreip":
			value = command[2]
			self.__server.delIgnoreIP(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreIP(name)
		elif command[1] == "ignorecommand":
			value = command[2]
			self.__server.setIgnoreCommand(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreCommand(name)
		elif command[1] == "ignorecache":
			value = command[2]
			self.__server.setIgnoreCache(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreCache(name)
		elif command[1] == "addlogpath":
			value = command[2]
			tail = False
			if len(command) == 4:
				if command[3].lower() == "tail":
					tail = True
				elif command[3].lower() != "head":
					raise ValueError("File option must be 'head' or 'tail'")
			elif len(command) > 4:
				raise ValueError("Only one file can be added at a time")
			self.__server.addLogPath(name, value, tail)
			if self.__quiet: return
			return self.__server.getLogPath(name)
		elif command[1] == "dellogpath":
			value = command[2]
			self.__server.delLogPath(name, value)
			if self.__quiet: return
			return self.__server.getLogPath(name)
		elif command[1] == "logencoding":
			value = command[2]
			self.__server.setLogEncoding(name, value)
			if self.__quiet: return
			return self.__server.getLogEncoding(name)
		elif command[1] == "addjournalmatch": # pragma: systemd no cover
			value = command[2:]
			self.__server.addJournalMatch(name, value)
			if self.__quiet: return
			return self.__server.getJournalMatch(name)
		elif command[1] == "deljournalmatch": # pragma: systemd no cover
			value = command[2:]
			self.__server.delJournalMatch(name, value)
			if self.__quiet: return
			return self.__server.getJournalMatch(name)
		elif command[1] == "prefregex":
			value = command[2]
			self.__server.setPrefRegex(name, value)
			if self.__quiet: return
			v = self.__server.getPrefRegex(name)
			return v.getRegex() if v else ""
		elif command[1] == "addfailregex":
			value = command[2]
			self.__server.addFailRegex(name, value, multiple=multiple)
			if multiple:
				return True
			if self.__quiet: return
			return self.__server.getFailRegex(name)
		elif command[1] == "delfailregex":
			value = int(command[2])
			self.__server.delFailRegex(name, value)
			if self.__quiet: return
			return self.__server.getFailRegex(name)
		elif command[1] == "addignoreregex":
			value = command[2]
			self.__server.addIgnoreRegex(name, value, multiple=multiple)
			if multiple:
				return True
			if self.__quiet: return
			return self.__server.getIgnoreRegex(name)
		elif command[1] == "delignoreregex":
			value = int(command[2])
			self.__server.delIgnoreRegex(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreRegex(name)
		elif command[1] == "usedns":
			value = command[2]
			self.__server.setUseDns(name, value)
			if self.__quiet: return
			return self.__server.getUseDns(name)
		elif command[1] == "findtime":
			value = command[2]
			self.__server.setFindTime(name, value)
			if self.__quiet: return
			return self.__server.getFindTime(name)
		elif command[1] == "datepattern":
			value = command[2]
			self.__server.setDatePattern(name, value)
			if self.__quiet: return
			return self.__server.getDatePattern(name)
		elif command[1] == "logtimezone":
			value = command[2]
			self.__server.setLogTimeZone(name, value)
			if self.__quiet: return
			return self.__server.getLogTimeZone(name)
		elif command[1] == "maxmatches":
			value = command[2]
			self.__server.setMaxMatches(name, int(value))
			if self.__quiet: return
			return self.__server.getMaxMatches(name)
		elif command[1] == "maxretry":
			value = command[2]
			self.__server.setMaxRetry(name, int(value))
			if self.__quiet: return
			return self.__server.getMaxRetry(name)
		elif command[1] == "maxlines":
			value = command[2]
			self.__server.setMaxLines(name, int(value))
			if self.__quiet: return
			return self.__server.getMaxLines(name)
		# command
		elif command[1] == "bantime":
			value = command[2]
			self.__server.setBanTime(name, value)
			if self.__quiet: return
			return self.__server.getBanTime(name)
		elif command[1] == "attempt":
			value = command[2:]
			return self.__server.addAttemptIP(name, *value)
		elif command[1].startswith("bantime."):
			value = command[2]
			opt = command[1][len("bantime."):]
			self.__server.setBanTimeExtra(name, opt, value)
			if self.__quiet: return
			return self.__server.getBanTimeExtra(name, opt)
		elif command[1] == "banip":
			value = command[2:]
			return self.__server.setBanIP(name,value)
		elif command[1] == "unbanip":
			ifexists = True
			if command[2] != "--report-absent":
				value = command[2:]
			else:
				ifexists = False
				value = command[3:]
			return self.__server.setUnbanIP(name, value, ifexists=ifexists)
		elif command[1] == "addaction":
			args = [command[2]]
			if len(command) > 3:
				# python action with json-encoded keyword arguments:
				args.extend([command[3], json.loads(command[4])])
			self.__server.addAction(name, *args)
			if self.__quiet: return
			return args[0]
		elif command[1] == "delaction":
			value = command[2]
			self.__server.delAction(name, value)
			return None
		elif command[1] == "action":
			actionname = command[2]
			action = self.__server.getAction(name, actionname)
			if multiple:
				# batch: each cmd is either a method call (json kwargs) or
				# a property assignment on the action object:
				for cmd in command[3]:
					logSys.log(5, " %r", cmd)
					actionkey = cmd[0]
					if callable(getattr(action, actionkey, None)):
						actionvalue = json.loads(cmd[1]) if len(cmd)>1 else {}
						getattr(action, actionkey)(**actionvalue)
					else:
						actionvalue = cmd[1]
						setattr(action, actionkey, actionvalue)
				return True
			else:
				actionkey = command[3]
				if callable(getattr(action, actionkey, None)):
					actionvalue = json.loads(command[4]) if len(command)>4 else {}
					if self.__quiet: return
					return getattr(action, actionkey)(**actionvalue)
				else:
					actionvalue = command[4]
					setattr(action, actionkey, actionvalue)
					if self.__quiet: return
					return getattr(action, actionkey)
		raise Exception("Invalid command %r (no set action or not yet implemented)" % (command[1],))

	def __commandGet(self, command):
		"""Handle "get <name> ..." commands, mirroring __commandSet's options.

		Raises Exception for unknown get options.
		"""
		name = command[0]
		# Logging
		if name == "loglevel":
			return self.__server.getLogLevel()
		elif name == "logtarget":
			return self.__server.getLogTarget()
		elif name == "syslogsocket":
			return self.__server.getSyslogSocket()
		#Thread
		elif name == "thread":
			return self.__server.getThreadOptions()
		#Database
		elif name == "dbfile":
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				return db.filename
		elif name == "dbmaxmatches":
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				return db.maxMatches
		elif name == "dbpurgeage":
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				return db.purgeage
		# Jail, Filter
		elif command[1] == "banned":
			# check IP is banned in all jails:
			return self.__server.banned(name, command[2:])
		elif command[1] == "logpath":
			return self.__server.getLogPath(name)
		elif command[1] == "logencoding":
			return self.__server.getLogEncoding(name)
		elif command[1] == "journalmatch": # pragma: systemd no cover
			return self.__server.getJournalMatch(name)
		elif command[1] == "ignoreself":
			return self.__server.getIgnoreSelf(name)
		elif command[1] == "ignoreip":
			return self.__server.getIgnoreIP(name)
		elif command[1] == "ignorecommand":
			return self.__server.getIgnoreCommand(name)
		elif command[1] == "ignorecache":
			return self.__server.getIgnoreCache(name)
		elif command[1] == "prefregex":
			v = self.__server.getPrefRegex(name)
			return v.getRegex() if v else ""
		elif command[1] == "failregex":
			return self.__server.getFailRegex(name)
		elif command[1] == "ignoreregex":
			return self.__server.getIgnoreRegex(name)
		elif command[1] == "usedns":
			return self.__server.getUseDns(name)
		elif command[1] == "findtime":
			return self.__server.getFindTime(name)
		elif command[1] == "datepattern":
			return self.__server.getDatePattern(name)
		elif command[1] == "logtimezone":
			return self.__server.getLogTimeZone(name)
		elif command[1] == "maxmatches":
			return self.__server.getMaxMatches(name)
		elif command[1] == "maxretry":
			return self.__server.getMaxRetry(name)
		elif command[1] == "maxlines":
			return self.__server.getMaxLines(name)
		# Action
		elif command[1] == "bantime":
			return self.__server.getBanTime(name)
		elif command[1] == "banip":
			return self.__server.getBanList(name,
				withTime=len(command) > 2 and command[2] == "--with-time")
		elif command[1].startswith("bantime."):
			opt = command[1][len("bantime."):]
			return self.__server.getBanTimeExtra(name, opt)
		elif command[1] == "actions":
			return list(self.__server.getActions(name).keys())
		elif command[1] == "action":
			actionname = command[2]
			actionvalue = command[3]
			action = self.__server.getAction(name, actionname)
			return getattr(action, actionvalue)
		elif command[1] == "actionproperties":
			actionname = command[2]
			action = self.__server.getAction(name, actionname)
			# public non-callable attributes of the action:
			return [
				key for key in dir(action)
				if not key.startswith("_") and
					not callable(getattr(action, key))]
		elif command[1] == "actionmethods":
			actionname = command[2]
			action = self.__server.getAction(name, actionname)
			# public callable attributes of the action:
			return [
				key for key in dir(action)
				if not key.startswith("_") and callable(getattr(action, key))]
		raise Exception("Invalid command (no get action or not yet implemented)")

	def status(self, command):
		"""Return server status (no args), all-jails status ("--all") or one
		jail's status; an optional second argument selects the flavor."""
		if len(command) == 0:
			return self.__server.status()
		elif len(command) >= 1 and len(command) <= 2:
			name = command[0]
			flavor = command[1] if len(command) == 2 else "basic"
			if name == "--all":
				return self.__server.status("--all", flavor)
			return self.__server.statusJail(name, flavor=flavor)
		raise Exception("Invalid command (no status)")
|
||||
359
fail2ban-master/fail2ban/server/utils.py
Normal file
359
fail2ban-master/fail2ban/server/utils.py
Normal file
@@ -0,0 +1,359 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Serg G. Brester (sebres) and Fail2Ban Contributors"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Yaroslav Halchenko, 2012-2015 Serg G. Brester"
|
||||
__license__ = "GPL"
|
||||
|
||||
import fcntl
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
from threading import Lock
|
||||
import time
|
||||
import types
|
||||
from ..helpers import getLogger, _merge_dicts, uni_decode
|
||||
from collections import OrderedDict
|
||||
|
||||
import importlib.machinery
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
# Some hints on common abnormal exit codes, keyed by process return code;
# hint strings are %-templates expecting a "realCmd" mapping key.
_RETCODE_HINTS = {
	127: '"Command not found". Make sure that all commands in %(realCmd)r '
			'are in the PATH of fail2ban-server process '
			'(grep -a PATH= /proc/`pidof -x fail2ban-server`/environ). '
			'You may want to start '
			'"fail2ban-server -f" separately, initiate it with '
			'"fail2ban-client reload" in another shell session and observe if '
			'additional informative error messages appear in the terminals.'
}

# Dictionary to lookup signal name from number
# (built from every "SIG*" attribute of the signal module)
signame = dict((num, name)
	for name, num in signal.__dict__.items() if name.startswith("SIG"))
|
||||
|
||||
class Utils():
	"""Utilities provide diverse static methods like executes OS shell commands, etc.
	"""

	# default upper bound (seconds) for sleep back-off in wait_for:
	DEFAULT_SLEEP_TIME = 2
	# default polling interval (seconds):
	DEFAULT_SLEEP_INTERVAL = 0.2
	# short interval for fast-completing operations:
	DEFAULT_SHORT_INTERVAL = 0.001
	# shortest interval (used for very cheap polls like Popen.poll):
	DEFAULT_SHORTEST_INTERVAL = DEFAULT_SHORT_INTERVAL / 100


	class Cache(object):
		"""A simple cache with a TTL and limit on size
		"""

		def __init__(self, *args, **kwargs):
			self.setOptions(*args, **kwargs)
			# entries are key -> (value, expire-timestamp), kept in insertion order (FIFO):
			self._cache = OrderedDict()
			self.__lock = Lock()

		def setOptions(self, maxCount=1000, maxTime=60):
			# maxCount: upper bound on number of entries; maxTime: TTL in seconds
			self.maxCount = maxCount
			self.maxTime = maxTime

		def __len__(self):
			return len(self._cache)

		def get(self, k, defv=None):
			"""Return cached value for `k`, or `defv` if missing or expired."""
			v = self._cache.get(k)
			if v:
				# entry still valid (expire-timestamp lies in the future)?
				if v[1] > time.time():
					return v[0]
				# expired - evict lazily on access:
				self.unset(k)
			return defv

		def set(self, k, v):
			"""Store value `v` under key `k` with the configured TTL."""
			t = time.time()
			# avoid multiple modification of dict multi-threaded:
			cache = self._cache
			with self.__lock:
				# clean cache if max count reached:
				if len(cache) >= self.maxCount:
					# ordered (so remove some from ahead, FIFO)
					while cache:
						(ck, cv) = cache.popitem(last=False)
						# if not yet expired (but has free slot for new entry):
						if cv[1] > t and len(cache) < self.maxCount:
							break
				# set now:
				cache[k] = (v, t + self.maxTime)

		def unset(self, k):
			"""Remove key `k` from the cache (no-op if absent)."""
			with self.__lock:
				self._cache.pop(k, None)

		def clear(self):
			"""Remove all cache entries."""
			with self.__lock:
				self._cache.clear()


	@staticmethod
	def setFBlockMode(fhandle, value):
		"""Set (value=True) or clear (value=False) blocking mode of a file handle.

		Returns the new fcntl flags value."""
		flags = fcntl.fcntl(fhandle, fcntl.F_GETFL)
		if not value:
			flags |= os.O_NONBLOCK
		else:
			flags &= ~os.O_NONBLOCK
		fcntl.fcntl(fhandle, fcntl.F_SETFL, flags)
		return flags

	@staticmethod
	def buildShellCmd(realCmd, varsDict):
		"""Generates new shell command as array, contains map as variables to
		arguments statement (varsStat), the command (realCmd) used this variables and
		the list of the arguments, mapped from varsDict

		Example:
			buildShellCmd('echo "V2: $v2, V1: $v1"', {"v1": "val 1", "v2": "val 2", "vUnused": "unused var"})
		returns:
			['v1=$0 v2=$1 vUnused=$2 \necho "V2: $v2, V1: $v1"', 'val 1', 'val 2', 'unused var']

		NOTE(review): if `realCmd` is already a list it is mutated in place - callers
		presumably rely on this; do not copy without checking call sites.
		"""
		# build map as array of vars and command line array:
		varsStat = ""
		if not isinstance(realCmd, list):
			realCmd = [realCmd]
		# positional shell parameters: $0 corresponds to realCmd[0] itself,
		# appended values continue from the current end of the array:
		i = len(realCmd)-1
		for k, v in varsDict.items():
			varsStat += "%s=$%s " % (k, i)
			realCmd.append(v)
			i += 1
		# prepend the var-assignment preamble to the command text:
		realCmd[0] = varsStat + "\n" + realCmd[0]
		return realCmd

	@staticmethod
	def executeCmd(realCmd, timeout=60, shell=True, output=False, tout_kill_tree=True,
		success_codes=(0,), varsDict=None):
		"""Executes a command.

		Parameters
		----------
		realCmd : str
			The command to execute.
		timeout : int
			The time out in seconds for the command.
		shell : bool
			If shell is True (default), the specified command (may be a string) will be
			executed through the shell.
		output : bool
			If output is True, the function returns tuple (success, stdoutdata, stderrdata, returncode).
			If False, just indication of success is returned
		tout_kill_tree : bool
			If True (default) on timeout the whole process group is killed with
			SIGKILL even after a successful SIGTERM; otherwise kill only if still running.
		success_codes : tuple
			Return codes that are considered success (default (0,)).
		varsDict: dict
			variables supplied to the command (or to the shell script)

		Returns
		-------
		bool or (bool, str, str, int)
			True if the command succeeded and with stdout, stderr, returncode if output was set to True

		Raises
		------
		OSError
			If command fails to be executed.
		RuntimeError
			If command execution times out.
		"""
		stdout = stderr = None
		retcode = None
		popen = env = None
		if varsDict:
			if shell:
				# build map as array of vars and command line array:
				realCmd = Utils.buildShellCmd(realCmd, varsDict)
			else: # pragma: no cover - currently unused
				env = _merge_dicts(os.environ, varsDict)
		# id used only as a stable marker to correlate log lines of one execution:
		realCmdId = id(realCmd)
		# lazy one-shot logger of the command itself (set to None once emitted):
		logCmd = lambda level: logSys.log(level, "%x -- exec: %s", realCmdId, realCmd)
		try:
			popen = subprocess.Popen(
				realCmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, env=env,
				preexec_fn=os.setsid  # so that killpg does not kill our process
			)
			# wait with timeout for process has terminated:
			retcode = popen.poll()
			if retcode is None:
				def _popen_wait_end():
					retcode = popen.poll()
					return (True, retcode) if retcode is not None else None
				# popen.poll is fast operation so we can use the shortest sleep interval:
				retcode = Utils.wait_for(_popen_wait_end, timeout, Utils.DEFAULT_SHORTEST_INTERVAL)
				if retcode:
					retcode = retcode[1]
			# if timeout:
			if retcode is None:
				if logCmd: logCmd(logging.ERROR); logCmd = None
				logSys.error("%x -- timed out after %s seconds." %
					(realCmdId, timeout))
				pgid = os.getpgid(popen.pid)
				# if not tree - first try to terminate and then kill, otherwise - kill (-9) only:
				os.killpg(pgid, signal.SIGTERM) # Terminate the process
				time.sleep(Utils.DEFAULT_SLEEP_INTERVAL)
				retcode = popen.poll()
				#logSys.debug("%s -- terminated %s ", realCmd, retcode)
				if retcode is None or tout_kill_tree: # Still going...
					os.killpg(pgid, signal.SIGKILL) # Kill the process
					time.sleep(Utils.DEFAULT_SLEEP_INTERVAL)
					if retcode is None: # pragma: no cover - too sporadic
						retcode = popen.poll()
						#logSys.debug("%s -- killed %s ", realCmd, retcode)
				# process group already vanished - report as killed:
				if retcode is None and not Utils.pid_exists(pgid): # pragma: no cover
					retcode = signal.SIGKILL
		except OSError as e:
			if logCmd: logCmd(logging.ERROR); logCmd = None
			stderr = "%s -- failed with %s" % (realCmd, e)
			logSys.error(stderr)
			if not popen:
				return False if not output else (False, stdout, stderr, retcode)

		std_level = logging.DEBUG if retcode in success_codes else logging.ERROR
		if std_level >= logSys.getEffectiveLevel():
			if logCmd: logCmd(std_level-1 if std_level == logging.DEBUG else logging.ERROR); logCmd = None
		# if we need output (to return or to log it):
		if output or std_level >= logSys.getEffectiveLevel():

			# if was timeouted (killed/terminated) - to prevent waiting, set std handles to non-blocking mode.
			if popen.stdout:
				try:
					if retcode is None or retcode < 0:
						Utils.setFBlockMode(popen.stdout, False)
					stdout = popen.stdout.read()
				except IOError as e: # pragma: no cover
					logSys.error(" ... -- failed to read stdout %s", e)
				if stdout is not None and stdout != '' and std_level >= logSys.getEffectiveLevel():
					for l in stdout.splitlines():
						logSys.log(std_level, "%x -- stdout: %r", realCmdId, uni_decode(l))
			if popen.stderr:
				try:
					if retcode is None or retcode < 0:
						Utils.setFBlockMode(popen.stderr, False)
					stderr = popen.stderr.read()
				except IOError as e: # pragma: no cover
					logSys.error(" ... -- failed to read stderr %s", e)
				if stderr is not None and stderr != '' and std_level >= logSys.getEffectiveLevel():
					for l in stderr.splitlines():
						logSys.log(std_level, "%x -- stderr: %r", realCmdId, uni_decode(l))

		if popen.stdout: popen.stdout.close()
		if popen.stderr: popen.stderr.close()

		success = False
		if retcode in success_codes:
			logSys.debug("%x -- returned successfully %i", realCmdId, retcode)
			success = True
		elif retcode is None:
			logSys.error("%x -- unable to kill PID %i", realCmdId, popen.pid)
		elif retcode < 0 or retcode > 128:
			# dash would return negative while bash 128 + n
			sigcode = -retcode if retcode < 0 else retcode - 128
			logSys.error("%x -- killed with %s (return code: %s)",
				realCmdId, signame.get(sigcode, "signal %i" % sigcode), retcode)
		else:
			msg = _RETCODE_HINTS.get(retcode, None)
			logSys.error("%x -- returned %i", realCmdId, retcode)
			if msg:
				logSys.info("HINT on %i: %s", retcode, msg % locals())
		if output:
			return success, stdout, stderr, retcode
		return success if len(success_codes) == 1 else (success, retcode)

	@staticmethod
	def wait_for(cond, timeout, interval=None):
		"""Wait until condition expression `cond` is True, up to `timeout` sec

		Parameters
		----------
		cond : callable
			The expression to check condition
			(should return equivalent to bool True if wait successful).
		timeout : float or callable
			The time out for end of wait
			(in seconds or callable that returns True if timeout occurred).
		interval : float (optional)
			Polling start interval for wait cycle in seconds.

		Returns
		-------
		variable
			The return value of the last call of `cond`,
			logical False (or None, 0, etc) if timeout occurred.
		"""
		#logSys.log(5, "  wait for %r, tout: %r / %r", cond, timeout, interval)
		ini = 1 # to delay initializations until/when necessary
		while True:
			ret = cond()
			if ret:
				return ret
			if ini:
				# first miss - initialize deadline and sleep state:
				ini = stm = 0
				if not callable(timeout):
					time0 = time.time() + timeout
					timeout_expr = lambda: time.time() > time0
				else:
					timeout_expr = timeout
			if timeout_expr():
				break
			# back-off: grow sleep interval up to DEFAULT_SLEEP_TIME:
			stm = min(stm + (interval or Utils.DEFAULT_SLEEP_INTERVAL), Utils.DEFAULT_SLEEP_TIME)
			time.sleep(stm)
		return ret

	# Solution from http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
	# under cc by-sa 3.0
	if os.name == 'posix':
		@staticmethod
		def pid_exists(pid):
			"""Check whether pid exists in the current process table."""
			import errno
			if pid < 0:
				return False
			try:
				# signal 0 performs error checking only, no signal is sent:
				os.kill(pid, 0)
			except OSError as e:
				# EPERM: process exists but we lack permission to signal it:
				return e.errno == errno.EPERM
			else:
				return True
	else: # pragma: no cover (no windows currently supported)
		@staticmethod
		def pid_exists(pid):
			"""Check whether pid exists (Windows variant via OpenProcess)."""
			import ctypes
			kernel32 = ctypes.windll.kernel32
			SYNCHRONIZE = 0x100000

			process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
			if process != 0:
				kernel32.CloseHandle(process)
				return True
			else:
				return False

	@staticmethod
	def load_python_module(pythonModule):
		"""Load the given python file as a module object.

		The module name is the file's base name without extension; the module
		is executed but not registered in sys.modules."""
		pythonModuleName = os.path.splitext(
			os.path.basename(pythonModule))[0]
		ldr = importlib.machinery.SourceFileLoader(pythonModuleName, pythonModule)
		mod = types.ModuleType(ldr.name)
		ldr.exec_module(mod)
		return mod
|
||||
42
fail2ban-master/fail2ban/setup.py
Normal file
42
fail2ban-master/fail2ban/setup.py
Normal file
@@ -0,0 +1,42 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Serg G. Brester"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def updatePyExec(bindir, executable=None):
	"""Update fail2ban-python link to current python version (where f2b-modules located/installed)

	Parameters
	----------
	bindir : str
		Directory where the `fail2ban-python` symlink should reside.
	executable : str, optional
		Interpreter the link should point to; defaults to the currently
		running interpreter (`sys.executable`).
	"""
	bindir = os.path.realpath(bindir)
	if executable is None:
		executable = sys.executable
	pypath = os.path.join(bindir, 'fail2ban-python')
	# resolve link target once (original resolved it twice):
	target = os.path.realpath(pypath)
	isfile = os.path.isfile(target)
	# if not exists or points to another interpreter - (re)create the link:
	if not isfile or target != os.path.realpath(executable):
		if isfile:
			os.unlink(pypath)
		os.symlink(executable, pypath)
	# extend current environment path (e.g. if fail2ban not yet installed):
	if bindir not in os.environ["PATH"].split(os.pathsep):
		os.environ["PATH"] = os.environ["PATH"] + os.pathsep + bindir
|
||||
25
fail2ban-master/fail2ban/tests/__init__.py
Normal file
25
fail2ban-master/fail2ban/tests/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
22
fail2ban-master/fail2ban/tests/action_d/__init__.py
Normal file
22
fail2ban-master/fail2ban/tests/action_d/__init__.py
Normal file
@@ -0,0 +1,22 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Steven Hiscocks"
|
||||
__copyright__ = "Copyright (c) 2014 Steven Hiscocks"
|
||||
__license__ = "GPL"
|
||||
316
fail2ban-master/fail2ban/tests/action_d/test_smtp.py
Normal file
316
fail2ban-master/fail2ban/tests/action_d/test_smtp.py
Normal file
@@ -0,0 +1,316 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
import os
|
||||
import threading
|
||||
import unittest
|
||||
import re
|
||||
import sys
|
||||
import types
|
||||
import importlib
|
||||
|
||||
from ..dummyjail import DummyJail
|
||||
from ..utils import CONFIG_DIR, asyncserver, Utils, uni_decode
|
||||
|
||||
|
||||
class _SMTPActionTestCase():
	"""Mixin with shared checks for the smtp.py action.

	The concrete TestCase is expected to provide:
	  - self.smtpd: a test SMTP server/handler recording `mailfrom`,
	    `rcpttos`, `data` / `org_data` and a `ready` flag,
	  - self.action: the loaded smtp.py Action instance,
	  - self.jail: a DummyJail.
	"""

	def _reset_smtpd(self):
		# drop message state recorded by the previous test (if any):
		for a in ('mailfrom', 'org_data', 'data'):
			if hasattr(self.smtpd, a): delattr(self.smtpd, a)
		# NOTE(review): this sets `ready` on the TestCase, not on self.smtpd;
		# smtpd.ready is (re)set in _exec_and_wait anyway - confirm intent.
		self.ready = False

	def _exec_and_wait(self, doaction, timeout=3, short=False):
		# short mode: shrink the wait drastically (used when NO mail is expected):
		if short: timeout /= 25
		self.smtpd.ready = False
		doaction()
		# wait until the server signals it processed a message (or timeout):
		Utils.wait_for(lambda: self.smtpd.ready, timeout)

	def testStart(self):
		self._exec_and_wait(self.action.start)
		self.assertEqual(self.smtpd.mailfrom, "fail2ban")
		self.assertEqual(self.smtpd.rcpttos, ["root"])
		self.action.ssl = False # ensure it works without TLS as a sanity check
		self.assertTrue(
			"Subject: [Fail2Ban] %s: started" % self.jail.name
			in self.smtpd.data)

	def testStop(self):
		self._exec_and_wait(self.action.stop)
		self.assertEqual(self.smtpd.mailfrom, "fail2ban")
		self.assertEqual(self.smtpd.rcpttos, ["root"])
		self.assertTrue(
			"Subject: [Fail2Ban] %s: stopped" %
			self.jail.name in self.smtpd.data)

	def _testBan(self, restored=False):
		# ticket info as the action receives it on ban:
		aInfo = {
			'ip': "127.0.0.2",
			'failures': 3,
			'matches': "Test fail 1\n",
			'ipjailmatches': "Test fail 1\nTest Fail2\n",
			'ipmatches': "Test fail 1\nTest Fail2\nTest Fail3\n",
		}
		if restored:
			aInfo['restored'] = 1

		self._exec_and_wait(lambda: self.action.ban(aInfo), short=restored)
		if restored: # no mail, should raises attribute error:
			self.assertRaises(AttributeError, lambda: self.smtpd.mailfrom)
			return
		self.assertEqual(self.smtpd.mailfrom, "fail2ban")
		self.assertEqual(self.smtpd.rcpttos, ["root"])
		subject = "Subject: [Fail2Ban] %s: banned %s" % (
			self.jail.name, aInfo['ip'])
		self.assertIn(subject, self.smtpd.data)
		self.assertIn(
			"%i attempts" % aInfo['failures'], self.smtpd.data)

		# each `matches` mode should embed the corresponding match lines:
		self.action.matches = "matches"
		self._exec_and_wait(lambda: self.action.ban(aInfo))
		self.assertIn(aInfo['matches'], self.smtpd.data)

		self.action.matches = "ipjailmatches"
		self._exec_and_wait(lambda: self.action.ban(aInfo))
		self.assertIn(aInfo['ipjailmatches'], self.smtpd.data)

		self.action.matches = "ipmatches"
		self._exec_and_wait(lambda: self.action.ban(aInfo))
		self.assertIn(aInfo['ipmatches'], self.smtpd.data)

	def testBan(self):
		self._testBan()

	def testNOPByRestored(self):
		# restored tickets must not trigger a notification mail:
		self._testBan(restored=True)

	def testOptions(self):
		self._exec_and_wait(self.action.start)
		self.assertEqual(self.smtpd.mailfrom, "fail2ban")
		self.assertEqual(self.smtpd.rcpttos, ["root"])

		# custom sender/recipients are honored (toaddr is a comma list):
		self.action.fromname = "Test"
		self.action.fromaddr = "test@example.com"
		self.action.toaddr = "test@example.com, test2@example.com"
		self._exec_and_wait(self.action.start)
		self.assertEqual(self.smtpd.mailfrom, "test@example.com")
		self.assertTrue("From: %s <%s>" %
			(self.action.fromname, self.action.fromaddr) in self.smtpd.data)
		self.assertEqual(set(self.smtpd.rcpttos), set(["test@example.com", "test2@example.com"]))
|
||||
|
||||
try:
|
||||
import smtpd
|
||||
|
||||
	class TestSMTPServer(smtpd.SMTPServer):
		"""Minimal smtpd-based SMTP server that records the last received message.

		Attributes set per message: peer, mailfrom, rcpttos, org_data (raw),
		data (unwrapped text), and `ready` (True once a message arrived).
		"""

		def __init__(self, *args):
			smtpd.SMTPServer.__init__(self, *args)
			# becomes True after the first message was processed:
			self.ready = False

		def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
			self.peer = peer
			self.mailfrom = mailfrom
			self.rcpttos = rcpttos
			self.org_data = data
			# replace new line (with tab or space) for possible mime translations (word wrap),
			self.data = re.sub(r"\n[\t ]", " ", uni_decode(data))
			self.ready = True
|
||||
|
||||
|
||||
class SMTPActionTest(unittest.TestCase, _SMTPActionTestCase):
|
||||
|
||||
def setUpClass():
|
||||
"""Call before tests."""
|
||||
unittest.F2B.SkipIfCfgMissing(action='smtp.py')
|
||||
|
||||
cls = SMTPActionTest
|
||||
cls.smtpd = TestSMTPServer(("localhost", 0), None)
|
||||
cls.port = cls.smtpd.socket.getsockname()[1]
|
||||
|
||||
## because of bug in loop (see loop in asyncserver.py) use it's loop instead of asyncore.loop:
|
||||
cls._active = True
|
||||
cls._loop_thread = threading.Thread(
|
||||
target=asyncserver.loop, kwargs={'active': lambda: cls._active})
|
||||
cls._loop_thread.daemon = True
|
||||
cls._loop_thread.start()
|
||||
|
||||
def tearDownClass():
|
||||
"""Call after tests."""
|
||||
cls = SMTPActionTest
|
||||
cls.smtpd.close()
|
||||
cls._active = False
|
||||
cls._loop_thread.join()
|
||||
|
||||
def setUp(self):
|
||||
"""Call before every test case."""
|
||||
unittest.F2B.SkipIfCfgMissing(action='smtp.py')
|
||||
super(SMTPActionTest, self).setUp()
|
||||
self.jail = DummyJail()
|
||||
pythonModule = os.path.join(CONFIG_DIR, "action.d", "smtp.py")
|
||||
pythonModuleName = os.path.basename(pythonModule.rstrip(".py"))
|
||||
customActionModule = importlib.machinery.SourceFileLoader(
|
||||
pythonModuleName, pythonModule).load_module()
|
||||
|
||||
self.action = customActionModule.Action(
|
||||
self.jail, "test", host="localhost:%i" % self.port)
|
||||
|
||||
def tearDown(self):
|
||||
"""Call after every test case."""
|
||||
self._reset_smtpd()
|
||||
super(SMTPActionTest, self).tearDown()
|
||||
|
||||
except ImportError as e:
|
||||
if tuple(sys.version_info) <= (3, 11):
|
||||
print("I: Skipping smtp tests: %s" % e)
|
||||
|
||||
|
||||
try:
|
||||
import asyncio
|
||||
from aiosmtpd.controller import Controller
|
||||
import socket
|
||||
import ssl
|
||||
import tempfile
|
||||
|
||||
	class TestSMTPHandler:
		"""aiosmtpd message handler that records the last received message.

		Attributes set per message: peer, mailfrom, rcpttos, org_data (raw),
		data (LF-normalized text), and `ready` (True once a message arrived).
		"""

		def __init__(self, *args):
			# becomes True after the first message was processed:
			self.ready = False

		async def handle_DATA(self, server, session, envelope):
			self.peer = session.peer
			self.mailfrom = envelope.mail_from
			self.rcpttos = envelope.rcpt_tos
			self.org_data = envelope.content.decode()
			# normalize CRLF -> LF:
			self.data = re.sub(r"\r\n", "\n", uni_decode(self.org_data))
			self.ready = True
			return '250 OK'

		async def handle_exception(self, error):
			# surface server-side errors in test output and report failure to client:
			print(error)
			return '542 Internal server error'
|
||||
|
||||
|
||||
class AIOSMTPActionTest(unittest.TestCase, _SMTPActionTestCase):
|
||||
|
||||
@classmethod
|
||||
def create_temp_self_signed_cert(cls):
|
||||
"""
|
||||
Create a self signed SSL certificate in temporary files for host
|
||||
'localhost'
|
||||
|
||||
Returns a tuple containing the certificate file name and the key
|
||||
file name.
|
||||
|
||||
The cert (ECC:256, 100years) created with:
|
||||
openssl req -x509 -out /tmp/f2b-localhost.crt -keyout /tmp/f2b-localhost.key -days 36500 -newkey ec:<(openssl ecparam -name prime256v1) -nodes -sha256 \
|
||||
-subj '/CN=localhost' -extensions EXT -config <( \
|
||||
printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth" \
|
||||
)
|
||||
cat /tmp/f2b-localhost.*
|
||||
rm /tmp/f2b-localhost.*
|
||||
|
||||
"""
|
||||
if hasattr(cls, 'crtfiles'): return cls.crtfiles
|
||||
cls.crtfiles = crtfiles = (tempfile.mktemp(".crt", "f2b_cert_"), tempfile.mktemp(".key", "f2b_cert_"))
|
||||
with open(crtfiles[0], 'w') as f:
|
||||
f.write(
|
||||
'-----BEGIN CERTIFICATE-----\n'
|
||||
'MIIBhDCCASugAwIBAgIUCuW168kD3G7XrpFwGHwE6vGfoJkwCgYIKoZIzj0EAwIw\n'
|
||||
'FDESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTIzMTIzMDE3NDUzNFoYDzIxMjMxMjA2\n'
|
||||
'MTc0NTM0WjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwWTATBgcqhkjOPQIBBggqhkjO\n'
|
||||
'PQMBBwNCAARDa8BO/UE4axzvnOQ/pCc/ZTp351X1TqIfjEFaMoZOItz1/MW3ZCuS\n'
|
||||
'2vuby3rMn0WZ59RWVotBqA6lcMVcgDq3o1kwVzAUBgNVHREEDTALgglsb2NhbGhv\n'
|
||||
'c3QwCwYDVR0PBAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBS8\n'
|
||||
'kH1Ucuq+wlex5DxxHDe1kKGdcjAKBggqhkjOPQQDAgNHADBEAiBmv05+BvXWMzLg\n'
|
||||
'TtF4McoQNrU/0TTKhV8o+mgd+47tMAIgaaSNRnfjGIfJMbXg7Bh53qOIu5+lnm1b\n'
|
||||
'ySygMgFmePs=\n'
|
||||
'-----END CERTIFICATE-----\n'
|
||||
)
|
||||
with open(crtfiles[1], 'w') as f:
|
||||
f.write(
|
||||
'-----BEGIN PRIVATE KEY-----\n'
|
||||
'MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgoBGcojKPZMYut7aP\n'
|
||||
'JGe2GW+2lVV0zJpgCsZ7816a9uqhRANCAARDa8BO/UE4axzvnOQ/pCc/ZTp351X1\n'
|
||||
'TqIfjEFaMoZOItz1/MW3ZCuS2vuby3rMn0WZ59RWVotBqA6lcMVcgDq3\n'
|
||||
'-----END PRIVATE KEY-----\n'
|
||||
)
|
||||
# return file names
|
||||
return crtfiles
|
||||
|
||||
@classmethod
|
||||
def _del_cert(cls):
|
||||
if hasattr(cls, 'crtfiles') and cls.crtfiles:
|
||||
for f in cls.crtfiles:
|
||||
try:
|
||||
os.unlink(f)
|
||||
except FileNotFoundError: pass
|
||||
cls.crtfiles = None
|
||||
|
||||
@staticmethod
|
||||
def _free_port():
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||
s.bind(('localhost', 0))
|
||||
return s.getsockname()[1]
|
||||
|
||||
def setUpClass():
|
||||
"""Call before tests."""
|
||||
unittest.F2B.SkipIfCfgMissing(action='smtp.py')
|
||||
|
||||
cert_file, cert_key = AIOSMTPActionTest.create_temp_self_signed_cert()
|
||||
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
|
||||
ssl_context.load_cert_chain(cert_file, cert_key)
|
||||
|
||||
cls = AIOSMTPActionTest
|
||||
cls.port = cls._free_port()
|
||||
cls.smtpd = TestSMTPHandler()
|
||||
cls.controller = Controller(cls.smtpd, hostname='localhost', server_hostname='localhost', port=cls.port,
|
||||
server_kwargs={'tls_context': ssl_context, 'require_starttls': False})
|
||||
# Run the event loop in a separate thread.
|
||||
cls.controller.start()
|
||||
|
||||
def tearDownClass():
|
||||
"""Call after tests."""
|
||||
cls = AIOSMTPActionTest
|
||||
cls.controller.stop()
|
||||
cls._del_cert()
|
||||
|
||||
def setUp(self):
|
||||
"""Call before every test case."""
|
||||
unittest.F2B.SkipIfCfgMissing(action='smtp.py')
|
||||
super(AIOSMTPActionTest, self).setUp()
|
||||
self.jail = DummyJail()
|
||||
pythonModule = os.path.join(CONFIG_DIR, "action.d", "smtp.py")
|
||||
pythonModuleName = os.path.basename(pythonModule.rstrip(".py"))
|
||||
ldr = importlib.machinery.SourceFileLoader(pythonModuleName, pythonModule)
|
||||
mod = types.ModuleType(ldr.name)
|
||||
ldr.exec_module(mod)
|
||||
|
||||
self.action = mod.Action(
|
||||
self.jail, "test", host="localhost:%i" % self.port)
|
||||
|
||||
self.action.ssl = True
|
||||
|
||||
def tearDown(self):
|
||||
"""Call after every test case."""
|
||||
self._reset_smtpd()
|
||||
super(AIOSMTPActionTest, self).tearDown()
|
||||
|
||||
except ImportError as e:
|
||||
if tuple(sys.version_info) >= (3, 10):
|
||||
print("I: Skipping SSL smtp tests: %s" % e)
|
||||
512
fail2ban-master/fail2ban/tests/actionstestcase.py
Normal file
512
fail2ban-master/fail2ban/tests/actionstestcase.py
Normal file
@@ -0,0 +1,512 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Daniel Black
|
||||
#
|
||||
|
||||
__author__ = "Daniel Black"
|
||||
__copyright__ = "Copyright (c) 2013 Daniel Black"
|
||||
__license__ = "GPL"
|
||||
|
||||
import time
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
from ..server.ticket import FailTicket
|
||||
from ..server.utils import Utils
|
||||
from .dummyjail import DummyJail
|
||||
from .utils import LogCaptureTestCase, with_alt_time, with_tmpdir, MyTime
|
||||
|
||||
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "files")
|
||||
|
||||
|
||||
class ExecuteActions(LogCaptureTestCase):
|
||||
|
||||
def setUp(self):
|
||||
"""Call before every test case."""
|
||||
super(ExecuteActions, self).setUp()
|
||||
self.__jail = DummyJail()
|
||||
self.__actions = self.__jail.actions
|
||||
|
||||
def tearDown(self):
|
||||
super(ExecuteActions, self).tearDown()
|
||||
|
||||
def defaultAction(self, o={}):
|
||||
self.__actions.add('ip')
|
||||
act = self.__actions['ip']
|
||||
act.actionstart = 'echo ip start'+o.get('start', '')
|
||||
act.actionban = 'echo ip ban <ip>'+o.get('ban', '')
|
||||
act.actionunban = 'echo ip unban <ip>'+o.get('unban', '')
|
||||
act.actioncheck = 'echo ip check'+o.get('check', '')
|
||||
act.actionflush = 'echo ip flush'+o.get('flush', '')
|
||||
act.actionstop = 'echo ip stop'+o.get('stop', '')
|
||||
return act
|
||||
|
||||
def testActionsAddDuplicateName(self):
|
||||
self.__actions.add('test')
|
||||
self.assertRaises(ValueError, self.__actions.add, 'test')
|
||||
|
||||
def testActionsManipulation(self):
|
||||
self.__actions.add('test')
|
||||
self.assertTrue(self.__actions['test'])
|
||||
self.assertIn('test', self.__actions)
|
||||
self.assertNotIn('nonexistent action', self.__actions)
|
||||
self.__actions.add('test1')
|
||||
del self.__actions['test']
|
||||
del self.__actions['test1']
|
||||
self.assertNotIn('test', self.__actions)
|
||||
self.assertEqual(len(self.__actions), 0)
|
||||
|
||||
self.__actions.setBanTime(127)
|
||||
self.assertEqual(self.__actions.getBanTime(),127)
|
||||
self.assertRaises(ValueError, self.__actions.removeBannedIP, '127.0.0.1')
|
||||
|
||||
def testAddBannedIP(self):
|
||||
self.assertEqual(self.__actions.addBannedIP('192.0.2.1'), 1)
|
||||
self.assertLogged('Ban 192.0.2.1')
|
||||
self.pruneLog()
|
||||
self.assertEqual(self.__actions.addBannedIP(['192.0.2.1', '192.0.2.2', '192.0.2.3']), 2)
|
||||
self.assertLogged('192.0.2.1 already banned')
|
||||
self.assertNotLogged('Ban 192.0.2.1')
|
||||
self.assertLogged('Ban 192.0.2.2')
|
||||
self.assertLogged('Ban 192.0.2.3')
|
||||
|
||||
def testActionsOutput(self):
|
||||
self.defaultAction()
|
||||
self.__actions.start()
|
||||
self.assertLogged("stdout: %r" % 'ip start', wait=True)
|
||||
self.__actions.stop()
|
||||
self.__actions.join()
|
||||
self.assertLogged("stdout: %r" % 'ip flush', "stdout: %r" % 'ip stop')
|
||||
self.assertEqual(self.__actions.status(),[("Currently banned", 0 ),
|
||||
("Total banned", 0 ), ("Banned IP list", [] )])
|
||||
self.assertEqual(self.__actions.status('short'),[("Currently banned", 0 ),
|
||||
("Total banned", 0 )])
|
||||
|
||||
def testAddActionPython(self):
	"""Python-file actions: successful load and start/stop, IOError for a
	missing file, TypeError for wrong constructor arguments."""
	self.__actions.add(
		"Action", os.path.join(TEST_FILES_DIR, "action.d/action.py"),
		{'opt1': 'value'})

	self.assertLogged("TestAction initialised")

	self.__actions.start()
	self.assertTrue(Utils.wait_for(lambda: self._is_logged("TestAction action start"), 3))

	self.__actions.stop()
	self.__actions.join()
	self.assertLogged("TestAction action stop")

	# non-existing action file:
	self.assertRaises(IOError,
		self.__actions.add, "Action3", "/does/not/exist.py", {})

	# With optional argument
	self.__actions.add(
		"Action4", os.path.join(TEST_FILES_DIR, "action.d/action.py"),
		{'opt1': 'value', 'opt2': 'value2'})
	# With too many arguments
	self.assertRaises(
		TypeError, self.__actions.add, "Action5",
		os.path.join(TEST_FILES_DIR, "action.d/action.py"),
		{'opt1': 'value', 'opt2': 'value2', 'opt3': 'value3'})
	# Missing required argument
	self.assertRaises(
		TypeError, self.__actions.add, "Action5",
		os.path.join(TEST_FILES_DIR, "action.d/action.py"), {})
def testAddPythonActionNOK(self):
	"""Defective Python actions: RuntimeError on load for files lacking the
	Action class/methods; start/stop failures of an erroring action are
	logged rather than raised."""
	self.assertRaises(RuntimeError, self.__actions.add,
		"Action", os.path.join(TEST_FILES_DIR,
			"action.d/action_noAction.py"),
		{})
	self.assertRaises(RuntimeError, self.__actions.add,
		"Action", os.path.join(TEST_FILES_DIR,
			"action.d/action_nomethod.py"),
		{})
	self.__actions.add(
		"Action", os.path.join(TEST_FILES_DIR,
			"action.d/action_errors.py"),
		{})
	self.__actions.start()
	self.assertTrue(Utils.wait_for(lambda: self._is_logged("Failed to start"), 3))
	self.__actions.stop()
	self.__actions.join()
	self.assertLogged("Failed to stop")
def testBanActionsAInfo(self):
	"""Mutations of the aInfo dict by one action must not leak into the
	next action executed for the same ticket (ban and unban paths)."""
	# Action which deletes IP address from aInfo
	self.__actions.add(
		"action1",
		os.path.join(TEST_FILES_DIR, "action.d/action_modifyainfo.py"),
		{})
	self.__actions.add(
		"action2",
		os.path.join(TEST_FILES_DIR, "action.d/action_modifyainfo.py"),
		{})
	self.__jail.putFailTicket(FailTicket("1.2.3.4"))
	self.__actions._Actions__checkBan()
	# Will fail if modification of aInfo from first action propagates
	# to second action, as both delete same key
	self.assertNotLogged("Failed to execute ban")
	self.assertLogged("action1 ban deleted aInfo IP")
	self.assertLogged("action2 ban deleted aInfo IP")

	self.__actions._Actions__flushBan()
	# Will fail if modification of aInfo from first action propagates
	# to second action, as both delete same key
	self.assertNotLogged("Failed to execute unban")
	self.assertLogged("action1 unban deleted aInfo IP")
	self.assertLogged("action2 unban deleted aInfo IP")
@with_alt_time
def testUnbanOnBusyBanBombing(self):
	"""Unban must interleave with a flood of bans despite lower precedence.

	If interleaving fails we would not see 'Unbanned 30' (rather
	'Unbanned 50'), because all unbans would then occur earliest at
	flushing (after stop).
	"""
	# each 3rd ban we should see an unban check (and up to 5 tickets gets unbanned):
	self.__actions.banPrecedence = 3
	self.__actions.unbanMaxCount = 5
	self.__actions.setBanTime(100)

	self.__actions.start()

	MyTime.setTime(0)  # avoid "expired bantime" (in 0.11)
	for i in range(20):
		self.__jail.putFailTicket(FailTicket("192.0.2.%d" % i, 0))

	# wait for last ban (all 20 tickets gets banned):
	self.assertLogged(' / 20,', wait=True)

	MyTime.setTime(200)  # unban time for 20 tickets reached

	for i in range(20, 50):
		self.__jail.putFailTicket(FailTicket("192.0.2.%d" % i, 200))

	# wait for last ban (all 50 tickets gets banned):
	self.assertLogged(' / 50,', wait=True)
	self.__actions.stop()
	self.__actions.join()

	self.assertLogged('Unbanned 30, 0 ticket(s)')
	self.assertNotLogged('Unbanned 50, 0 ticket(s)')
def testActionsConsistencyCheck(self):
	"""Invariant (actioncheck) failures during unban/flush trigger
	stop/start recovery; a later consistent environment bans/flushes
	normally, and final stop flushes only what is still pending."""
	act = self.defaultAction({'check': ' <family>', 'flush': ' <family>'})
	# flush for inet6 is intentionally "broken" here - test no unhandled except and invariant check:
	act['actionflush?family=inet6'] = act.actionflush + '; exit 1'
	act.actionstart_on_demand = True
	# force errors via check in ban/unban:
	act.actionban = "<actioncheck> ; " + act.actionban
	act.actionunban = "<actioncheck> ; " + act.actionunban
	self.__actions.start()
	self.assertNotLogged("stdout: %r" % 'ip start')

	self.assertEqual(self.__actions.addBannedIP('192.0.2.1'), 1)
	self.assertEqual(self.__actions.addBannedIP('2001:db8::1'), 1)
	self.assertLogged('Ban 192.0.2.1', 'Ban 2001:db8::1',
		"stdout: %r" % 'ip start',
		"stdout: %r" % 'ip ban 192.0.2.1',
		"stdout: %r" % 'ip ban 2001:db8::1',
		all=True, wait=True)

	# check should fail (so cause stop/start):
	self.pruneLog('[test-phase 1a] simulate inconsistent irreparable env by unban')
	act['actioncheck?family=inet6'] = act.actioncheck + '; exit 1'
	self.__actions.removeBannedIP('2001:db8::1')
	self.assertLogged('Invariant check failed. Unban is impossible.',
		wait=True)
	self.pruneLog('[test-phase 1b] simulate inconsistent irreparable env by flush')
	self.__actions._Actions__flushBan()
	self.assertLogged(
		"stdout: %r" % 'ip flush inet4',
		"stdout: %r" % 'ip flush inet6',
		'Failed to flush bans',
		'No flush occurred, do consistency check',
		'Invariant check failed. Trying to restore a sane environment',
		"stdout: %r" % 'ip stop',  # same for both families
		'Failed to flush bans',
		all=True, wait=True)

	# check succeeds:
	self.pruneLog('[test-phase 2] consistent env')
	act['actioncheck?family=inet6'] = act.actioncheck
	self.assertEqual(self.__actions.addBannedIP('2001:db8::1'), 1)
	self.assertLogged('Ban 2001:db8::1',
		"stdout: %r" % 'ip start',  # same for both families
		"stdout: %r" % 'ip ban 2001:db8::1',
		all=True, wait=True)
	self.assertNotLogged("stdout: %r" % 'ip check inet4',
		all=True)

	self.pruneLog('[test-phase 3] failed flush in consistent env')
	self.__actions._Actions__flushBan()
	self.assertLogged('Failed to flush bans',
		'No flush occurred, do consistency check',
		"stdout: %r" % 'ip flush inet6',
		"stdout: %r" % 'ip check inet6',
		all=True, wait=True)
	self.assertNotLogged(
		"stdout: %r" % 'ip flush inet4',
		"stdout: %r" % 'ip stop',
		"stdout: %r" % 'ip start',
		'Unable to restore environment',
		all=True)

	# stop, flush succeeds:
	self.pruneLog('[test-phase end] flush successful')
	act['actionflush?family=inet6'] = act.actionflush
	self.__actions.stop()
	self.__actions.join()
	self.assertLogged(
		"stdout: %r" % 'ip flush inet6',
		"stdout: %r" % 'ip stop',  # same for both families
		'action ip terminated',
		all=True, wait=True)
	# no flush for inet4 (already successfully flushed):
	self.assertNotLogged("ERROR",
		"stdout: %r" % 'ip flush inet4',
		'Unban tickets each individually',
		all=True)
def testActionsConsistencyCheckDiffFam(self):
	"""Same as testActionsConsistencyCheck, but with per-family start/stop
	commands and actionrepair_on_unban enabled, so recovery is scoped to
	the affected family (inet6) only."""
	act = self.defaultAction({'start': ' <family>', 'check': ' <family>', 'flush': ' <family>', 'stop': ' <family>'})
	# flush for inet6 is intentionally "broken" here - test no unhandled except and invariant check:
	act['actionflush?family=inet6'] = act.actionflush + '; exit 1'
	act.actionstart_on_demand = True
	act.actionrepair_on_unban = True
	# force errors via check in ban/unban:
	act.actionban = "<actioncheck> ; " + act.actionban
	act.actionunban = "<actioncheck> ; " + act.actionunban
	self.__actions.start()
	self.assertNotLogged("stdout: %r" % 'ip start')

	self.assertEqual(self.__actions.addBannedIP('192.0.2.1'), 1)
	self.assertEqual(self.__actions.addBannedIP('2001:db8::1'), 1)
	self.assertLogged('Ban 192.0.2.1', 'Ban 2001:db8::1',
		"stdout: %r" % 'ip start inet4',
		"stdout: %r" % 'ip ban 192.0.2.1',
		"stdout: %r" % 'ip start inet6',
		"stdout: %r" % 'ip ban 2001:db8::1',
		all=True, wait=True)

	# check should fail (so cause stop/start):
	act['actioncheck?family=inet6'] = act.actioncheck + '; exit 1'
	self.pruneLog('[test-phase 1a] simulate inconsistent irreparable env by unban')
	self.__actions.removeBannedIP('2001:db8::1')
	self.assertLogged('Invariant check failed. Trying to restore a sane environment',
		"stdout: %r" % 'ip stop inet6',
		all=True, wait=True)
	self.assertNotLogged(
		"stdout: %r" % 'ip start inet6',  # start on demand (not on repair)
		"stdout: %r" % 'ip stop inet4',  # family inet4 is not affected
		"stdout: %r" % 'ip start inet4',
		all=True)

	self.pruneLog('[test-phase 1b] simulate inconsistent irreparable env by ban')
	self.assertEqual(self.__actions.addBannedIP('2001:db8::1'), 1)
	self.assertLogged('Invariant check failed. Trying to restore a sane environment',
		"stdout: %r" % 'ip stop inet6',
		"stdout: %r" % 'ip start inet6',
		"stdout: %r" % 'ip check inet6',
		'Unable to restore environment',
		'Failed to execute ban',
		all=True, wait=True)
	self.assertNotLogged(
		"stdout: %r" % 'ip stop inet4',  # family inet4 is not affected
		"stdout: %r" % 'ip start inet4',
		all=True)

	act['actioncheck?family=inet6'] = act.actioncheck
	self.assertEqual(self.__actions.addBannedIP('2001:db8::2'), 1)
	act['actioncheck?family=inet6'] = act.actioncheck + '; exit 1'
	self.pruneLog('[test-phase 1c] simulate inconsistent irreparable env by flush')
	self.__actions._Actions__flushBan()
	self.assertLogged(
		"stdout: %r" % 'ip flush inet4',
		"stdout: %r" % 'ip flush inet6',
		'Failed to flush bans',
		'No flush occurred, do consistency check',
		'Invariant check failed. Trying to restore a sane environment',
		"stdout: %r" % 'ip stop inet6',
		'Failed to flush bans in jail',
		all=True, wait=True)
	# start/stop should be called for inet6 only:
	self.assertNotLogged(
		"stdout: %r" % 'ip stop inet4',
		all=True)

	# check succeeds:
	self.pruneLog('[test-phase 2] consistent env')
	act['actioncheck?family=inet6'] = act.actioncheck
	self.assertEqual(self.__actions.addBannedIP('2001:db8::1'), 1)
	self.assertLogged('Ban 2001:db8::1',
		"stdout: %r" % 'ip start inet6',
		"stdout: %r" % 'ip ban 2001:db8::1',
		all=True, wait=True)
	self.assertNotLogged(
		"stdout: %r" % 'ip check inet4',
		"stdout: %r" % 'ip start inet4',
		all=True)

	self.pruneLog('[test-phase 3] failed flush in consistent env')
	act['actioncheck?family=inet6'] = act.actioncheck
	self.__actions._Actions__flushBan()
	self.assertLogged('Failed to flush bans',
		'No flush occurred, do consistency check',
		"stdout: %r" % 'ip flush inet6',
		"stdout: %r" % 'ip check inet6',
		all=True, wait=True)
	self.assertNotLogged(
		"stdout: %r" % 'ip flush inet4',
		"stdout: %r" % 'ip stop inet4',
		"stdout: %r" % 'ip start inet4',
		"stdout: %r" % 'ip stop inet6',
		"stdout: %r" % 'ip start inet6',
		all=True)

	# stop, flush succeeds:
	self.pruneLog('[test-phase end] flush successful')
	act['actionflush?family=inet6'] = act.actionflush
	self.__actions.stop()
	self.__actions.join()
	self.assertLogged(
		"stdout: %r" % 'ip flush inet6',
		"stdout: %r" % 'ip stop inet4',
		"stdout: %r" % 'ip stop inet6',
		'action ip terminated',
		all=True, wait=True)
	# no flush for inet4 (already successfully flushed):
	self.assertNotLogged("ERROR",
		"stdout: %r" % 'ip flush inet4',
		'Unban tickets each individually',
		all=True)
@with_alt_time
@with_tmpdir
def testActionsRebanBrokenAfterRepair(self, tmp):
	"""After actionrepair restores a broken environment, previously banned
	tickets are rebanned (actionreban); unchanged environments produce
	'already banned' without reban; reban errors are logged, not raised."""
	act = self.defaultAction({
		'start': ' <family>; touch "<FN>"',
		'check': ' <family>; test -f "<FN>"',
		'flush': ' <family>; echo -n "" > "<FN>"',
		'stop':  ' <family>; rm -f "<FN>"',
		'ban':   ' <family>; echo "<ip> <family>" >> "<FN>"',
	})
	act['FN'] = tmp + '/<family>'
	act.actionstart_on_demand = True
	act.actionrepair = 'echo ip repair <family>; touch "<FN>"'
	act.actionreban = 'echo ip reban <ip> <family>; echo "<ip> <family> -- rebanned" >> "<FN>"'
	self.pruneLog('[test-phase 0] initial ban')
	self.assertEqual(self.__actions.addBannedIP(['192.0.2.1', '2001:db8::1']), 2)
	self.assertLogged('Ban 192.0.2.1', 'Ban 2001:db8::1',
		"stdout: %r" % 'ip start inet4',
		"stdout: %r" % 'ip ban 192.0.2.1 inet4',
		"stdout: %r" % 'ip start inet6',
		"stdout: %r" % 'ip ban 2001:db8::1 inet6',
		all=True)

	self.pruneLog('[test-phase 1] check ban')
	self.dumpFile(tmp + '/inet4')
	self.assertLogged('192.0.2.1 inet4')
	self.assertNotLogged('2001:db8::1 inet6')
	self.pruneLog()
	self.dumpFile(tmp + '/inet6')
	self.assertLogged('2001:db8::1 inet6')
	self.assertNotLogged('192.0.2.1 inet4')

	# simulate 3 seconds past:
	MyTime.setTime(MyTime.time() + 4)
	# already banned produces events:
	self.pruneLog('[test-phase 2] check already banned')
	self.assertEqual(self.__actions.addBannedIP(['192.0.2.1', '2001:db8::1', '2001:db8::2']), 1)
	self.assertLogged(
		'192.0.2.1 already banned', '2001:db8::1 already banned', 'Ban 2001:db8::2',
		"stdout: %r" % 'ip check inet4',  # both checks occurred
		"stdout: %r" % 'ip check inet6',
		all=True)
	self.dumpFile(tmp + '/inet4')
	self.dumpFile(tmp + '/inet6')
	# no reban should occur:
	self.assertNotLogged('Reban 192.0.2.1', 'Reban 2001:db8::1',
		"stdout: %r" % 'ip ban 192.0.2.1 inet4',
		"stdout: %r" % 'ip reban 192.0.2.1 inet4',
		"stdout: %r" % 'ip ban 2001:db8::1 inet6',
		"stdout: %r" % 'ip reban 2001:db8::1 inet6',
		'192.0.2.1 inet4 -- repaired',
		'2001:db8::1 inet6 -- repaired',
		all=True)

	# simulate 3 seconds past:
	MyTime.setTime(MyTime.time() + 4)
	# break env (remove both files, so check would fail):
	os.remove(tmp + '/inet4')
	os.remove(tmp + '/inet6')
	# test again already banned (it shall cause reban now):
	self.pruneLog('[test-phase 3a] check reban after sane env repaired')
	self.assertEqual(self.__actions.addBannedIP(['192.0.2.1', '2001:db8::1']), 2)
	self.assertLogged(
		"Invariant check failed. Trying to restore a sane environment",
		"stdout: %r" % 'ip repair inet4',  # both repairs occurred
		"stdout: %r" % 'ip repair inet6',
		"Reban 192.0.2.1, action 'ip'", "Reban 2001:db8::1, action 'ip'",  # both rebans also
		"stdout: %r" % 'ip reban 192.0.2.1 inet4',
		"stdout: %r" % 'ip reban 2001:db8::1 inet6',
		all=True)

	# now last IP (2001:db8::2) - no repair, but still old epoch of ticket, so it gets rebanned:
	self.pruneLog('[test-phase 3b] check reban by epoch mismatch (without repair)')
	self.assertEqual(self.__actions.addBannedIP('2001:db8::2'), 1)
	self.assertLogged(
		"Reban 2001:db8::2, action 'ip'",
		"stdout: %r" % 'ip reban 2001:db8::2 inet6',
		all=True)
	self.assertNotLogged(
		"Invariant check failed. Trying to restore a sane environment",
		"stdout: %r" % 'ip repair inet4',
		"stdout: %r" % 'ip repair inet6',
		"Reban 192.0.2.1, action 'ip'", "Reban 2001:db8::1, action 'ip'",
		"stdout: %r" % 'ip reban 192.0.2.1 inet4',
		"stdout: %r" % 'ip reban 2001:db8::1 inet6',
		all=True)

	# and bans present in files:
	self.pruneLog('[test-phase 4] check reban')
	self.dumpFile(tmp + '/inet4')
	self.assertLogged('192.0.2.1 inet4 -- rebanned')
	self.assertNotLogged('2001:db8::1 inet6 -- rebanned')
	self.pruneLog()
	self.dumpFile(tmp + '/inet6')
	self.assertLogged(
		'2001:db8::1 inet6 -- rebanned',
		'2001:db8::2 inet6 -- rebanned', all=True)
	self.assertNotLogged('192.0.2.1 inet4 -- rebanned')

	# coverage - intended error in reban (no unhandled exception, message logged):
	act.actionreban = ''
	act.actionban = 'exit 1'
	self.assertEqual(self.__actions._Actions__reBan(FailTicket("192.0.2.1", 0)), 0)
	self.assertLogged(
		'Failed to execute reban',
		'Error banning 192.0.2.1', all=True)
679
fail2ban-master/fail2ban/tests/actiontestcase.py
Normal file
679
fail2ban-master/fail2ban/tests/actiontestcase.py
Normal file
@@ -0,0 +1,679 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
import time
|
||||
import unittest
|
||||
|
||||
from ..server.action import CommandAction, CallingMap, substituteRecursiveTags
|
||||
from ..server.actions import OrderedDict, Actions
|
||||
from ..server.utils import Utils
|
||||
|
||||
from .dummyjail import DummyJail
|
||||
from .utils import pid_exists, with_tmpdir, LogCaptureTestCase
|
||||
|
||||
|
||||
class CommandActionTest(LogCaptureTestCase):
|
||||
|
||||
def setUp(self):
	"""Call before every test case: build a CommandAction and wrap its
	start() so tearDown only stops an action that actually started."""
	LogCaptureTestCase.setUp(self)
	self.__action = CommandAction(None, "Test")
	# prevent execute stop if start fails (or event not started at all):
	self.__action_started = False
	orgstart = self.__action.start
	def _action_start():
		self.__action_started = True
		return orgstart()
	self.__action.start = _action_start
def tearDown(self):
	"""Call after every test case: stop the action only if it was started."""
	if self.__action_started:
		self.__action.stop()
	LogCaptureTestCase.tearDown(self)
||||
def testSubstituteRecursiveTags(self):
	"""substituteRecursiveTags: cyclic/unresolvable references raise
	ValueError; repeated, composite and embedded tags resolve; missing
	and escaped tags are left as-is."""
	aInfo = {
		'HOST': "192.0.2.0",
		'ABC': "123 <HOST>",
		'xyz': "890 <ABC>",
	}
	# Recursion is bad
	self.assertRaises(ValueError,
		lambda: substituteRecursiveTags({'A': '<A>'}))
	self.assertRaises(ValueError,
		lambda: substituteRecursiveTags({'A': '<B>', 'B': '<A>'}))
	self.assertRaises(ValueError,
		lambda: substituteRecursiveTags({'A': '<B>', 'B': '<C>', 'C': '<A>'}))
	# Unresolveable substitution
	self.assertRaises(ValueError,
		lambda: substituteRecursiveTags({'A': 'to=<B> fromip=<IP>', 'C': '<B>', 'B': '<C>', 'D': ''}))
	self.assertRaises(ValueError,
		lambda: substituteRecursiveTags({'failregex': 'to=<honeypot> fromip=<IP>', 'sweet': '<honeypot>', 'honeypot': '<sweet>', 'ignoreregex': ''}))
	# No cyclic recursion, just multiple replacement of tag <T>, should be successful:
	self.assertEqual(substituteRecursiveTags(OrderedDict(
			(('X', 'x=x<T>'), ('T', '1'), ('Z', '<X> <T> <Y>'), ('Y', 'y=y<T>')))
		), {'X': 'x=x1', 'T': '1', 'Y': 'y=y1', 'Z': 'x=x1 1 y=y1'}
	)
	# No cyclic recursion, just multiple replacement of tag <T> in composite tags, should be successful:
	self.assertEqual(substituteRecursiveTags(OrderedDict(
			(('X', 'x=x<T> <Z> <<R1>> <<R2>>'), ('R1', 'Z'), ('R2', 'Y'), ('T', '1'), ('Z', '<T> <Y>'), ('Y', 'y=y<T>')))
		), {'X': 'x=x1 1 y=y1 1 y=y1 y=y1', 'R1': 'Z', 'R2': 'Y', 'T': '1', 'Z': '1 y=y1', 'Y': 'y=y1'}
	)
	# No cyclic recursion, just multiple replacement of same tags, should be successful:
	self.assertEqual(substituteRecursiveTags(OrderedDict((
			('actionstart', 'ipset create <ipmset> hash:ip timeout <bantime> family <ipsetfamily>\n<iptables> -I <chain> <actiontype>'),
			('ipmset', 'f2b-<name>'),
			('name', 'any'),
			('bantime', '600'),
			('ipsetfamily', 'inet'),
			('iptables', 'iptables <lockingopt>'),
			('lockingopt', '-w'),
			('chain', 'INPUT'),
			('actiontype', '<multiport>'),
			('multiport', '-p <protocol> -m multiport --dports <port> -m set --match-set <ipmset> src -j <blocktype>'),
			('protocol', 'tcp'),
			('port', 'ssh'),
			('blocktype', 'REJECT',),
		))
	), OrderedDict((
		('actionstart', 'ipset create f2b-any hash:ip timeout 600 family inet\niptables -w -I INPUT -p tcp -m multiport --dports ssh -m set --match-set f2b-any src -j REJECT'),
		('ipmset', 'f2b-any'),
		('name', 'any'),
		('bantime', '600'),
		('ipsetfamily', 'inet'),
		('iptables', 'iptables -w'),
		('lockingopt', '-w'),
		('chain', 'INPUT'),
		('actiontype', '-p tcp -m multiport --dports ssh -m set --match-set f2b-any src -j REJECT'),
		('multiport', '-p tcp -m multiport --dports ssh -m set --match-set f2b-any src -j REJECT'),
		('protocol', 'tcp'),
		('port', 'ssh'),
		('blocktype', 'REJECT')
	))
	)
	# Cyclic recursion by composite tag creation, tags "create" another tag, that closes cycle:
	self.assertRaises(ValueError, lambda: substituteRecursiveTags(OrderedDict((
		('A', '<<B><C>>'),
		('B', 'D'), ('C', 'E'),
		('DE', 'cycle <A>'),
	))))
	self.assertRaises(ValueError, lambda: substituteRecursiveTags(OrderedDict((
		('DE', 'cycle <A>'),
		('A', '<<B><C>>'),
		('B', 'D'), ('C', 'E'),
	))))

	# missing tags are ok
	self.assertEqual(substituteRecursiveTags({'A': '<C>'}), {'A': '<C>'})
	self.assertEqual(substituteRecursiveTags({'A': '<C> <D> <X>', 'X': 'fun'}), {'A': '<C> <D> fun', 'X': 'fun'})
	self.assertEqual(substituteRecursiveTags({'A': '<C> <B>', 'B': 'cool'}), {'A': '<C> cool', 'B': 'cool'})
	# Escaped tags should be ignored
	self.assertEqual(substituteRecursiveTags({'A': '<matches> <B>', 'B': 'cool'}), {'A': '<matches> cool', 'B': 'cool'})
	# Multiple stuff on same line is ok
	self.assertEqual(substituteRecursiveTags({'failregex': 'to=<honeypot> fromip=<IP> evilperson=<honeypot>', 'honeypot': 'pokie', 'ignoreregex': ''}),
		{'failregex': "to=pokie fromip=<IP> evilperson=pokie",
			'honeypot': 'pokie',
			'ignoreregex': '',
		})
	# rest is just cool
	self.assertEqual(substituteRecursiveTags(aInfo),
		{'HOST': "192.0.2.0",
			'ABC': '123 192.0.2.0',
			'xyz': '890 123 192.0.2.0',
		})
	# obscure embedded case
	self.assertEqual(substituteRecursiveTags({'A': '<<PREF>HOST>', 'PREF': 'IPV4'}),
		{'A': '<IPV4HOST>', 'PREF': 'IPV4'})
	self.assertEqual(substituteRecursiveTags({'A': '<<PREF>HOST>', 'PREF': 'IPV4', 'IPV4HOST': '1.2.3.4'}),
		{'A': '1.2.3.4', 'PREF': 'IPV4', 'IPV4HOST': '1.2.3.4'})
	# more embedded within a string and two interpolations
	self.assertEqual(substituteRecursiveTags({'A': 'A <IP<PREF>HOST> B IP<PREF> C', 'PREF': 'V4', 'IPV4HOST': '1.2.3.4'}),
		{'A': 'A 1.2.3.4 B IPV4 C', 'PREF': 'V4', 'IPV4HOST': '1.2.3.4'})
def testSubstRec_DontTouchUnusedCallable(self):
	"""Callables in a CallingMap are only evaluated when a referencing tag
	is actually substituted; dynamic values are never re-substituted
	recursively (guards against foreign-input injection)."""
	cm = CallingMap({
		'A': 0,
		'B': lambda self: '<A><A>',
		'C': '',
		'D': ''
	})
	#
	# should raise no exceptions:
	substituteRecursiveTags(cm)
	# add exception tag:
	cm['C'] = lambda self, i=0: 5 // int(self['A'])  # raise error by access
	# test direct get of callable (should raise an error):
	self.assertRaises(ZeroDivisionError, lambda: cm['C'])
	# should raise no exceptions (tag "C" still unused):
	substituteRecursiveTags(cm)
	# add reference to "broken" tag:
	cm['D'] = 'test=<C>'
	# should raise an exception (BOOM by replacement of tag "D" recursive):
	self.assertRaises(ZeroDivisionError, lambda: substituteRecursiveTags(cm))
	#
	# should raise no exceptions:
	self.assertEqual(self.__action.replaceTag('test=<A>', cm), "test=0")
	# **Important**: recursive replacement of dynamic data from calling map should be prohibited,
	# otherwise may be vulnerable on foreign user-input:
	self.assertEqual(self.__action.replaceTag('test=<A>--<B>--<A>', cm), "test=0--<A><A>--0")
	# should raise an exception (BOOM by replacement of tag "C"):
	self.assertRaises(ZeroDivisionError, lambda: self.__action.replaceTag('test=<C>', cm))
	# should raise no exceptions (replaces tag "D" only):
	self.assertEqual(self.__action.replaceTag('<D>', cm), "test=<C>")
def testReplaceTag(self):
	"""replaceTag: <br> expansion, plain/recursive tag substitution,
	shell-escaping of *matches* tags, and callable tag values."""
	aInfo = {
		'HOST': "192.0.2.0",
		'ABC': "123",
		'xyz': "890",
	}
	self.assertEqual(
		self.__action.replaceTag("Text<br>text", aInfo),
		"Text\ntext")
	self.assertEqual(
		self.__action.replaceTag("Text <HOST> text", aInfo),
		"Text 192.0.2.0 text")
	self.assertEqual(
		self.__action.replaceTag("Text <xyz> text <ABC> ABC", aInfo),
		"Text 890 text 123 ABC")
	# special characters inside <matches>-like tags are shell-escaped:
	self.assertEqual(
		self.__action.replaceTag("<matches>",
			{'matches': "some >char< should \\< be[ escap}ed&\n"}),
		"some \\>char\\< should \\\\\\< be\\[ escap\\}ed\\&\\n")
	self.assertEqual(
		self.__action.replaceTag("<ipmatches>",
			{'ipmatches': "some >char< should \\< be[ escap}ed&\n"}),
		"some \\>char\\< should \\\\\\< be\\[ escap\\}ed\\&\\n")
	self.assertEqual(
		self.__action.replaceTag("<ipjailmatches>",
			{'ipjailmatches': "some >char< should \\< be[ escap}ed&\r\n"}),
		"some \\>char\\< should \\\\\\< be\\[ escap\\}ed\\&\\r\\n")

	# Recursive
	aInfo["ABC"] = "<xyz>"
	self.assertEqual(
		self.__action.replaceTag("Text <xyz> text <ABC> ABC", aInfo),
		"Text 890 text 890 ABC")

	# Callable
	self.assertEqual(
		self.__action.replaceTag("09 <matches> 11",
			CallingMap(matches=lambda self: str(10))),
		"09 10 11")
def testReplaceNoTag(self):
	"""A callable map entry is not evaluated when its tag does not appear
	in the query (the lambda would raise ValueError if called)."""
	self.assertEqual(
		self.__action.replaceTag("abc",
			CallingMap(matches=lambda self: int("a"))), "abc")
def testReplaceTagSelfRecursion(self):
	"""Self-referencing property and query definitions are detected and
	raise ValueError instead of recursing forever."""
	setattr(self.__action, 'a', "<a")
	setattr(self.__action, 'b', "c>")
	setattr(self.__action, 'b?family=inet6', "b>")
	setattr(self.__action, 'ac', "<a><b>")
	setattr(self.__action, 'ab', "<ac>")
	setattr(self.__action, 'x?family=inet6', "")
	# produce self-referencing properties except:
	self.assertRaisesRegex(ValueError, r"properties contain self referencing definitions",
		lambda: self.__action.replaceTag("<a><b>",
			self.__action._properties, conditional="family=inet4")
	)
	# remove self-referencing in props:
	delattr(self.__action, 'ac')
	# produce self-referencing query except:
	self.assertRaisesRegex(ValueError, r"possible self referencing definitions in query",
		lambda: self.__action.replaceTag("<x"*30+">"*30,
			self.__action._properties, conditional="family=inet6")
	)
def testReplaceTagConditionalCached(self):
	"""Conditional (per-family) tag replacement is cached in _substCache,
	and changing any property invalidates that cache."""
	setattr(self.__action, 'abc', "123")
	setattr(self.__action, 'abc?family=inet4', "345")
	setattr(self.__action, 'abc?family=inet6', "567")
	setattr(self.__action, 'xyz', "890-<abc>")
	setattr(self.__action, 'banaction', "Text <xyz> text <abc>")
	# test replacement in sub tags and direct, conditional, cached:
	cache = self.__action._substCache
	for i in range(2):
		self.assertEqual(
			self.__action.replaceTag("<banaction> '<abc>'", self.__action._properties,
				conditional="", cache=cache),
			"Text 890-123 text 123 '123'")
		self.assertEqual(
			self.__action.replaceTag("<banaction> '<abc>'", self.__action._properties,
				conditional="family=inet4", cache=cache),
			"Text 890-345 text 345 '345'")
		self.assertEqual(
			self.__action.replaceTag("<banaction> '<abc>'", self.__action._properties,
				conditional="family=inet6", cache=cache),
			"Text 890-567 text 567 '567'")
	self.assertTrue(len(cache) >= 3)
	# set one parameter - internal properties and cache should be reset:
	setattr(self.__action, 'xyz', "000-<abc>")
	self.assertEqual(len(cache), 0)
	# test again, should have 000 instead of 890:
	for i in range(2):
		self.assertEqual(
			self.__action.replaceTag("<banaction> '<abc>'", self.__action._properties,
				conditional="", cache=cache),
			"Text 000-123 text 123 '123'")
		self.assertEqual(
			self.__action.replaceTag("<banaction> '<abc>'", self.__action._properties,
				conditional="family=inet4", cache=cache),
			"Text 000-345 text 345 '345'")
		self.assertEqual(
			self.__action.replaceTag("<banaction> '<abc>'", self.__action._properties,
				conditional="family=inet6", cache=cache),
			"Text 000-567 text 567 '567'")
	self.assertTrue(len(cache) >= 3)
@with_tmpdir
def testExecuteActionBan(self, tmp):
	"""Action command properties round-trip through their setters; the
	first ban without prior start fails the invariant check, then the
	repair (== start) command restores the environment."""
	tmp += "/fail2ban.test"
	self.__action.actionstart = "touch '%s'" % tmp
	self.__action.actionrepair = self.__action.actionstart
	self.assertEqual(self.__action.actionstart, "touch '%s'" % tmp)
	self.__action.actionstop = "rm -f '%s'" % tmp
	self.assertEqual(self.__action.actionstop, "rm -f '%s'" % tmp)
	self.__action.actionban = "<actioncheck> && echo -n"
	self.assertEqual(self.__action.actionban, "<actioncheck> && echo -n")
	self.__action.actioncheck = "[ -e '%s' ]" % tmp
	self.assertEqual(self.__action.actioncheck, "[ -e '%s' ]" % tmp)
	self.__action.actionunban = "true"
	self.assertEqual(self.__action.actionunban, 'true')
	self.pruneLog()

	self.assertNotLogged('returned')
	# no action was actually executed yet

	# start on demand is false, so it should cause failure on first attempt of ban:
	self.__action.ban({'ip': None})
	self.assertLogged('Invariant check failed')
	self.assertLogged('returned successfully')
	self.__action.stop()
	self.assertLogged(self.__action.actionstop)
def testExecuteActionEmptyUnban(self):
	"""Unban runs only for actions that still hold banned items (a flush empties them)."""
	self.__action.actionban = ""
	self.__action.actionunban = ""
	self.__action.actionflush = "echo -n 'flush'"
	self.__action.actionstop = "echo -n 'stop'"
	self.__action.start()
	self.__action.ban({})
	self.pruneLog()
	# there is a banned item, so the (empty) unban command is processed:
	self.__action.unban({})
	self.assertLogged('Nothing to do', wait=True)
	# same again, but with an interim flush - afterwards no unban happens at all:
	self.__action.ban({})
	self.pruneLog('[phase 2]')
	self.__action.flush()
	self.__action.unban({})
	self.__action.stop()
	self.assertLogged('stop', wait=True)
	self.assertNotLogged('Nothing to do')
|
||||
|
||||
@with_tmpdir
def testExecuteActionStartCtags(self, tmp):
	"""Custom tags (here <HOST>) are substituted in start/stop commands."""
	tmp += '/fail2ban.test'
	self.__action.HOST = "192.0.2.0"
	self.__action.actionstart = "touch '%s.<HOST>'" % tmp
	self.__action.actionstop = "rm -f '%s.<HOST>'" % tmp
	# the check expects the already substituted file name:
	self.__action.actioncheck = "[ -e '%s.192.0.2.0' ]" % tmp
	self.__action.start()
	self.__action.consistencyCheck()
|
||||
|
||||
@with_tmpdir
def testExecuteActionCheckRestoreEnvironment(self, tmp):
	"""Failed invariant check: restore fails without a start command, succeeds with one."""
	tmp += '/fail2ban.test'
	# phase 1: no start command at all - the environment cannot be restored:
	self.__action.actionstart = ""
	self.__action.actionstop = "rm -f '%s'" % tmp
	self.__action.actionban = "rm '%s'" % tmp
	self.__action.actioncheck = "[ -e '%s' ]" % tmp
	self.assertRaises(RuntimeError, self.__action.ban, {'ip': None})
	self.assertLogged('Invariant check failed', 'Unable to restore environment', all=True)
	# phase 2: stop produces an error, but the start that follows succeeds:
	self.pruneLog('[phase 2]')
	self.__action.actionstart = "touch '%s'" % tmp
	self.__action.actionstop = "rm '%s'" % tmp
	self.__action.actionban = """<actioncheck> && printf "%%%%b\n" <ip> >> '%s'""" % tmp
	self.__action.actioncheck = "[ -e '%s' ]" % tmp
	self.__action.ban({'ip': None})
	self.assertLogged('Invariant check failed')
	self.assertNotLogged('Unable to restore environment')
|
||||
|
||||
@with_tmpdir
def testExecuteActionCheckOnBanFailure(self, tmp):
	"""After a failing ban, repair (or stop/start) restores the environment and the ban is retried."""
	tmp += '/fail2ban.test'
	self.__action.actionstart = "touch '%s'; echo 'started ...'" % tmp
	self.__action.actionstop = "rm -f '%s'" % tmp
	self.__action.actionban = "[ -e '%s' ] && echo 'banned '<ip>" % tmp
	self.__action.actioncheck = "[ -e '%s' ] && echo 'check ok' || { echo 'check failed'; exit 1; }" % tmp
	self.__action.actionrepair = "echo 'repair ...'; touch '%s'" % tmp
	self.__action.actionstart_on_demand = False
	self.__action.start()
	# phase 1: recovery via repair;
	# phase 2: recovery via stop/start (no repair), not on demand;
	# phase 3: recovery via stop/start (no repair), start on demand.
	for phase in (1, 2, 3):
		self.pruneLog('[phase %s]' % phase)
		# first ban succeeds (environment is intact):
		self.__action.ban({'ip': '192.0.2.1'})
		self.assertLogged(
			"stdout: %r" % 'banned 192.0.2.1', all=True)
		self.assertNotLogged("Invariant check failed. Trying",
			"stdout: %r" % 'check failed',
			"stdout: %r" % ('repair ...' if self.__action.actionrepair else 'started ...'),
			"stdout: %r" % 'check ok', all=True)
		# sabotage the environment to force a ban error:
		os.remove(tmp)
		self.pruneLog()
		# second ban: failure is recognized, repair/start + check succeed, then the ban:
		self.__action.ban({'ip': '192.0.2.2'})
		self.assertLogged("Invariant check failed. Trying",
			"stdout: %r" % 'check failed',
			"stdout: %r" % ('repair ...' if self.__action.actionrepair else 'started ...'),
			"stdout: %r" % 'check ok',
			"stdout: %r" % 'banned 192.0.2.2', all=True)
		# next phase: first drop repair, thereafter enable start on demand:
		if self.__action.actionrepair:
			self.__action.actionrepair = ""
		elif not self.__action.actionstart_on_demand:
			self.__action.actionstart_on_demand = True
|
||||
|
||||
@with_tmpdir
def testExecuteActionCheckRepairEnvironment(self, tmp):
	"""Repair either really restores the invariant or the ban fails with RuntimeError."""
	tmp += '/fail2ban.test'
	self.__action.actionstart = ""
	self.__action.actionstop = ""
	self.__action.actionban = "rm '%s'" % tmp
	self.__action.actioncheck = "[ -e '%s' ]" % tmp
	# phase 1: repair recreates the marker file - ban proceeds:
	self.__action.actionrepair = "echo 'repair ...'; touch '%s'" % tmp
	self.__action.ban({'ip': None})
	self.assertLogged("Invariant check failed. Trying", "echo 'repair ...'", all=True)
	self.pruneLog()
	# phase 2: repair only pretends (no touch) - restore fails:
	self.__action.actionrepair = "echo 'repair ...'"
	self.assertRaises(RuntimeError, self.__action.ban, {'ip': None})
	self.assertLogged(
		"Invariant check failed. Trying",
		"echo 'repair ...'",
		"Unable to restore environment", all=True)
|
||||
|
||||
def testExecuteActionChangeCtags(self):
	"""An unknown custom tag raises AttributeError until it gets assigned."""
	self.assertRaises(AttributeError, getattr, self.__action, "ROST")
	self.__action.ROST = "192.0.2.0"
	self.assertEqual(self.__action.ROST, "192.0.2.0")
|
||||
|
||||
def testExecuteActionUnbanAinfo(self):
	"""F-* sub-tags of the ticket data are resolved in ban/unban commands."""
	aInfo = CallingMap({
		'ABC': "123",
		'ip': '192.0.2.1',
		# failure data, resolved lazily on access:
		'F-*': lambda self: {
			'fid': 111,
			'fport': 222,
			'user': "tester"
		}
	})
	self.__action.actionban = "echo '<ABC>, failure <F-ID> of <F-USER> -<F-TEST>- from <ip>:<F-PORT>'"
	self.__action.actionunban = "echo '<ABC>, user <F-USER> unbanned'"
	self.__action.ban(aInfo)
	self.__action.unban(aInfo)
	# unknown <F-TEST> is replaced by an empty string:
	self.assertLogged(
		" -- stdout: '123, failure 111 of tester -- from 192.0.2.1:222'",
		" -- stdout: '123, user tester unbanned'",
		all=True
	)
|
||||
|
||||
def testExecuteActionStartEmpty(self):
	"""Empty commands are accepted and short-circuit as successful no-ops."""
	self.__action.actionstart = ""
	self.__action.start()
	# both the public and the internal execution path report 'Nothing to do':
	self.assertTrue(self.__action.executeCmd(""))
	self.assertLogged('Nothing to do')
	self.pruneLog()
	self.assertTrue(self.__action._processCmd(""))
	self.assertLogged('Nothing to do')
	self.pruneLog()
|
||||
|
||||
def testExecuteWithVars(self):
	"""Values passed via varsDict must reach the shell verbatim - no injection possible."""
	self.assertTrue(self.__action.executeCmd(
		r'''printf %b "foreign input:\n'''
		r''' -- $f2bV_A --\n'''
		r''' -- $f2bV_B --\n'''
		r''' -- $(echo -n $f2bV_C) --''' # echo just replaces \n to test it as single line
		r'''"''',
		varsDict={
			'f2bV_A': 'I\'m a hacker; && $(echo $f2bV_B)',
			'f2bV_B': 'I"m very bad hacker',
			'f2bV_C': '`Very | very\n$(bad & worst hacker)`'
		}))
	# every hostile value must appear literally, not evaluated by the shell:
	self.assertLogged(r"""foreign input:""",
		' -- I\'m a hacker; && $(echo $f2bV_B) --',
		' -- I"m very bad hacker --',
		' -- `Very | very $(bad & worst hacker)` --', all=True)
|
||||
|
||||
def testExecuteReplaceEscapeWithVars(self):
	"""Hostile content of <matches>/<reason> is passed literally and never interpreted."""
	self.__action.actionban = 'echo "** ban <ip>, reason: <reason> ...\\n<matches>"'
	self.__action.actionunban = 'echo "** unban <ip>"'
	self.__action.actionstop = 'echo "** stop monitoring"'
	# match lines attempting tag/shell injection:
	injLines = [
		'<actionunban>',
		'" Hooray! #',
		'`I\'m cool script kiddy',
		'`I`m very cool > /here-is-the-path/to/bin/.x-attempt.sh',
		'<actionstop>',
	]
	aInfo = {
		'ip': '192.0.2.1',
		'reason': 'hacking attempt ( he thought he knows how f2b internally works ;)',
		'matches': '\n'.join(injLines)
	}
	self.pruneLog()
	self.__action.ban(aInfo)
	# every injected line is logged verbatim, and no other action was triggered:
	self.assertLogged(
		'** ban %s' % aInfo['ip'], aInfo['reason'], *injLines, all=True)
	self.assertNotLogged(
		'** unban %s' % aInfo['ip'], '** stop monitoring', all=True)
	self.pruneLog()
	self.__action.unban(aInfo)
	self.__action.stop()
	self.assertLogged(
		'** unban %s' % aInfo['ip'], '** stop monitoring', all=True)
|
||||
|
||||
def testExecuteIncorrectCmd(self):
	"""Exit code 127 (command not found) is logged with an explanatory hint."""
	CommandAction.executeCmd('/bin/ls >/dev/null\nbogusXXX now 2>/dev/null')
	self.assertLogged('HINT on 127: "Command not found"')
|
||||
|
||||
def testExecuteTimeout(self):
	"""A command exceeding its timeout is aborted (SIGTERM, then SIGKILL) and reported.

	Fix: the elapsed-time bound used two separate time.time() calls, so the
	lower and upper bound were checked against different instants; the clock
	is now read once, making the asserted window consistent.
	"""
	stime = time.time()
	timeout = 1 if not unittest.F2B.fast else 0.01
	# 'sleep 30' is far beyond the timeout, so executeCmd must fail:
	self.assertFalse(CommandAction.executeCmd('sleep 30', timeout=timeout))
	# allow the test up to 1 extra second, because the system could be too busy:
	now = time.time()
	self.assertTrue(stime + timeout <= now <= stime + timeout + 1)
	self.assertLogged('sleep 30', ' -- timed out after', all=True)
	self.assertLogged(' -- killed with SIGTERM',
		' -- killed with SIGKILL')
|
||||
|
||||
def testExecuteTimeoutWithNastyChildren(self):
	"""Timed-out commands are killed together with children that ignore SIGTERM."""
	# temporary file name for the nasty kid shell script
	# NOTE(review): tempfile.mktemp is race-prone; acceptable here as the test writes it immediately
	tmpFilename = tempfile.mktemp(".sh", "fail2ban_")
	# the script traps HUP/EXIT/TERM and would otherwise linger for 30 seconds:
	with open(tmpFilename, 'w') as f:
		f.write("""#!/bin/bash
trap : HUP EXIT TERM

echo "$$" > %s.pid
echo "my pid $$ . sleeping lo-o-o-ong"
sleep 30
""" % tmpFilename)
	stime = 0

	def kid_pid():
		# read the child pid from the pid-file the script writes (None if not yet there)
		cpid = None
		if os.path.isfile(tmpFilename + '.pid'):
			with open(tmpFilename + '.pid') as f:
				try:
					cpid = int(f.read())
				except ValueError:
					pass
		return cpid

	def nasty_tout():
		# "timed out" as soon as the pid-file appeared, but at most after 5 seconds
		return (
			kid_pid() is not None
			or time.time() - stime > 5
		)

	# first check we can kill the bastard directly:
	stime = time.time()
	self.assertFalse(CommandAction.executeCmd(
		'bash %s' % tmpFilename, timeout=nasty_tout))
	cpid = kid_pid()
	# the child process must disappear within 3 seconds:
	self.assertTrue(Utils.wait_for(lambda: not pid_exists(cpid), 3)) # process should have been killed
	self.assertLogged('my pid ', 'Resource temporarily unavailable')
	self.assertLogged('timed out')
	self.assertLogged('killed with SIGTERM',
		'killed with SIGKILL')
	os.unlink(tmpFilename + '.pid')

	# a bit evolved case: the nasty script runs as a grandchild (command substitution):
	stime = time.time()
	self.assertFalse(CommandAction.executeCmd(
		'out=`bash %s`; echo ALRIGHT' % tmpFilename, timeout=nasty_tout))
	cpid = kid_pid()
	# again, the (grand)child must disappear within 3 seconds:
	self.assertTrue(Utils.wait_for(lambda: not pid_exists(cpid), 3))
	self.assertLogged('my pid ', 'Resource temporarily unavailable')
	self.assertLogged(' -- timed out')
	self.assertLogged(' -- killed with SIGTERM',
		' -- killed with SIGKILL')
	os.unlink(tmpFilename)
	os.unlink(tmpFilename + '.pid')
|
||||
|
||||
|
||||
def testCaptureStdOutErr(self):
	"""stdout and stderr of executed commands are captured into the log."""
	CommandAction.executeCmd('echo "How now brown cow"')
	self.assertLogged("stdout: 'How now brown cow'\n")
	CommandAction.executeCmd(
		'echo "The rain in Spain stays mainly in the plain" 1>&2')
	self.assertLogged(
		"stderr: 'The rain in Spain stays mainly in the plain'\n")
|
||||
|
||||
def testCallingMap(self):
	"""Callable values of a CallingMap are evaluated lazily - only on lookup."""
	cmap = CallingMap(callme=lambda self: str(10), error=lambda self: int('a'),
		dontcallme="string", number=17)

	# resolvable entries interpolate fine:
	self.assertEqual(
		"%(callme)s okay %(dontcallme)s %(number)i" % cmap,
		"10 okay string 17")
	# the failing callable raises only when actually accessed (delayed call):
	self.assertRaises(ValueError, lambda x: "%(error)i" % x, cmap)
|
||||
|
||||
def testCallingMapModify(self):
	"""set/del work on the live view, reset() restores the original data, copies are independent."""
	m = CallingMap({
		'a': lambda self: 2 + 3,
		'b': lambda self: self['a'] + 6,
		'c': 'test',
	})
	# reset without prior modifications is a no-op:
	m.reset()
	# modify, then verify set and delete took effect:
	m['a'] = 4
	del m['c']
	self.assertEqual(len(m), 2)
	self.assertNotIn('c', m)
	self.assertEqual((m['a'], m['b']), (4, 10))
	# back to the original data:
	m.reset()
	s = repr(m)
	self.assertEqual(len(m), 3)
	self.assertIn('c', m)
	self.assertEqual((m['a'], m['b'], m['c']), (5, 11, 'test'))
	# a copy must not affect the source map (and vice versa):
	m['d'] = 'dddd'
	m2 = m.copy()
	m2['c'] = lambda self: self['a'] + 7
	m2['a'] = 1
	del m2['b']
	del m2['d']
	self.assertTrue('b' in m)
	self.assertTrue('d' in m)
	self.assertFalse('b' in m2)
	self.assertFalse('d' in m2)
	self.assertEqual((m['a'], m['b'], m['c'], m['d']), (5, 11, 'test', 'dddd'))
	self.assertEqual((m2['a'], m2['c']), (1, 8))
|
||||
|
||||
def testCallingMapRep(self):
	"""repr shows stored values only; _asrepr(True) resolves callables where possible."""
	m = CallingMap({
		'a': lambda self: 2 + 3,
		'b': lambda self: self['a'] + 6,
		'c': ''
	})
	# default repr contains only stored (not calculated) values:
	s = repr(m)
	self.assertNotIn("'a': ", s)
	self.assertNotIn("'b': ", s)
	self.assertIn("'c': ''", s)

	# full repr resolves the callables too:
	s = m._asrepr(True)
	self.assertIn("'a': 5", s)
	self.assertIn("'b': 11", s)
	self.assertIn("'c': ''", s)

	# an unresolvable callable is presented as the callable itself:
	m['c'] = lambda self: self['xxx'] + 7
	s = m._asrepr(True)
	self.assertIn("'a': 5", s)
	self.assertIn("'b': 11", s)
	self.assertIn("'c': ", s)       # present as callable
	self.assertNotIn("'c': ''", s)  # but not as empty string
|
||||
|
||||
def testActionsIdleMode(self):
	"""The Actions thread logs entering and leaving idle mode."""
	actions = Actions(DummyJail())
	actions.sleeptime = 0.0001  # keep polling cycles short in the test
	# enter idle mode right away (start idle):
	actions.idle = True
	actions.start()
	self.assertLogged("Actions: enter idle mode", wait=10)
	# wake it up again:
	actions.idle = False
	self.assertLogged("Actions: leave idle mode", wait=10)
	# shut the thread down:
	actions.active = False
	actions.join()
|
||||
250
fail2ban-master/fail2ban/tests/banmanagertestcase.py
Normal file
250
fail2ban-master/fail2ban/tests/banmanagertestcase.py
Normal file
@@ -0,0 +1,250 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import unittest
|
||||
|
||||
from .utils import setUpMyTime, tearDownMyTime
|
||||
|
||||
from ..server.banmanager import BanManager
|
||||
from ..server.ipdns import DNSUtils
|
||||
from ..server.ticket import BanTicket
|
||||
|
||||
class AddFailure(unittest.TestCase):
	"""Tests of BanManager: adding, prolonging and expiring ban tickets."""

	def setUp(self):
		"""Freeze test time and create a manager plus a default ticket."""
		super(AddFailure, self).setUp()
		setUpMyTime()
		self.__ticket = BanTicket('193.168.0.128', 1167605999.0)
		self.__banManager = BanManager()

	def tearDown(self):
		"""Restore real time."""
		super(AddFailure, self).tearDown()
		tearDownMyTime()

	def testAdd(self):
		"""One added ticket is reflected in size and the resettable ban counter."""
		self.assertTrue(self.__banManager.addBanTicket(self.__ticket))
		self.assertEqual(self.__banManager.size(), 1)
		self.assertEqual(self.__banManager.getBanTotal(), 1)
		self.__banManager.setBanTotal(0)
		self.assertEqual(self.__banManager.getBanTotal(), 0)

	def testAddDuplicate(self):
		"""Adding the very same ticket twice is rejected the second time."""
		self.assertTrue(self.__banManager.addBanTicket(self.__ticket))
		self.assertFalse(self.__banManager.addBanTicket(self.__ticket))
		self.assertEqual(self.__banManager.size(), 1)

	def testAddDuplicateWithTime(self):
		"""Duplicates with newer time or longer ban time prolong the stored ticket."""
		defBanTime = self.__banManager.getBanTime()
		prevEndOfBanTime = 0
		# duplicate variants:
		# 0) same start time and the same (default) ban time
		# 1) newer start time and the same (default) ban time
		# 2) same start time and longer ban time
		# 3) permanent ban time (-1)
		for tnew, btnew in (
			(1167605999.0, None),
			(1167605999.0 + 100, None),
			(1167605999.0, 24*60*60),
			(1167605999.0, -1),
		):
			ticket1 = BanTicket('193.168.0.128', 1167605999.0)
			ticket2 = BanTicket('193.168.0.128', tnew)
			if btnew is not None:
				ticket2.setBanTime(btnew)
			self.assertTrue(self.__banManager.addBanTicket(ticket1))
			self.assertFalse(self.__banManager.addBanTicket(ticket2))
			self.assertEqual(self.__banManager.size(), 1)
			# the stored ticket must have been prolonged:
			banticket = self.__banManager.getTicketByID(ticket2.getID())
			self.assertEqual(banticket.getEndOfBanTime(defBanTime), ticket2.getEndOfBanTime(defBanTime))
			self.assertTrue(banticket.getEndOfBanTime(defBanTime) > prevEndOfBanTime)
			prevEndOfBanTime = ticket1.getEndOfBanTime(defBanTime)
			# while the start time stays unchanged (the + 100 is ignored):
			self.assertEqual(banticket.getTime(), 1167605999.0)
			# once prolonged to permanent, the ban time is permanent too:
			if btnew == -1:
				self.assertEqual(banticket.getBanTime(defBanTime), -1)

	def testInListOK(self):
		"""A fresh ticket with the same ID is recognized as banned."""
		self.assertTrue(self.__banManager.addBanTicket(self.__ticket))
		probe = BanTicket('193.168.0.128', 1167605999.0)
		self.assertTrue(self.__banManager._inBanList(probe))

	def testInListNOK(self):
		"""A ticket with a different ID is not reported as banned."""
		self.assertTrue(self.__banManager.addBanTicket(self.__ticket))
		probe = BanTicket('111.111.1.111', 1167605999.0)
		self.assertFalse(self.__banManager._inBanList(probe))

	def testBanTimeIncr(self):
		"""Prolonging with growing (finally permanent) ban time updates time and count."""
		ticket = BanTicket(self.__ticket.getID(), self.__ticket.getTime())
		## increase twice and at the end permanent; check time/count increase:
		c = 0
		for bt in (1000, 2000, -1):
			self.__banManager.addBanTicket(self.__ticket)
			c += 1
			ticket.setBanTime(bt)
			# duplicate is rejected (already banned) but prolongs the stored ticket:
			self.assertFalse(self.__banManager.addBanTicket(ticket))
			self.assertEqual(str(self.__banManager.getTicketByID(ticket.getID())),
				"BanTicket: ip=%s time=%s bantime=%s bancount=%s #attempts=0 matches=[]" % (ticket.getID(), ticket.getTime(), bt, c))
		## once permanent, the ban time remains -1 even after a finite prolongation:
		self.__banManager.addBanTicket(self.__ticket)
		c += 1
		ticket.setBanTime(-1)
		self.assertFalse(self.__banManager.addBanTicket(ticket))
		ticket.setBanTime(1000)
		self.assertFalse(self.__banManager.addBanTicket(ticket))
		self.assertEqual(str(self.__banManager.getTicketByID(ticket.getID())),
			"BanTicket: ip=%s time=%s bantime=%s bancount=%s #attempts=0 matches=[]" % (ticket.getID(), ticket.getTime(), -1, c))

	def testUnban(self):
		"""unBanList releases tickets only after their individual ban time elapsed."""
		btime = self.__banManager.getBanTime()
		stime = self.__ticket.getTime()
		self.assertTrue(self.__banManager.addBanTicket(self.__ticket))
		self.assertTrue(self.__banManager._inBanList(self.__ticket))
		self.assertEqual(self.__banManager.unBanList(stime), [])
		self.assertEqual(self.__banManager.unBanList(stime + btime + 1), [self.__ticket])
		self.assertEqual(self.__banManager.size(), 0)
		## again, but prolong the ban so the first unban attempt is too early:
		self.assertTrue(self.__banManager.addBanTicket(self.__ticket))
		prolong = BanTicket(self.__ticket.getID(), stime + 600)
		self.assertFalse(self.__banManager.addBanTicket(prolong))
		# too early:
		self.assertEqual(len(self.__banManager.unBanList(stime + btime + 1)), 0)
		# after the prolonged end of ban:
		self.assertEqual(len(self.__banManager.unBanList(stime + btime + 600 + 1)), 1)
		## now several tickets with staggered ban times (tests the < 2/3-rule):
		for i in range(5):
			t = BanTicket('193.168.0.%s' % i, stime)
			t.setBanTime(t.getBanTime(btime) + i*10)
			self.assertTrue(self.__banManager.addBanTicket(t))
		self.assertEqual(len(self.__banManager.unBanList(stime + btime + 1*10 + 1)), 2)
		self.assertEqual(len(self.__banManager.unBanList(stime + btime + 5*10 + 1)), 3)
		self.assertEqual(self.__banManager.size(), 0)

	def testUnbanPermanent(self):
		"""Permanent tickets (ban time -1) are never released by unBanList."""
		btime = self.__banManager.getBanTime()
		self.__banManager.setBanTime(-1)
		try:
			self.assertTrue(self.__banManager.addBanTicket(self.__ticket))
			self.assertTrue(self.__banManager._inBanList(self.__ticket))
			self.assertEqual(self.__banManager.unBanList(self.__ticket.getTime() + btime + 1), [])
			self.assertEqual(self.__banManager.size(), 1)
		finally:
			self.__banManager.setBanTime(btime)

	def testBanList(self):
		"""getBanList renders end-of-ban times; permanent bans show the maximum date."""
		tickets = [
			BanTicket('192.0.2.1', 1167605999.0),
			BanTicket('192.0.2.2', 1167605999.0),
		]
		tickets[1].setBanTime(-1)
		for t in tickets:
			self.__banManager.addBanTicket(t)
		self.assertSortedEqual(self.__banManager.getBanList(ordered=True, withTime=True),
			[
				'192.0.2.1 \t2006-12-31 23:59:59 + 600 = 2007-01-01 00:09:59',
				'192.0.2.2 \t2006-12-31 23:59:59 + -1 = 9999-12-31 23:59:59'
			]
		)
|
||||
|
||||
|
||||
class StatusExtendedCymruInfo(unittest.TestCase):
	"""Network tests of the whois-extended (Team Cymru) ban list information."""

	def setUp(self):
		"""Ban one well-known resolver IP; requires network access."""
		super(StatusExtendedCymruInfo, self).setUp()
		unittest.F2B.SkipIfNoNetwork()
		setUpMyTime()
		self.__ban_ip = next(iter(DNSUtils.dnsToIp("resolver1.opendns.com")))
		# expected whois data for that IP:
		self.__asn = "36692"
		self.__country = "US"
		self.__rir = "arin"
		ticket = BanTicket(self.__ban_ip, 1167605999.0)
		self.__banManager = BanManager()
		self.assertTrue(self.__banManager.addBanTicket(ticket))

	def tearDown(self):
		"""Restore real time."""
		super(StatusExtendedCymruInfo, self).tearDown()
		tearDownMyTime()

	# class-wide availability flag: (service usable, cached error result)
	available = True, None

	def _getBanListExtendedCymruInfo(self):
		"""Query the Cymru info once; remember service unavailability and skip then."""
		tc = StatusExtendedCymruInfo
		if tc.available[0]:
			cymru_info = self.__banManager.getBanListExtendedCymruInfo(
				timeout=(2 if unittest.F2B.fast else 20))
		else: # pragma: no cover - availability (once after error case only)
			cymru_info = tc.available[1]
		if cymru_info.get("error"): # pragma: no cover - availability
			tc.available = False, cymru_info
			raise unittest.SkipTest('Skip test because service is not available: %s' % cymru_info["error"])
		return cymru_info

	def testCymruInfo(self):
		"""Full info dict contains ASN, country and RIR of the banned IP."""
		cymru_info = self._getBanListExtendedCymruInfo()
		self.assertDictEqual(cymru_info,
			{"asn": [self.__asn],
			"country": [self.__country],
			"rir": [self.__rir]})

	def testCymruInfoASN(self):
		"""ASN extraction from the info dict."""
		self.assertEqual(
			self.__banManager.geBanListExtendedASN(self._getBanListExtendedCymruInfo()),
			[self.__asn])

	def testCymruInfoCountry(self):
		"""Country extraction from the info dict."""
		self.assertEqual(
			self.__banManager.geBanListExtendedCountry(self._getBanListExtendedCymruInfo()),
			[self.__country])

	def testCymruInfoRIR(self):
		"""RIR extraction from the info dict."""
		self.assertEqual(
			self.__banManager.geBanListExtendedRIR(self._getBanListExtendedCymruInfo()),
			[self.__rir])

	def testCymruInfoNxdomain(self):
		"""Unresolvable IPs are reported as 'nxdomain'; results accumulate per ticket."""
		self.__banManager = BanManager()

		# non-existing IP:
		ticket = BanTicket("0.0.0.0", 1167605999.0)
		self.assertTrue(self.__banManager.addBanTicket(ticket))
		cymru_info = self._getBanListExtendedCymruInfo()
		self.assertDictEqual(cymru_info,
			{"asn": ["nxdomain"],
			"country": ["nxdomain"],
			"rir": ["nxdomain"]})

		# the info covers all active tickets, so previous results remain
		# alongside the new ones:
		ticket = BanTicket("8.0.0.0", 1167606000.0)
		self.assertTrue(self.__banManager.addBanTicket(ticket))
		cymru_info = self._getBanListExtendedCymruInfo()
		self.assertSortedEqual(cymru_info,
			{"asn": ["nxdomain", "3356",],
			"country": ["nxdomain", "US"],
			"rir": ["nxdomain", "arin"]}, level=-1, key=str)
|
||||
358
fail2ban-master/fail2ban/tests/clientbeautifiertestcase.py
Normal file
358
fail2ban-master/fail2ban/tests/clientbeautifiertestcase.py
Normal file
@@ -0,0 +1,358 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Alexander Koeppe"
|
||||
__copyright__ = "Copyright (c) 2016 Cyril Jaquier, 2011-2013 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import unittest
|
||||
|
||||
from ..client.beautifier import Beautifier
|
||||
from ..version import version
|
||||
from ..server.ipdns import IPAddr, FileIPAddrSet
|
||||
from ..exceptions import UnknownJailException, DuplicateJailException
|
||||
|
||||
class BeautifierTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
	"""Create a fresh Beautifier before each test."""
	super(BeautifierTest, self).setUp()
	self.b = Beautifier()
	self.b.encUtf = 0  # prefer ascii in the test suite (see #3750)
|
||||
|
||||
def tearDown(self):
	"""Nothing to clean up beyond the base class."""
	super(BeautifierTest, self).tearDown()
|
||||
|
||||
def testGetInputCmd(self):
	"""The input command round-trips through setter and getter."""
	cmd = ["test"]
	self.b.setInputCmd(cmd)
	self.assertEqual(self.b.getInputCmd(), cmd)
|
||||
|
||||
def testPing(self):
	"""'ping' responses are prefixed with the server-reply text."""
	self.b.setInputCmd(["ping"])
	self.assertEqual(self.b.beautify("pong"), "Server replied: pong")
|
||||
|
||||
def testVersion(self):
	"""'version' responses pass through unchanged."""
	self.b.setInputCmd(["version"])
	self.assertEqual(self.b.beautify(version), version)
|
||||
|
||||
def testAddJail(self):
	"""'add' reports which jail was added."""
	self.b.setInputCmd(["add"])
	self.assertEqual(self.b.beautify("ssh"), "Added jail ssh")
|
||||
|
||||
def testStartJail(self):
	"""'start' acknowledges the jail start."""
	self.b.setInputCmd(["start"])
	self.assertEqual(self.b.beautify(None), "Jail started")
|
||||
|
||||
def testStopJail(self):
	"""'stop <jail>' acknowledges the jail stop."""
	self.b.setInputCmd(["stop", "ssh"])
	self.assertEqual(self.b.beautify(None), "Jail stopped")
|
||||
|
||||
def testShutdown(self):
	"""Bare 'stop' means a full server shutdown."""
	self.b.setInputCmd(["stop"])
	self.assertEqual(self.b.beautify(None), "Shutdown successful")
|
||||
|
||||
def testStatus(self):
	"""Render plain, single-jail and --all status responses as ascii trees."""
	# plain overall status:
	self.b.setInputCmd(["status"])
	response = (("Number of jails", 2), ("Jail list", ", ".join(["ssh", "exim4"])))
	output = "Status\n|- Number of jails:\t2\n`- Jail list:\tssh, exim4"
	self.assertEqual(self.b.beautify(response), output)

	# status of a single jail:
	self.b.setInputCmd(["status", "ssh"])
	response = (
		("Filter", [
			("Currently failed", 0),
			("Total failed", 0),
			("File list", "/var/log/auth.log"),
		]),
		("Actions", [
			("Currently banned", 3),
			("Total banned", 3),
			("Banned IP list", [
				IPAddr("192.168.0.1"),
				IPAddr("::ffff:10.2.2.1"),
				IPAddr("2001:db8::1"),
			]),
		]),
	)
	output = "\n".join((
		"Status for the jail: ssh",
		"|- Filter",
		"| |- Currently failed: 0",
		"| |- Total failed: 0",
		"| `- File list: /var/log/auth.log",
		"`- Actions",
		" |- Currently banned: 3",
		" |- Total banned: 3",
		" `- Banned IP list: 192.168.0.1 10.2.2.1 2001:db8::1",
	))
	self.assertEqual(self.b.beautify(response), output)

	# status with --all (overall status plus all jails):
	self.b.setInputCmd(["status", "--all"])
	response = (("Number of jails", 2), ("Jail list", ", ".join(["ssh", "exim4"])), {
		"ssh": (
			("Filter", [
				("Currently failed", 0),
				("Total failed", 0),
				("File list", "/var/log/auth.log"),
			]),
			("Actions", [
				("Currently banned", 3),
				("Total banned", 3),
				("Banned IP list", [
					IPAddr("192.168.0.1"),
					IPAddr("::ffff:10.2.2.1"),
					IPAddr("2001:db8::1"),
				]),
			]),
		),
		"exim4": (
			("Filter", [
				("Currently failed", 3),
				("Total failed", 6),
				("File list", "/var/log/exim4/mainlog"),
			]),
			("Actions", [
				("Currently banned", 0),
				("Total banned", 0),
				("Banned IP list", []),
			]),
		)
	})
	output = "\n".join((
		"Status",
		"|- Number of jails:\t2",
		"|- Jail list:\tssh, exim4",
		"`- Status for the jails:",
		" |- Jail: ssh",
		" | |- Filter",
		" | | |- Currently failed: 0",
		" | | |- Total failed: 0",
		" | | `- File list: /var/log/auth.log",
		" | `- Actions",
		" | |- Currently banned: 3",
		" | |- Total banned: 3",
		" | `- Banned IP list: 192.168.0.1 10.2.2.1 2001:db8::1",
		" `- Jail: exim4",
		" |- Filter",
		" | |- Currently failed: 3",
		" | |- Total failed: 6",
		" | `- File list: /var/log/exim4/mainlog",
		" `- Actions",
		" |- Currently banned: 0",
		" |- Total banned: 0",
		" `- Banned IP list: ",
	))
	self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
def testStatusStats(self):
	"""Render the per-jail statistics table."""
	self.b.setInputCmd(["stats"])
	## without any jails:
	self.assertEqual(self.b.beautify({}), "No jails found.")
	## with 3 jails:
	response = {
		"ssh": ["systemd", (3, 6), (12, 24)],
		"exim4": ["pyinotify", (6, 12), (20, 20)],
		"jail-with-long-name": ["polling", (0, 0), (0, 0)]
	}
	output = "\n".join((
		" | | Filter | Actions ",
		" Jail | Backend |-----------x-----------",
		" | | cur | tot | cur | tot",
		"---------------------x-----------x-----------x-----------",
		" ssh | systemd | 3 | 6 | 12 | 24",
		" exim4 | pyinotify | 6 | 12 | 20 | 20",
		" jail-with-long-name | polling | 0 | 0 | 0 | 0",
		"---------------------------------------------------------",
	))
	self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
|
||||
def testFlushLogs(self):
|
||||
self.b.setInputCmd(["flushlogs"])
|
||||
self.assertEqual(self.b.beautify("rolled over"), "logs: rolled over")
|
||||
|
||||
def testSyslogSocket(self):
|
||||
self.b.setInputCmd(["get", "syslogsocket"])
|
||||
output = "Current syslog socket is:\n`- auto"
|
||||
self.assertEqual(self.b.beautify("auto"), output)
|
||||
|
||||
def testLogTarget(self):
|
||||
self.b.setInputCmd(["get", "logtarget"])
|
||||
output = "Current logging target is:\n`- /var/log/fail2ban.log"
|
||||
self.assertEqual(self.b.beautify("/var/log/fail2ban.log"), output)
|
||||
|
||||
def testLogLevel(self):
|
||||
self.b.setInputCmd(["get", "loglevel"])
|
||||
output = "Current logging level is 'INFO'"
|
||||
self.assertEqual(self.b.beautify("INFO"), output)
|
||||
|
||||
def testDbFile(self):
|
||||
self.b.setInputCmd(["get", "dbfile"])
|
||||
response = "/var/lib/fail2ban/fail2ban.sqlite3"
|
||||
output = "Current database file is:\n`- " + response
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
self.assertEqual(self.b.beautify(None), "Database currently disabled")
|
||||
|
||||
def testDbPurgeAge(self):
|
||||
self.b.setInputCmd(["get", "dbpurgeage"])
|
||||
output = "Current database purge age is:\n`- 86400seconds"
|
||||
self.assertEqual(self.b.beautify(86400), output)
|
||||
self.assertEqual(self.b.beautify(None), "Database currently disabled")
|
||||
|
||||
def testLogPath(self):
|
||||
self.b.setInputCmd(["get", "sshd", "logpath"])
|
||||
response = []
|
||||
output = "No file is currently monitored"
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
response = ["/var/log/auth.log"]
|
||||
output = "Current monitored log file(s):\n`- /var/log/auth.log"
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
self.b.setInputCmd(["set", "sshd", "addlogpath", "/var/log/messages"])
|
||||
response = ["/var/log/messages", "/var/log/auth.log"]
|
||||
outputadd = "Current monitored log file(s):\n"
|
||||
outputadd += "|- /var/log/messages\n`- /var/log/auth.log"
|
||||
self.assertEqual(self.b.beautify(response), outputadd)
|
||||
|
||||
self.b.setInputCmd(["set", "sshd", "dellogpath", "/var/log/messages"])
|
||||
response = ["/var/log/auth.log"]
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
def testLogEncoding(self):
|
||||
self.b.setInputCmd(["get", "sshd", "logencoding"])
|
||||
output = "Current log encoding is set to:\nUTF-8"
|
||||
self.assertEqual(self.b.beautify("UTF-8"), output)
|
||||
|
||||
def testJournalMatch(self):
|
||||
self.b.setInputCmd(["get", "sshd", "journalmatch"])
|
||||
self.assertEqual(self.b.beautify([]), "No journal match filter set")
|
||||
|
||||
self.b.setInputCmd(["set", "sshd", "addjournalmatch"])
|
||||
response = [["_SYSTEMD_UNIT", "sshd.service"]]
|
||||
output = "Current match filter:\n"
|
||||
output += "_SYSTEMD_UNIT sshd.service"
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
response.append(["_COMM", "sshd"])
|
||||
output += " + _COMM sshd"
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
self.b.setInputCmd(["set", "sshd", "deljournalmatch"])
|
||||
response.remove(response[1])
|
||||
self.assertEqual(self.b.beautify(response), output.split(" + ")[0])
|
||||
|
||||
def testDatePattern(self):
|
||||
self.b.setInputCmd(["get", "sshd", "datepattern"])
|
||||
output = "Current date pattern set to: "
|
||||
response = (None, "Default Detectors")
|
||||
self.assertEqual(self.b.beautify(None),
|
||||
output + "Not set/required")
|
||||
self.assertEqual(self.b.beautify(response),
|
||||
output + "Default Detectors")
|
||||
self.assertEqual(self.b.beautify(("test", "test")),
|
||||
output + "test (test)")
|
||||
|
||||
def testIgnoreIP(self):
|
||||
self.b.setInputCmd(["get", "sshd", "ignoreip"])
|
||||
output = "No IP address/network is ignored"
|
||||
self.assertEqual(self.b.beautify([]), output)
|
||||
|
||||
self.b.setInputCmd(["set", "sshd", "addignoreip"])
|
||||
response = [
|
||||
IPAddr("127.0.0.0", 8),
|
||||
IPAddr("::1"),
|
||||
IPAddr("2001:db8::", 32),
|
||||
IPAddr("::ffff:10.0.2.1")
|
||||
]
|
||||
output = "These IP addresses/networks are ignored:\n"
|
||||
output += "|- 127.0.0.0/8\n"
|
||||
output += "|- ::1\n"
|
||||
output += "|- 2001:db8::/32\n"
|
||||
output += "`- 10.0.2.1"
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
def testIgnoreIPFile(self):
|
||||
self.b.setInputCmd(["set", "sshd", "addignoreip"])
|
||||
response = [FileIPAddrSet("/test/file-ipaddr-set")]
|
||||
output = ("These IP addresses/networks are ignored:\n"
|
||||
"`- file://test/file-ipaddr-set")
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
def testFailRegex(self):
|
||||
self.b.setInputCmd(["get", "sshd", "failregex"])
|
||||
output = "No regular expression is defined"
|
||||
self.assertEqual(self.b.beautify([]), output)
|
||||
|
||||
output = "The following regular expression are defined:\n"
|
||||
output += "|- [0]: ^$\n`- [1]: .*"
|
||||
self.assertEqual(self.b.beautify(["^$", ".*"]), output)
|
||||
|
||||
def testActions(self):
|
||||
self.b.setInputCmd(["get", "sshd", "actions"])
|
||||
output = "No actions for jail sshd"
|
||||
self.assertEqual(self.b.beautify([]), output)
|
||||
|
||||
output = "The jail sshd has the following actions:\n"
|
||||
output += "iptables-multiport"
|
||||
self.assertEqual(self.b.beautify(["iptables-multiport"]), output)
|
||||
|
||||
def testActionProperties(self):
|
||||
self.b.setInputCmd(["get", "sshd", "actionproperties", "iptables"])
|
||||
output = "No properties for jail sshd action iptables"
|
||||
self.assertEqual(self.b.beautify([]), output)
|
||||
|
||||
output = "The jail sshd action iptables has the following properties:"
|
||||
output += "\nactionban, actionunban"
|
||||
response = ("actionban", "actionunban")
|
||||
self.assertEqual(self.b.beautify(response), output)
|
||||
|
||||
def testActionMethods(self):
|
||||
self.b.setInputCmd(["get", "sshd", "actionmethods", "iptables"])
|
||||
output = "No methods for jail sshd action iptables"
|
||||
self.assertEqual(self.b.beautify([]), output)
|
||||
|
||||
output = "The jail sshd action iptables has the following methods:\n"
|
||||
output += "ban, unban"
|
||||
self.assertEqual(self.b.beautify(["ban", "unban"]), output)
|
||||
|
||||
# def testException(self):
|
||||
# self.b.setInputCmd(["get", "sshd", "logpath"])
|
||||
# self.assertRaises(self.b.beautify(1), TypeError)
|
||||
|
||||
def testBeautifyError(self):
|
||||
response = UnknownJailException("sshd")
|
||||
output = "Sorry but the jail 'sshd' does not exist"
|
||||
self.assertEqual(self.b.beautifyError(response), output)
|
||||
|
||||
response = DuplicateJailException("sshd")
|
||||
output = "The jail 'sshd' already exists"
|
||||
self.assertEqual(self.b.beautifyError(response), output)
|
||||
|
||||
output = "Sorry but the command is invalid"
|
||||
self.assertEqual(self.b.beautifyError(IndexError()), output)
|
||||
1074
fail2ban-master/fail2ban/tests/clientreadertestcase.py
Normal file
1074
fail2ban-master/fail2ban/tests/clientreadertestcase.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,4 @@
|
||||
|
||||
[Definition]
|
||||
|
||||
actionban = echo "name: <actname>, ban: <ip>, logs: %(logpath)s"
|
||||
@@ -0,0 +1,4 @@
|
||||
|
||||
[Definition]
|
||||
|
||||
actionban = hit with big stick <ip>
|
||||
5
fail2ban-master/fail2ban/tests/config/fail2ban.conf
Normal file
5
fail2ban-master/fail2ban/tests/config/fail2ban.conf
Normal file
@@ -0,0 +1,5 @@
|
||||
[Definition]
|
||||
|
||||
# 3 = INFO
|
||||
loglevel = 3
|
||||
|
||||
@@ -0,0 +1,31 @@
|
||||
# Fail2Ban configuration file
|
||||
#
|
||||
|
||||
[INCLUDES]
|
||||
|
||||
# Read common prefixes (logtype is set in default section)
|
||||
before = ../../../../config/filter.d/common.conf
|
||||
|
||||
[Definition]
|
||||
|
||||
_daemon = test
|
||||
|
||||
failregex = ^<lt_<logtype>/__prefix_line> failure from <HOST>$
|
||||
ignoreregex =
|
||||
|
||||
# following sections define prefix line considering logtype:
|
||||
|
||||
# backend-related (retrieved from backend, overwrite default):
|
||||
[lt_file]
|
||||
__prefix_line = FILE
|
||||
|
||||
[lt_journal]
|
||||
__prefix_line = JRNL
|
||||
|
||||
# specified in definition section of filter (see filter checklogtype_test.conf):
|
||||
[lt_test]
|
||||
__prefix_line = TEST
|
||||
|
||||
# specified in init parameter of jail (see ../jail.conf, jail checklogtype_init):
|
||||
[lt_init]
|
||||
__prefix_line = INIT
|
||||
@@ -0,0 +1,12 @@
|
||||
# Fail2Ban configuration file
|
||||
#
|
||||
|
||||
[INCLUDES]
|
||||
|
||||
# Read common prefixes (logtype is set in default section)
|
||||
before = checklogtype.conf
|
||||
|
||||
[Definition]
|
||||
|
||||
# overwrite logtype in definition (no backend anymore):
|
||||
logtype = test
|
||||
@@ -0,0 +1,4 @@
|
||||
|
||||
[Definition]
|
||||
|
||||
failregex = <IP>
|
||||
13
fail2ban-master/fail2ban/tests/config/filter.d/test.conf
Normal file
13
fail2ban-master/fail2ban/tests/config/filter.d/test.conf
Normal file
@@ -0,0 +1,13 @@
|
||||
#[INCLUDES]
|
||||
#before = common.conf
|
||||
|
||||
[DEFAULT]
|
||||
_daemon = default
|
||||
|
||||
[Definition]
|
||||
where = conf
|
||||
failregex = failure <_daemon> <one> (filter.d/test.%(where)s) <HOST>
|
||||
|
||||
[Init]
|
||||
# test parameter, should be overridden in jail by "filter=test[one=1,...]"
|
||||
one = *1*
|
||||
16
fail2ban-master/fail2ban/tests/config/filter.d/test.local
Normal file
16
fail2ban-master/fail2ban/tests/config/filter.d/test.local
Normal file
@@ -0,0 +1,16 @@
|
||||
#[INCLUDES]
|
||||
#before = common.conf
|
||||
|
||||
[Definition]
|
||||
# overwrite default daemon, additionally it should be accessible in jail with "%(known/_daemon)s":
|
||||
_daemon = test
|
||||
# interpolate previous regex (from test.conf) + new 2nd + dynamical substitution) of "two" an "where":
|
||||
failregex = %(known/failregex)s
|
||||
failure %(_daemon)s <two> (filter.d/test.<where>) <HOST>
|
||||
# parameter "two" should be specified in jail by "filter=test[..., two=2]"
|
||||
|
||||
[Init]
|
||||
# this parameter can be used in jail with "%(known/three)s":
|
||||
three = 3
|
||||
# this parameter "where" does not overwrite "where" in definition of test.conf (dynamical values only):
|
||||
where = local
|
||||
@@ -0,0 +1,27 @@
|
||||
# Fail2Ban generic example resp. test filter
|
||||
#
|
||||
# Author: Serg G. Brester (sebres)
|
||||
#
|
||||
|
||||
[INCLUDES]
|
||||
|
||||
# Read common prefixes. If any customizations available -- read them from
|
||||
# common.local. common.conf is a symlink to the original common.conf and
|
||||
# should be copied (dereferenced) during installation
|
||||
before = ../../../../config/filter.d/common.conf
|
||||
|
||||
[Definition]
|
||||
|
||||
_daemon = test-demo
|
||||
|
||||
failregex = ^%(__prefix_line)sF2B: failure from <HOST>$
|
||||
^%(__prefix_line)sF2B: error from <HOST>$
|
||||
|
||||
# just to test multiple ignoreregex:
|
||||
ignoreregex = ^%(__prefix_line)sF2B: error from 192.0.2.251$
|
||||
^%(__prefix_line)sF2B: error from 192.0.2.252$
|
||||
|
||||
# specify only exact date patterns, +1 with %%Y to test usage of last known date by wrong dates like 0000-00-00...
|
||||
datepattern = {^LN-BEG}%%ExY(?P<_sep>[-/.])%%m(?P=_sep)%%d[T ]%%H:%%M:%%S(?:[.,]%%f)?(?:\s*%%z)?
|
||||
{^LN-BEG}(?:%%a )?%%b %%d %%H:%%M:%%S(?:\.%%f)?(?: %%ExY)?
|
||||
{^LN-BEG}%%Y(?P<_sep>[-/.])%%m(?P=_sep)%%d[T ]%%H:%%M:%%S(?:[.,]%%f)?(?:\s*%%z)?
|
||||
@@ -0,0 +1,102 @@
|
||||
# Fail2Ban obsolete multiline example resp. test filter (previously sshd.conf)
|
||||
#
|
||||
|
||||
[INCLUDES]
|
||||
|
||||
# Read common prefixes. If any customizations available -- read them from
|
||||
# common.local
|
||||
before = ../../../../config/filter.d/common.conf
|
||||
|
||||
[DEFAULT]
|
||||
|
||||
_daemon = sshd(?:-session)?
|
||||
|
||||
# optional prefix (logged from several ssh versions) like "error: ", "error: PAM: " or "fatal: "
|
||||
__pref = (?:(?:error|fatal): (?:PAM: )?)?
|
||||
# optional suffix (logged from several ssh versions) like " [preauth]"
|
||||
__suff = (?: (?:port \d+|on \S+|\[preauth\])){0,3}\s*
|
||||
__on_port_opt = (?: (?:port \d+|on \S+)){0,2}
|
||||
# close by authenticating user:
|
||||
__authng_user = (?: authenticating user <F-USER>\S+|.+?</F-USER>)?
|
||||
|
||||
# single line prefix:
|
||||
__prefix_line_sl = %(__prefix_line)s%(__pref)s
|
||||
# multi line prefixes (for first and second lines):
|
||||
__prefix_line_ml1 = (?P<__prefix>%(__prefix_line)s)%(__pref)s
|
||||
__prefix_line_ml2 = %(__suff)s$<SKIPLINES>^(?P=__prefix)%(__pref)s
|
||||
|
||||
# for all possible (also future) forms of "no matching (cipher|mac|MAC|compression method|key exchange method|host key type) found",
|
||||
# see ssherr.c for all possible SSH_ERR_..._ALG_MATCH errors.
|
||||
__alg_match = (?:(?:\w+ (?!found\b)){0,2}\w+)
|
||||
|
||||
# PAM authentication mechanism, can be overridden, e. g. `filter = sshd[__pam_auth='pam_ldap']`:
|
||||
__pam_auth = pam_[a-z]+
|
||||
|
||||
[Definition]
|
||||
|
||||
cmnfailre = ^%(__prefix_line_sl)s[aA]uthentication (?:failure|error|failed) for .* from <HOST>( via \S+)?\s*%(__suff)s$
|
||||
^%(__prefix_line_sl)sUser not known to the underlying authentication module for .* from <HOST>\s*%(__suff)s$
|
||||
^%(__prefix_line_sl)sFailed \S+ for invalid user <F-USER>(?P<cond_user>\S+)|(?:(?! from ).)*?</F-USER> from <HOST>%(__on_port_opt)s(?: ssh\d*)?(?(cond_user): |(?:(?:(?! from ).)*)$)
|
||||
^%(__prefix_line_sl)sFailed (?:<F-NOFAIL>publickey</F-NOFAIL>|\S+) for (?P<cond_inv>invalid user )?<F-USER>(?P<cond_user>\S+)|(?(cond_inv)(?:(?! from ).)*?|[^:]+)</F-USER> from <HOST>%(__on_port_opt)s(?: ssh\d*)?(?(cond_user): |(?:(?:(?! from ).)*)$)
|
||||
^%(__prefix_line_sl)sROOT LOGIN REFUSED FROM <HOST>
|
||||
^%(__prefix_line_sl)s[iI](?:llegal|nvalid) user .*? from <HOST>%(__suff)s$
|
||||
^%(__prefix_line_sl)sUser .+ from <HOST> not allowed because not listed in AllowUsers\s*%(__suff)s$
|
||||
^%(__prefix_line_sl)sUser .+ from <HOST> not allowed because listed in DenyUsers\s*%(__suff)s$
|
||||
^%(__prefix_line_sl)sUser .+ from <HOST> not allowed because not in any group\s*%(__suff)s$
|
||||
^%(__prefix_line_sl)srefused connect from \S+ \(<HOST>\)
|
||||
^%(__prefix_line_sl)sReceived disconnect from <HOST>%(__on_port_opt)s:\s*3: .*: Auth fail%(__suff)s$
|
||||
^%(__prefix_line_sl)sUser .+ from <HOST> not allowed because a group is listed in DenyGroups\s*%(__suff)s$
|
||||
^%(__prefix_line_sl)sUser .+ from <HOST> not allowed because none of user's groups are listed in AllowGroups\s*%(__suff)s$
|
||||
^%(__prefix_line_ml1)s%(__pam_auth)s\(sshd:auth\):\s+authentication failure;\s*logname=\S*\s*uid=\d*\s*euid=\d*\s*tty=\S*\s*ruser=\S*\s*rhost=<HOST>\s.*%(__suff)s$%(__prefix_line_ml2)sConnection closed
|
||||
^%(__prefix_line_sl)s(error: )?maximum authentication attempts exceeded for .* from <HOST>%(__on_port_opt)s(?: ssh\d*)? \[preauth\]$
|
||||
^%(__prefix_line_ml1)sUser .+ not allowed because account is locked%(__prefix_line_ml2)sReceived disconnect from <HOST>%(__on_port_opt)s:\s*11: .+%(__suff)s$
|
||||
^%(__prefix_line_ml1)sDisconnecting: Too many authentication failures(?: for .+?)?%(__suff)s%(__prefix_line_ml2)sConnection closed by%(__authng_user)s <HOST>%(__suff)s$
|
||||
^%(__prefix_line_ml1)sConnection from <HOST>%(__on_port_opt)s%(__prefix_line_ml2)sDisconnecting: Too many authentication failures(?: for .+?)?%(__suff)s$
|
||||
|
||||
mdre-normal =
|
||||
|
||||
mdre-ddos = ^%(__prefix_line_sl)sDid not receive identification string from <HOST>
|
||||
^%(__prefix_line_sl)sBad protocol version identification '.*' from <HOST>
|
||||
^%(__prefix_line_sl)sConnection (?:closed|reset) by%(__authng_user)s <HOST>%(__on_port_opt)s\s+\[preauth\]\s*$
|
||||
^%(__prefix_line_ml1)sSSH: Server;Ltype: (?:Authname|Version|Kex);Remote: <HOST>-\d+;[A-Z]\w+:.*%(__prefix_line_ml2)sRead from socket failed: Connection reset by peer%(__suff)s$
|
||||
|
||||
mdre-extra = ^%(__prefix_line_sl)sReceived disconnect from <HOST>%(__on_port_opt)s:\s*14: No(?: supported)? authentication methods available
|
||||
^%(__prefix_line_sl)sUnable to negotiate with <HOST>%(__on_port_opt)s: no matching <__alg_match> found.
|
||||
^%(__prefix_line_ml1)sConnection from <HOST>%(__on_port_opt)s%(__prefix_line_ml2)sUnable to negotiate a <__alg_match>
|
||||
^%(__prefix_line_ml1)sConnection from <HOST>%(__on_port_opt)s%(__prefix_line_ml2)sno matching <__alg_match> found:
|
||||
^%(__prefix_line_sl)sDisconnected(?: from)?(?: (?:invalid|authenticating)) user <F-USER>\S+</F-USER> <HOST>%(__on_port_opt)s \[preauth\]\s*$
|
||||
|
||||
mdre-aggressive = %(mdre-ddos)s
|
||||
%(mdre-extra)s
|
||||
|
||||
failregex = %(cmnfailre)s
|
||||
<mdre-<mode>>
|
||||
|
||||
# Parameter "mode": normal (default), ddos, extra or aggressive (combines all)
|
||||
# Usage example (for jail.local):
|
||||
# [sshd]
|
||||
# mode = extra
|
||||
# # or another jail (rewrite filter parameters of jail):
|
||||
# [sshd-aggressive]
|
||||
# filter = sshd[mode=aggressive]
|
||||
#
|
||||
mode = normal
|
||||
|
||||
ignoreregex =
|
||||
|
||||
# "maxlines" is number of log lines to buffer for multi-line regex searches
|
||||
maxlines = 10
|
||||
|
||||
journalmatch = _SYSTEMD_UNIT=sshd.service + _COMM=sshd
|
||||
|
||||
datepattern = {^LN-BEG}
|
||||
|
||||
# DEV Notes:
|
||||
#
|
||||
# "Failed \S+ for .*? from <HOST>..." failregex uses non-greedy catch-all because
|
||||
# it is coming before use of <HOST> which is not hard-anchored at the end as well,
|
||||
# and later catch-all's could contain user-provided input, which need to be greedily
|
||||
# matched away first.
|
||||
#
|
||||
# Author: Cyril Jaquier, Yaroslav Halchenko, Petr Voralek, Daniel Black
|
||||
|
||||
107
fail2ban-master/fail2ban/tests/config/jail.conf
Normal file
107
fail2ban-master/fail2ban/tests/config/jail.conf
Normal file
@@ -0,0 +1,107 @@
|
||||
|
||||
[DEFAULT]
|
||||
filter = simple
|
||||
logpath = /non/exist
|
||||
|
||||
[emptyaction]
|
||||
enabled = true
|
||||
filter =
|
||||
action =
|
||||
|
||||
[special]
|
||||
failregex = <IP>
|
||||
ignoreregex =
|
||||
ignoreip =
|
||||
|
||||
[test-known-interp]
|
||||
enabled = true
|
||||
filter = test[one=1,two=2]
|
||||
failregex = %(known/failregex)s
|
||||
failure %(known/_daemon)s %(known/three)s (jail.local) <HOST>
|
||||
|
||||
[missinglogfiles]
|
||||
enabled = true
|
||||
journalmatch = _COMM=test ;# allow to switch to systemd (by backend = `auto` and no logs found)
|
||||
logpath = /weapons/of/mass/destruction
|
||||
|
||||
[missinglogfiles_skip]
|
||||
enabled = true
|
||||
skip_if_nologs = true
|
||||
logpath = /weapons/of/mass/destruction
|
||||
|
||||
[brokenactiondef]
|
||||
enabled = true
|
||||
action = joho[foo
|
||||
|
||||
[brokenfilterdef]
|
||||
enabled = true
|
||||
filter = flt[test
|
||||
|
||||
[brokenaction]
|
||||
enabled = true
|
||||
action = brokenaction
|
||||
|
||||
[missingaction]
|
||||
enabled = true
|
||||
action = noactionfileforthisaction
|
||||
|
||||
[missingbitsjail]
|
||||
enabled = true
|
||||
filter = catchallthebadies
|
||||
action = thefunkychickendance
|
||||
|
||||
[parse_to_end_of_jail.conf]
|
||||
enabled = true
|
||||
action =
|
||||
|
||||
[tz_correct]
|
||||
enabled = true
|
||||
logtimezone = UTC+0200
|
||||
|
||||
[multi-log]
|
||||
enabled = false
|
||||
filter =
|
||||
logpath = a.log
|
||||
b.log
|
||||
c.log
|
||||
log2nd = %(logpath)s
|
||||
d.log
|
||||
action = action[actname='ban']
|
||||
action[actname='log', logpath="%(log2nd)s"]
|
||||
action[actname='test']
|
||||
|
||||
[sshd-override-flt-opts]
|
||||
filter = zzz-sshd-obsolete-multiline[logtype=short]
|
||||
backend = systemd
|
||||
prefregex = ^Test
|
||||
failregex = ^Test unused <ADDR>$
|
||||
ignoreregex = ^Test ignore <ADDR>$
|
||||
journalmatch = _COMM=test
|
||||
maxlines = 2
|
||||
usedns = no
|
||||
enabled = false
|
||||
|
||||
[checklogtype_jrnl]
|
||||
filter = checklogtype
|
||||
backend = systemd
|
||||
action = action
|
||||
enabled = false
|
||||
|
||||
[checklogtype_file]
|
||||
filter = checklogtype
|
||||
backend = polling
|
||||
logpath = README.md
|
||||
action = action
|
||||
enabled = false
|
||||
|
||||
[checklogtype_test]
|
||||
filter = checklogtype_test
|
||||
backend = systemd
|
||||
action = action
|
||||
enabled = false
|
||||
|
||||
[checklogtype_init]
|
||||
filter = checklogtype_test[logtype=init]
|
||||
backend = systemd
|
||||
action = action
|
||||
enabled = false
|
||||
632
fail2ban-master/fail2ban/tests/databasetestcase.py
Normal file
632
fail2ban-master/fail2ban/tests/databasetestcase.py
Normal file
@@ -0,0 +1,632 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Fail2Ban developers
|
||||
|
||||
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
import tempfile
|
||||
import sqlite3
|
||||
import shutil
|
||||
|
||||
from ..server.filter import FileContainer, Filter
|
||||
from ..server.mytime import MyTime
|
||||
from ..server.ticket import FailTicket
|
||||
from ..server.actions import Actions, Utils
|
||||
from .dummyjail import DummyJail
|
||||
try:
|
||||
from ..server import database
|
||||
Fail2BanDb = database.Fail2BanDb
|
||||
except ImportError: # pragma: no cover
|
||||
Fail2BanDb = None
|
||||
from .utils import LogCaptureTestCase, logSys as DefLogSys, uni_decode
|
||||
|
||||
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "files")
|
||||
|
||||
|
||||
# because of tests performance use memory instead of file:
|
||||
def getFail2BanDb(filename):
|
||||
if unittest.F2B.memory_db: # pragma: no cover
|
||||
return Fail2BanDb(':memory:')
|
||||
return Fail2BanDb(filename)
|
||||
|
||||
|
||||
class DatabaseTest(LogCaptureTestCase):
|
||||
|
||||
def setUp(self):
|
||||
"""Call before every test case."""
|
||||
super(DatabaseTest, self).setUp()
|
||||
if Fail2BanDb is None: # pragma: no cover
|
||||
raise unittest.SkipTest(
|
||||
"Unable to import fail2ban database module as sqlite is not "
|
||||
"available.")
|
||||
self.dbFilename = None
|
||||
if not unittest.F2B.memory_db:
|
||||
_, self.dbFilename = tempfile.mkstemp(".db", "fail2ban_")
|
||||
self._db = ':auto-create-in-memory:'
|
||||
|
||||
@property
|
||||
def db(self):
|
||||
if isinstance(self._db, str) and self._db == ':auto-create-in-memory:':
|
||||
self._db = getFail2BanDb(self.dbFilename)
|
||||
return self._db
|
||||
@db.setter
|
||||
def db(self, value):
|
||||
if isinstance(self._db, Fail2BanDb): # pragma: no cover
|
||||
self._db.close()
|
||||
self._db = value
|
||||
|
||||
def tearDown(self):
|
||||
"""Call after every test case."""
|
||||
super(DatabaseTest, self).tearDown()
|
||||
if Fail2BanDb is None: # pragma: no cover
|
||||
return
|
||||
# Cleanup
|
||||
if self.dbFilename is not None:
|
||||
os.remove(self.dbFilename)
|
||||
|
||||
def testGetFilename(self):
|
||||
if self.db.filename == ':memory:': # pragma: no cover
|
||||
raise unittest.SkipTest("in :memory: database")
|
||||
self.assertEqual(self.dbFilename, self.db.filename)
|
||||
|
||||
def testPurgeAge(self):
|
||||
self.assertEqual(self.db.purgeage, 86400)
|
||||
self.db.purgeage = '1y6mon15d5h30m'
|
||||
self.assertEqual(self.db.purgeage, 48652200)
|
||||
self.db.purgeage = '2y 12mon 30d 10h 60m'
|
||||
self.assertEqual(self.db.purgeage, 48652200*2)
|
||||
|
||||
def testCreateInvalidPath(self):
|
||||
self.assertRaises(
|
||||
sqlite3.OperationalError,
|
||||
Fail2BanDb,
|
||||
"/this/path/should/not/exist")
|
||||
|
||||
def testCreateAndReconnect(self):
|
||||
if self.db.filename == ':memory:': # pragma: no cover
|
||||
raise unittest.SkipTest("in :memory: database")
|
||||
self.testAddJail()
|
||||
# Reconnect...
|
||||
self.db = Fail2BanDb(self.dbFilename)
|
||||
# and check jail of same name still present
|
||||
self.assertTrue(
|
||||
self.jail.name in self.db.getJailNames(),
|
||||
"Jail not retained in Db after disconnect reconnect.")
|
||||
|
||||
@staticmethod
|
||||
def _mockupFailedDB(): # pragma: no cover -- only sqlite >= 3.42
|
||||
"""[Mock-Up] broken connect to cover reparable restore."""
|
||||
_org_connect = sqlite3.connect;
|
||||
class _mckp_Cursor(sqlite3.Cursor):
|
||||
def execute(*args, **kwargs):
|
||||
# intended BOOM (simulate broken database):
|
||||
raise sqlite3.Error("[mock-up] broken database");
|
||||
class _mckp_Connection(sqlite3.Connection):
|
||||
def cursor(*args, **kwargs):
|
||||
return _mckp_Cursor(*args, **kwargs)
|
||||
def _mckp_connect(*args, **kwargs):
|
||||
DefLogSys.debug("[mock-up] broken connect to cover reparable restore")
|
||||
# restore original connect immediately:
|
||||
sqlite3.connect = _org_connect
|
||||
# return mockup connect (caused BOOM during first cursor execute):
|
||||
return _mckp_Connection(*args, **kwargs);
|
||||
sqlite3.connect = _mckp_connect;
|
||||
|
||||
def testRepairDb(self):
|
||||
ret = Utils.executeCmd("sqlite3 --version", output=True)
|
||||
if not ret or not ret[0]: # pragma: no cover
|
||||
raise unittest.SkipTest("no sqlite3 command")
|
||||
# version:
|
||||
ret = uni_decode(ret[1]).split(' ')
|
||||
ret = tuple(map(int, (str(ret[0]).split('.'))))if ret else (3,0,0);
|
||||
self.db = None
|
||||
if self.dbFilename is None: # pragma: no cover
|
||||
_, self.dbFilename = tempfile.mkstemp(".db", "fail2ban_")
|
||||
# test truncated database with different sizes:
|
||||
# - 14000 bytes - seems to be reparable,
|
||||
# - 4000 bytes - is totally broken.
|
||||
for truncSize in (14000, 4000):
|
||||
if truncSize >= 14000 and ret > (3,42): # pragma: no cover -- only sqlite >= 3.42
|
||||
truncSize = 14400
|
||||
self._mockupFailedDB(); # mock-up it to ensure it fails by open
|
||||
self.pruneLog("[test-repair], next phase - file-size: %d" % truncSize)
|
||||
shutil.copyfile(
|
||||
os.path.join(TEST_FILES_DIR, 'database_v1.db'), self.dbFilename)
|
||||
# produce corrupt database:
|
||||
f = os.open(self.dbFilename, os.O_RDWR)
|
||||
os.ftruncate(f, truncSize)
|
||||
os.close(f)
|
||||
# test repair:
|
||||
try:
|
||||
self.db = Fail2BanDb(self.dbFilename)
|
||||
if truncSize >= 14000: # restored:
|
||||
self.assertLogged("Repair seems to be successful",
|
||||
"Check integrity", "Database updated", all=True)
|
||||
self.assertEqual(self.db.getLogPaths(), set(['/tmp/Fail2BanDb_pUlZJh.log']))
|
||||
self.assertEqual(len(self.db.getJailNames()), 1)
|
||||
else: # recreated:
|
||||
self.assertLogged("Repair seems to be failed",
|
||||
"Check integrity", "New database created.", all=True)
|
||||
self.assertEqual(len(self.db.getLogPaths()), 0)
|
||||
self.assertEqual(len(self.db.getJailNames()), 0)
|
||||
finally:
|
||||
if self.db and self.db._dbFilename != ":memory:":
|
||||
os.remove(self.db._dbBackupFilename)
|
||||
self.db = None
|
||||
|
||||
def testUpdateDb(self):
|
||||
self.db = None
|
||||
try:
|
||||
if self.dbFilename is None: # pragma: no cover
|
||||
_, self.dbFilename = tempfile.mkstemp(".db", "fail2ban_")
|
||||
shutil.copyfile(
|
||||
os.path.join(TEST_FILES_DIR, 'database_v1.db'), self.dbFilename)
|
||||
self.db = Fail2BanDb(self.dbFilename)
|
||||
self.assertEqual(self.db.getJailNames(), set(['DummyJail #29162448 with 0 tickets']))
|
||||
self.assertEqual(self.db.getLogPaths(), set(['/tmp/Fail2BanDb_pUlZJh.log']))
|
||||
ticket = FailTicket("127.0.0.1", 1388009242.26, ["abc\n"])
|
||||
self.assertEqual(self.db.getBans()[0], ticket)
|
||||
|
||||
self.assertEqual(self.db.updateDb(Fail2BanDb.__version__), Fail2BanDb.__version__)
|
||||
self.assertRaises(NotImplementedError, self.db.updateDb, Fail2BanDb.__version__ + 1)
|
||||
# check current bans (should find exactly 1 ticket after upgrade):
|
||||
tickets = self.db.getCurrentBans(fromtime=1388009242, correctBanTime=123456)
|
||||
self.assertEqual(len(tickets), 1)
|
||||
self.assertEqual(tickets[0].getBanTime(), 123456); # ban-time was unknown (normally updated from jail)
|
||||
finally:
|
||||
if self.db and self.db._dbFilename != ":memory:":
|
||||
os.remove(self.db._dbBackupFilename)
|
||||
|
||||
def testUpdateDb2(self):
|
||||
self.db = None
|
||||
if self.dbFilename is None: # pragma: no cover
|
||||
_, self.dbFilename = tempfile.mkstemp(".db", "fail2ban_")
|
||||
shutil.copyfile(
|
||||
os.path.join(TEST_FILES_DIR, 'database_v2.db'), self.dbFilename)
|
||||
self.db = Fail2BanDb(self.dbFilename)
|
||||
self.assertEqual(self.db.getJailNames(), set(['pam-generic']))
|
||||
self.assertEqual(self.db.getLogPaths(), set(['/var/log/auth.log']))
|
||||
bans = self.db.getBans()
|
||||
self.assertEqual(len(bans), 2)
|
||||
# compare first ticket completely:
|
||||
ticket = FailTicket("1.2.3.7", 1417595494, [
|
||||
'Dec 3 09:31:08 f2btest test:auth[27658]: pam_unix(test:auth): authentication failure; logname= uid=0 euid=0 tty=test ruser= rhost=1.2.3.7',
|
||||
'Dec 3 09:31:32 f2btest test:auth[27671]: pam_unix(test:auth): authentication failure; logname= uid=0 euid=0 tty=test ruser= rhost=1.2.3.7',
|
||||
'Dec 3 09:31:34 f2btest test:auth[27673]: pam_unix(test:auth): authentication failure; logname= uid=0 euid=0 tty=test ruser= rhost=1.2.3.7'
|
||||
])
|
||||
ticket.setAttempt(3)
|
||||
self.assertEqual(bans[0], ticket)
|
||||
# second ban found also:
|
||||
self.assertEqual(bans[1].getID(), "1.2.3.8")
|
||||
# updated ?
|
||||
self.assertEqual(self.db.updateDb(Fail2BanDb.__version__), Fail2BanDb.__version__)
|
||||
# check current bans (should find 2 tickets after upgrade):
|
||||
self.jail = DummyJail(name='pam-generic')
|
||||
tickets = self.db.getCurrentBans(jail=self.jail, fromtime=1417595494)
|
||||
self.assertEqual(len(tickets), 2)
|
||||
self.assertEqual(tickets[0].getBanTime(), 600)
|
||||
# further update should fail:
|
||||
self.assertRaises(NotImplementedError, self.db.updateDb, Fail2BanDb.__version__ + 1)
|
||||
# clean:
|
||||
os.remove(self.db._dbBackupFilename)
|
||||
|
||||
def testAddJail(self):
|
||||
self.jail = DummyJail()
|
||||
self.db.addJail(self.jail)
|
||||
self.assertTrue(
|
||||
self.jail.name in self.db.getJailNames(True),
|
||||
"Jail not added to database")
|
||||
|
||||
def _testAddLog(self):
|
||||
self.testAddJail() # Jail required
|
||||
|
||||
_, filename = tempfile.mkstemp(".log", "Fail2BanDb_")
|
||||
self.fileContainer = FileContainer(filename, "utf-8")
|
||||
|
||||
pos = self.db.addLog(self.jail, self.fileContainer)
|
||||
self.assertTrue(pos is None); # unknown previously
|
||||
|
||||
self.assertIn(filename, self.db.getLogPaths(self.jail))
|
||||
os.remove(filename)
|
||||
|
||||
def testUpdateLog(self):
	"""Store and retrieve a log-file position; content change invalidates it.

	Flow: write a line and persist the position after it via updateLog; a
	fresh FileContainer for the same path must get that position back from
	addLog.  After the file content (and therefore its md5 hash) changes,
	addLog must no longer return the stored position.

	Fix: use ``with open(...)`` context managers instead of bare
	open/write/close so the handles are closed even if a write fails.
	"""
	self._testAddLog()  # Add log file (jail + db entry)

	# Write some text
	filename = self.fileContainer.getFileName()
	with open(filename, "w") as file_:
		file_.write("Some text to write which will change md5sum\n")
	self.fileContainer.open()
	self.fileContainer.readline()
	self.fileContainer.close()

	# Capture position which should be after line just written
	lastPos = self.fileContainer.getPos()
	self.assertTrue(lastPos > 0)
	self.db.updateLog(self.jail, self.fileContainer)

	# New FileContainer for file starts from position 0
	self.fileContainer = FileContainer(filename, "utf-8")
	self.assertEqual(self.fileContainer.getPos(), 0)

	# Database should return previous position in file
	self.assertEqual(
		self.db.addLog(self.jail, self.fileContainer), lastPos)

	# Change md5sum (truncate and write different content)
	with open(filename, "w") as file_:
		file_.write("Some different text to change md5sum\n")

	self.fileContainer = FileContainer(filename, "utf-8")
	self.assertEqual(self.fileContainer.getPos(), 0)

	# Database should be aware of md5sum change, such doesn't return
	# last position in file
	self.assertEqual(
		self.db.addLog(self.jail, self.fileContainer), None)
	os.remove(filename)
def testUpdateJournal(self):
	"""Journal position is absent at first, then inserted and overwritten."""
	self.testAddJail()  # Jail required
	# nothing stored for this journal yet:
	self.assertEqual(self.db.getJournalPos(self.jail, 'systemd-journal'), None)
	# one insert followed by two updates; the stored value must track each:
	for t in (1500000000, 1500000001, 1500000002):
		self.db.updateJournal(self.jail, 'systemd-journal', t, 'TEST'+str(t))
		self.assertEqual(self.db.getJournalPos(self.jail, 'systemd-journal'), t)
def testAddBan(self):
	"""A stored ban is read back as exactly one FailTicket."""
	self.testAddJail()
	self.db.addBan(self.jail, FailTicket("127.0.0.1", 0, ["abc\n"]))

	tickets = self.db.getBans(jail=self.jail)
	self.assertEqual(len(tickets), 1)
	self.assertTrue(isinstance(tickets[0], FailTicket))
def testAddBanInvalidEncoded(self):
	"""Store/load bans whose match lines contain invalid byte sequences.

	Phase 1: add 7 tickets with mixed valid/invalid str and bytes matches
	and verify they round-trip without "json dumps/loads failed" messages.
	Phase 2: force a non-existing preferred encoding and verify the json
	adapter/converter handlers are error-safe (errors logged, but all 14
	tickets still written and read).
	Phase 3: verify the database is still operable (not locked) afterwards.
	"""
	self.testAddJail()
	# invalid + valid, invalid + valid unicode, invalid + valid dual converted (like in filter:readline by fallback) ...
	tickets = [
		FailTicket("127.0.0.1", 0, ['user "test"', 'user "\xd1\xe2\xe5\xf2\xe0"', 'user "\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f"']),
		FailTicket("127.0.0.2", 0, ['user "test"', 'user "\xd1\xe2\xe5\xf2\xe0"', 'user "\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f"']),
		FailTicket("127.0.0.3", 0, ['user "test"', b'user "\xd1\xe2\xe5\xf2\xe0"', b'user "\xc3\xa4\xc3\xb6\xc3\xbc\xc3\x9f"']),
		FailTicket("127.0.0.4", 0, ['user "test"', 'user "\xd1\xe2\xe5\xf2\xe0"', 'user "\xe4\xf6\xfc\xdf"']),
		FailTicket("127.0.0.5", 0, ['user "test"', 'unterminated \xcf']),
		FailTicket("127.0.0.6", 0, ['user "test"', 'unterminated \xcf']),
		FailTicket("127.0.0.7", 0, ['user "test"', b'unterminated \xcf'])
	]
	for ticket in tickets:
		self.db.addBan(self.jail, ticket)

	self.assertNotLogged("json dumps failed")

	readtickets = self.db.getBans(jail=self.jail)

	self.assertNotLogged("json loads failed")

	## all tickets available
	self.assertEqual(len(readtickets), 7)

	## too different to cover all possible constellations for python 2 and 3,
	## can replace/ignore some non-ascii chars by json dump/load (unicode/str),
	## so check ip and matches count only:
	for i, ticket in enumerate(tickets):
		DefLogSys.debug('readtickets[%d]: %r', i, readtickets[i].getData())
		DefLogSys.debug(' == tickets[%d]: %r', i, ticket.getData())
		self.assertEqual(readtickets[i].getID(), ticket.getID())
		self.assertEqual(len(readtickets[i].getMatches()), len(ticket.getMatches()))

	self.pruneLog('[test-phase 2] simulate errors')
	## simulate errors in dumps/loads:
	priorEnc = database.PREFER_ENC
	try:
		database.PREFER_ENC = 'f2b-test::non-existing-encoding'

		for ticket in tickets:
			self.db.addBan(self.jail, ticket)

		self.assertLogged("json dumps failed")

		readtickets = self.db.getBans(jail=self.jail)

		self.assertLogged("json loads failed")

		## despite errors all tickets written and loaded (check adapter-handlers are error-safe):
		self.assertEqual(len(readtickets), 14)
	finally:
		# always restore the real preferred encoding for following tests
		database.PREFER_ENC = priorEnc

	## check the database is still operable (not locked) after all the errors:
	self.pruneLog('[test-phase 3] still operable?')
	self.db.addBan(self.jail, FailTicket("127.0.0.8"))
	readtickets = self.db.getBans(jail=self.jail)
	self.assertEqual(len(readtickets), 15)
	self.assertNotLogged("json loads failed", "json dumps failed")
def _testAdd3Bans(self):
	"""Helper: ban 192.0.2.1..3 and return the three stored tickets."""
	self.testAddJail()
	for i in (1, 2, 3):
		self.db.addBan(self.jail, FailTicket("192.0.2.%d" % i, 0, ["test\n"]))
	tickets = self.db.getBans(jail=self.jail)
	self.assertEqual(len(tickets), 3)
	return tickets
def testDelBan(self):
	"""delBan accepts a single ID as well as several IDs at once."""
	first, second, third = self._testAdd3Bans()
	# delete single IP:
	self.db.delBan(self.jail, first.getID())
	self.assertEqual(len(self.db.getBans(jail=self.jail)), 2)
	# delete two IPs:
	self.db.delBan(self.jail, second.getID(), third.getID())
	self.assertEqual(len(self.db.getBans(jail=self.jail)), 0)
def testFlushBans(self):
	"""delBan without IDs flushes every ban of the jail."""
	self._testAdd3Bans()
	self.db.delBan(self.jail)
	self.assertEqual(len(self.db.getBans(jail=self.jail)), 0)
def testGetBansWithTime(self):
	"""getBans(bantime=...) drops expired bans; negative bantime keeps all."""
	self.testAddJail()
	for age in (60, 40):
		self.db.addBan(
			self.jail, FailTicket("127.0.0.1", MyTime.time() - age, ["abc\n"]))

	self.assertEqual(len(self.db.getBans(jail=self.jail,bantime=50)), 1)
	self.assertEqual(len(self.db.getBans(jail=self.jail,bantime=20)), 0)
	# Negative values are for persistent bans, and such all bans should
	# be returned
	self.assertEqual(len(self.db.getBans(jail=self.jail,bantime=-1)), 2)
def testGetBansMerged_MaxMatches(self):
	"""maxMatches caps the matches kept/returned for merged and current bans,
	while the attempt counter still accumulates all failures.

	Covers: db-level ``maxMatches`` (2 and 0), per-call ``maxmatches``
	(1 and 0) of getCurrentBans, and getBansMerged after sequential as
	well as bulk failure additions.
	"""
	self.testAddJail()
	maxMatches = 2
	failures = [
		{"matches": ["abc\n"], "user": set(['test'])},
		{"matches": ["123\n"], "user": set(['test'])},
		{"matches": ["ABC\n"], "user": set(['test', 'root'])},
		{"matches": ["1234\n"], "user": set(['test', 'root'])},
	]
	matches2find = [f["matches"][0] for f in failures]
	# add failures sequential:
	i = 80
	for f in failures:
		i -= 10
		ticket = FailTicket("127.0.0.1", MyTime.time() - i, data=f)
		ticket.setAttempt(1)
		self.db.addBan(self.jail, ticket)
	# should retrieve 2 matches only, but count of all attempts:
	self.db.maxMatches = maxMatches;
	ticket = self.db.getBansMerged("127.0.0.1")
	self.assertEqual(ticket.getID(), "127.0.0.1")
	self.assertEqual(ticket.getAttempt(), len(failures))
	self.assertEqual(len(ticket.getMatches()), maxMatches)
	# only the newest maxMatches matches survive:
	self.assertEqual(ticket.getMatches(), matches2find[-maxMatches:])
	# add more failures at once:
	ticket = FailTicket("127.0.0.1", MyTime.time() - 10, matches2find,
		data={"user": set(['test', 'root'])})
	ticket.setAttempt(len(failures))
	self.db.addBan(self.jail, ticket)
	# should retrieve 2 matches only, but count of all attempts:
	ticket = self.db.getBansMerged("127.0.0.1")
	self.assertEqual(ticket.getAttempt(), 2 * len(failures))
	self.assertEqual(len(ticket.getMatches()), maxMatches)
	self.assertEqual(ticket.getMatches(), matches2find[-maxMatches:])
	# also using getCurrentBans:
	ticket = self.db.getCurrentBans(self.jail, "127.0.0.1", fromtime=MyTime.time()-100)
	self.assertTrue(ticket is not None)
	self.assertEqual(ticket.getAttempt(), len(failures))
	self.assertEqual(len(ticket.getMatches()), maxMatches)
	self.assertEqual(ticket.getMatches(), matches2find[-maxMatches:])
	# maxmatches of jail < dbmaxmatches (so read 1 match and 0 matches):
	ticket = self.db.getCurrentBans(self.jail, "127.0.0.1", fromtime=MyTime.time()-100,
		maxmatches=1)
	self.assertEqual(len(ticket.getMatches()), 1)
	self.assertEqual(ticket.getMatches(), failures[3]['matches'])
	ticket = self.db.getCurrentBans(self.jail, "127.0.0.1", fromtime=MyTime.time()-100,
		maxmatches=0)
	self.assertEqual(len(ticket.getMatches()), 0)
	# dbmaxmatches = 0, should retrieve 0 matches by last ban:
	ticket.setMatches(["1","2","3"])
	self.db.maxMatches = 0;
	self.db.addBan(self.jail, ticket)
	ticket = self.db.getCurrentBans(self.jail, "127.0.0.1", fromtime=MyTime.time()-100)
	self.assertTrue(ticket is not None)
	self.assertEqual(ticket.getAttempt(), len(failures))
	self.assertEqual(len(ticket.getMatches()), 0)
def testGetBansMerged(self):
	"""Merge bans across jails, check result caching, bantime filtering and
	getCurrentBans expiry semantics (incl. persistent bantime -1).

	Uses two jails with bans for 127.0.0.1/127.0.0.2 at staggered ages
	(-40, -30, -20, -10 seconds) so the bantime cut-offs below are exact.
	"""
	self.testAddJail()

	jail2 = DummyJail(name='DummyJail-2')
	self.db.addJail(jail2)

	ticket = FailTicket("127.0.0.1", MyTime.time() - 40, ["abc\n"])
	ticket.setAttempt(10)
	self.db.addBan(self.jail, ticket)
	ticket = FailTicket("127.0.0.1", MyTime.time() - 30, ["123\n"])
	ticket.setAttempt(20)
	self.db.addBan(self.jail, ticket)
	ticket = FailTicket("127.0.0.2", MyTime.time() - 20, ["ABC\n"])
	ticket.setAttempt(30)
	self.db.addBan(self.jail, ticket)
	ticket = FailTicket("127.0.0.1", MyTime.time() - 10, ["ABC\n"])
	ticket.setAttempt(40)
	self.db.addBan(jail2, ticket)

	# All for IP 127.0.0.1 (both jails merged: 10+20+40 attempts)
	ticket = self.db.getBansMerged("127.0.0.1")
	self.assertEqual(ticket.getID(), "127.0.0.1")
	self.assertEqual(ticket.getAttempt(), 70)
	self.assertEqual(ticket.getMatches(), ["abc\n", "123\n", "ABC\n"])

	# All for IP 127.0.0.1 for single jail
	ticket = self.db.getBansMerged("127.0.0.1", jail=self.jail)
	self.assertEqual(ticket.getID(), "127.0.0.1")
	self.assertEqual(ticket.getAttempt(), 30)
	self.assertEqual(ticket.getMatches(), ["abc\n", "123\n"])

	# Should cache result if no extra bans added (identity check via id()):
	self.assertEqual(
		id(ticket),
		id(self.db.getBansMerged("127.0.0.1", jail=self.jail)))

	newTicket = FailTicket("127.0.0.2", MyTime.time() - 20, ["ABC\n"])
	ticket.setAttempt(40)
	# Add ticket, but not for same IP, so cache still valid
	self.db.addBan(self.jail, newTicket)
	self.assertEqual(
		id(ticket),
		id(self.db.getBansMerged("127.0.0.1", jail=self.jail)))

	newTicket = FailTicket("127.0.0.1", MyTime.time() - 10, ["ABC\n"])
	ticket.setAttempt(40)
	self.db.addBan(self.jail, newTicket)
	# Added ticket, so cache should have been cleared
	self.assertNotEqual(
		id(ticket),
		id(self.db.getBansMerged("127.0.0.1", jail=self.jail)))

	# no jail filter: one merged ticket per distinct IP, no duplicate IDs:
	tickets = self.db.getBansMerged()
	self.assertEqual(len(tickets), 2)
	self.assertSortedEqual(
		list(set(ticket.getID() for ticket in tickets)),
		[ticket.getID() for ticket in tickets])

	tickets = self.db.getBansMerged(jail=jail2)
	self.assertEqual(len(tickets), 1)

	# bantime cut-offs against the -40..-10s ban ages above:
	tickets = self.db.getBansMerged(bantime=25)
	self.assertEqual(len(tickets), 2)
	tickets = self.db.getBansMerged(bantime=15)
	self.assertEqual(len(tickets), 1)
	tickets = self.db.getBansMerged(bantime=5)
	self.assertEqual(len(tickets), 0)
	# Negative values are for persistent bans, and such all bans should
	# be returned
	tickets = self.db.getBansMerged(bantime=-1)
	self.assertEqual(len(tickets), 2)
	# getCurrentBans:
	tickets = self.db.getCurrentBans(jail=self.jail)
	self.assertEqual(len(tickets), 2)
	ticket = self.db.getCurrentBans(jail=None, ip="127.0.0.1");
	self.assertEqual(ticket.getID(), "127.0.0.1")

	# positive case (1 ticket not yet expired):
	tickets = self.db.getCurrentBans(jail=self.jail, forbantime=15,
		fromtime=MyTime.time())
	self.assertEqual(len(tickets), 1)
	# negative case (all are expired in 1year):
	tickets = self.db.getCurrentBans(jail=self.jail, forbantime=15,
		fromtime=MyTime.time() + MyTime.str2seconds("1year"))
	self.assertEqual(len(tickets), 0)
	# persistent bantime (-1), so never expired (but no persistent tickets):
	tickets = self.db.getCurrentBans(jail=self.jail, forbantime=-1,
		fromtime=MyTime.time() + MyTime.str2seconds("1year"))
	self.assertEqual(len(tickets), 0)
	# add persistent one:
	ticket.setBanTime(-1)
	self.db.addBan(self.jail, ticket)
	# persistent bantime (-1), so never expired (but jail has other max bantime now):
	tickets = self.db.getCurrentBans(jail=self.jail, forbantime=-1,
		fromtime=MyTime.time() + MyTime.str2seconds("1year"))
	# no tickets should be found (max ban time = 600):
	self.assertEqual(len(tickets), 0)
	self.assertLogged("ignore ticket (with new max ban-time %r)" % self.jail.getMaxBanTime())
	# change jail to persistent ban and try again (1 persistent ticket):
	self.jail.actions.setBanTime(-1)
	tickets = self.db.getCurrentBans(jail=self.jail, forbantime=-1,
		fromtime=MyTime.time() + MyTime.str2seconds("1year"))
	self.assertEqual(len(tickets), 1)
	self.assertEqual(tickets[0].getBanTime(), -1); # current jail ban time.
def testActionWithDB(self):
	"""Run an action through the ban machinery with the database attached and
	check the aInfo/jail statistics the test action logs."""
	# test action together with database functionality
	self.testAddJail() # Jail required
	self.jail.database = self.db
	self.db.addJail(self.jail)
	actions = self.jail.actions
	actions.add(
		"action_checkainfo",
		os.path.join(TEST_FILES_DIR, "action.d/action_checkainfo.py"),
		{})
	actions.banManager.setBanTotal(20)
	# install a real Filter via the name-mangled private attribute of Jail:
	self.jail._Jail__filter = flt = Filter(self.jail)
	flt.failManager.setFailTotal(50)
	ticket = FailTicket("1.2.3.4")
	ticket.setAttempt(5)
	ticket.setMatches(['test', 'test'])
	self.jail.putFailTicket(ticket)
	# invoke the (private) ban check directly instead of running the actions thread:
	actions._Actions__checkBan()
	self.assertLogged("ban ainfo %s, %s, %s, %s" % (True, True, True, True))
	# banned=1, banTotal=21 (20 + this ban), 0 current failures, failTotal=50:
	self.assertLogged("jail info %d, %d, %d, %d" % (1, 21, 0, 50))
def testDelAndAddJail(self):
	"""Deleting a jail only disables it; re-adding re-enables it.

	Fix: the former ``assertIn(len(jails) == 1 and self.jail.name, jails)``
	collapsed two checks into one expression — on a length mismatch it
	degenerated to checking ``False in jails`` with a confusing failure
	message.  Length and membership are now asserted separately.
	"""
	self.testAddJail() # Add jail
	# Delete jail (just disables it):
	self.db.delJail(self.jail)
	jails = self.db.getJailNames()
	self.assertEqual(len(jails), 1)
	self.assertIn(self.jail.name, jails)
	jails = self.db.getJailNames(enabled=False)
	self.assertEqual(len(jails), 1)
	self.assertIn(self.jail.name, jails)
	jails = self.db.getJailNames(enabled=True)
	self.assertEqual(len(jails), 0)
	# Add it again - should just enable it:
	self.db.addJail(self.jail)
	jails = self.db.getJailNames()
	self.assertEqual(len(jails), 1)
	self.assertIn(self.jail.name, jails)
	jails = self.db.getJailNames(enabled=True)
	self.assertEqual(len(jails), 1)
	self.assertIn(self.jail.name, jails)
	jails = self.db.getJailNames(enabled=False)
	self.assertEqual(len(jails), 0)
def testPurge(self):
	"""purge removes disabled jails, but keeps a jail that still has bans."""
	self.testAddJail() # Add jail

	self.db.purge() # Jail enabled by default so shouldn't be purged
	self.assertEqual(len(self.db.getJailNames()), 1)

	self.db.delJail(self.jail)
	self.db.purge() # Should remove jail
	self.assertEqual(len(self.db.getJailNames()), 0)

	self.testAddBan()
	self.db.delJail(self.jail)
	self.db.purge() # Purge should remove all bans
	self.assertEqual(len(self.db.getJailNames()), 0)
	self.assertEqual(len(self.db.getBans(jail=self.jail)), 0)

	# Should leave jail (a current ban references it)
	self.testAddJail()
	self.db.addBan(
		self.jail, FailTicket("127.0.0.1", MyTime.time(), ["abc\n"]))
	self.db.delJail(self.jail)
	self.db.purge() # Should leave jail as ban present
	self.assertEqual(len(self.db.getJailNames()), 1)
	self.assertEqual(len(self.db.getBans(jail=self.jail)), 1)
593
fail2ban-master/fail2ban/tests/datedetectortestcase.py
Normal file
593
fail2ban-master/fail2ban/tests/datedetectortestcase.py
Normal file
@@ -0,0 +1,593 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import unittest
|
||||
import time
|
||||
import datetime
|
||||
|
||||
from ..server.datedetector import DateDetector
|
||||
from ..server import datedetector
|
||||
from ..server.datetemplate import DatePatternRegex, DateTemplate
|
||||
from .utils import setUpMyTime, tearDownMyTime, LogCaptureTestCase
|
||||
from ..helpers import getLogger
|
||||
|
||||
logSys = getLogger("fail2ban")
|
||||
|
||||
|
||||
class DateDetectorTest(LogCaptureTestCase):
|
||||
|
||||
def setUp(self):
	"""Call before every test case."""
	LogCaptureTestCase.setUp(self)
	setUpMyTime()
	# lazily (re)created by the `datedetector` property
	self.__datedetector = None
||||
def tearDown(self):
	"""Call after every test case."""
	LogCaptureTestCase.tearDown(self)
	tearDownMyTime()
@property
def datedetector(self):
	"""Lazily created DateDetector loaded with the default templates.

	Tests that need specific templates assign self.__datedetector
	themselves before first access.
	"""
	if self.__datedetector is None:
		self.__datedetector = DateDetector()
		self.__datedetector.addDefaultTemplate()
	return self.__datedetector
||||
def testGetEpochTime(self):
	"""EPOCH template: accept 10/11-digit epochs in several decorations,
	reject too-short/too-long numbers and digits glued to word chars."""
	self.__datedetector = DateDetector()
	self.__datedetector.appendTemplate('EPOCH')
	# correct epoch time, using all variants:
	for dateUnix in (1138049999, 32535244799):
		for date in ("%s", "[%s]", "[%s.555]", "audit(%s.555:101)"):
			date = date % dateUnix
			log = date + " [sshd] error: PAM: Authentication failure"
			datelog = self.datedetector.getTime(log)
			self.assertTrue(datelog, "Parse epoch time for %s failed" % (date,))
			( datelog, matchlog ) = datelog
			self.assertEqual(int(datelog), dateUnix)
			# matched group is either the bare epoch or with the .555 fraction:
			self.assertIn(matchlog.group(1), (str(dateUnix), str(dateUnix)+'.555'))
	# wrong, no epoch time (< 10 digits, more as 11 digits, begin/end of word) :
	for dateUnix in ('123456789', '9999999999999999', '1138049999A', 'A1138049999'):
		for date in ("%s", "[%s]", "[%s.555]", "audit(%s.555:101)"):
			date = date % dateUnix
			log = date + " [sshd] error: PAM: Authentication failure"
			datelog = self.datedetector.getTime(log)
			self.assertFalse(datelog)
||||
def testGetEpochMsTime(self):
	"""LEPOCH template: accept second-, milli- and micro-second epochs
	(factor 1/1000/1000000) and normalize them back to seconds."""
	self.__datedetector = DateDetector()
	self.__datedetector.appendTemplate('LEPOCH')
	# correct short/long epoch time, using all variants:
	for fact in (1, 1000, 1000000):
		for dateUnix in (1138049999, 32535244799):
			# NOTE(review): "[%s]" appears twice — possibly one entry was
			# meant to be another decoration variant; confirm intent.
			for date in ("%s", "[%s]", "[%s]", "audit(%s:101)"):
				dateLong = dateUnix * fact
				date = date % dateLong
				log = date + " [sshd] error: PAM: Authentication failure"
				datelog = self.datedetector.getTime(log)
				self.assertTrue(datelog, "Parse epoch time for %s failed" % (date,))
				( datelog, matchlog ) = datelog
				# parsed value is scaled back to seconds:
				self.assertEqual(int(datelog), dateUnix)
				self.assertEqual(matchlog.group(1), str(dateLong))
	# wrong, no epoch time (< 10 digits, more as 17 digits, begin/end of word) :
	for dateUnix in ('123456789', '999999999999999999', '1138049999A', 'A1138049999'):
		for date in ("%s", "[%s]", "[%s.555]", "audit(%s.555:101)"):
			date = date % dateUnix
			log = date + " [sshd] error: PAM: Authentication failure"
			datelog = self.datedetector.getTime(log)
			self.assertFalse(datelog)
||||
def testGetEpochPattern(self):
	"""Custom template embedding {LEPOCH} between look-around anchors:
	the epoch must only match when delimited by ' | ' on both sides."""
	self.__datedetector = DateDetector()
	self.__datedetector.appendTemplate(r'(?<=\|\s){LEPOCH}(?=\s\|)')
	# correct short/long epoch time, using all variants:
	for fact in (1, 1000, 1000000):
		for dateUnix in (1138049999, 32535244799):
			dateLong = dateUnix * fact
			log = "auth-error | %s | invalid password" % dateLong
			datelog = self.datedetector.getTime(log)
			self.assertTrue(datelog, "Parse epoch time failed: %r" % (log,))
			( datelog, matchlog ) = datelog
			self.assertEqual(int(datelog), dateUnix)
			self.assertEqual(matchlog.group(1), str(dateLong))
	# wrong epoch time format (does not match pattern):
	for log in ("test%s123", "test-right | %stest", "test%s | test-left"):
		log = log % dateLong
		datelog = self.datedetector.getTime(log)
		self.assertFalse(datelog)
||||
def testGetEpochPatternCut(self):
	"""Anchored audit pattern captures only the epoch substring (group 1),
	not the whole matched prefix."""
	self.__datedetector = DateDetector()
	self.__datedetector.appendTemplate(r'^type=\S+ msg=audit\(({EPOCH})')
	line = "type=USER_AUTH msg=audit(1106513999.000:987)"
	datelog = self.datedetector.getTime(line)
	timeMatch = datelog[1]
	self.assertEqual(int(datelog[0]), 1106513999)
	self.assertEqual(line[timeMatch.start(1):timeMatch.end(1)], '1106513999.000')
||||
def testGetTime(self):
	"""Syslog-style timestamp resolves to the expected epoch value."""
	log = "Jan 23 21:59:59 [sshd] error: PAM: Authentication failure"
	# yoh: testing only up to 6 elements, since the day of the week
	# is not correctly determined atm, since year is not present
	# in the log entry. Since this doesn't effect the operation
	# of fail2ban -- we just ignore incorrect day of the week
	datelog, matchlog = self.datedetector.getTime(log)
	self.assertEqual(datelog, 1106513999.0)
	self.assertEqual(matchlog.group(1), 'Jan 23 21:59:59')
||||
def testDefaultTimeZone(self):
	"""default_tz supplies the zone for zone-less lines; a zone present in
	the line itself always wins.  Invalid zone names raise ValueError."""
	# use special date-pattern (with %Exz), because %z currently does not supported
	# zone abbreviations except Z|UTC|GMT.
	dd = DateDetector()
	dd.appendTemplate('^%ExY-%Exm-%Exd %H:%M:%S(?: ?%Exz)?')
	dt = datetime.datetime
	logdt = "2017-01-23 15:00:00"
	dtUTC = dt(2017, 1, 23, 15, 0)
	for tz, log, desired in (
		# no TZ in input-string:
		('UTC+0300', logdt, dt(2017, 1, 23, 12, 0)), # so in UTC, it was noon
		('UTC', logdt, dtUTC), # UTC
		('UTC-0430', logdt, dt(2017, 1, 23, 19, 30)),
		('GMT+12', logdt, dt(2017, 1, 23, 3, 0)),
		(None, logdt, dt(2017, 1, 23, 14, 0)), # default CET in our test-framework
		# CET:
		('CET', logdt, dt(2017, 1, 23, 14, 0)),
		('+0100', logdt, dt(2017, 1, 23, 14, 0)),
		('CEST-01', logdt, dt(2017, 1, 23, 14, 0)),
		# CEST:
		('CEST', logdt, dt(2017, 1, 23, 13, 0)),
		('+0200', logdt, dt(2017, 1, 23, 13, 0)),
		('CET+01', logdt, dt(2017, 1, 23, 13, 0)),
		('CET+0100', logdt, dt(2017, 1, 23, 13, 0)),
		# check offset in minutes:
		('CET+0130', logdt, dt(2017, 1, 23, 12, 30)),
		# TZ in input-string have precedence:
		('UTC+0300', logdt+' GMT', dtUTC), # GMT wins
		('UTC', logdt+' GMT', dtUTC), # GMT wins
		('UTC-0430', logdt+' GMT', dtUTC), # GMT wins
		(None, logdt+' GMT', dtUTC), # GMT wins
		('UTC', logdt+' -1045', dt(2017, 1, 24, 1, 45)), # -1045 wins
		(None, logdt+' -10:45', dt(2017, 1, 24, 1, 45)), # -1045 wins
		('UTC', logdt+' +0945', dt(2017, 1, 23, 5, 15)), # +0945 wins
		(None, logdt+' +09:45', dt(2017, 1, 23, 5, 15)), # +0945 wins
		('UTC+0300', logdt+' Z', dtUTC), # Z wins (UTC)
		('GMT+12', logdt+' CET', dt(2017, 1, 23, 14, 0)), # CET wins
		('GMT+12', logdt+' CEST', dt(2017, 1, 23, 13, 0)), # CEST wins
		('GMT+12', logdt+' CET+0130', dt(2017, 1, 23, 12, 30)), # CET+0130 wins
	):
		logSys.debug('== test %r with TZ %r', log, tz)
		dd.default_tz=tz; datelog, _ = dd.getTime(log)
		# compare in UTC to be independent of the host's local zone:
		val = dt.utcfromtimestamp(datelog)
		self.assertEqual(val, desired,
			"wrong offset %r != %r by %r with default TZ %r (%r)" % (val, desired, log, tz, dd.default_tz))

	# setting an unknown zone name must be rejected:
	self.assertRaises(ValueError, setattr, dd, 'default_tz', 'WRONG-TZ')
	dd.default_tz = None
||||
def testVariousTimes(self):
	"""Test detection of various common date/time formats f2b should understand

	Each table row is tried with three prefixes: none (must match when a
	result is expected), a bogus prefix (must still match unless the
	pattern is anchored) and a glued "word-boundary" prefix (must never
	match; skipped for unbound patterns).  Both getTime(log) and the
	matchTime+getTime combination used by the filter are exercised.
	"""
	dateUnix = 1106513999.0

	# anchored - matching expression (pattern) is anchored
	# bound - pattern can be tested using word boundary (e.g. False if contains in front some optional part)
	# sdate - date string used in test log-line
	# rdate - if specified, the result match, which differs from sdate
	for anchored, bound, sdate, rdate in (
		(False, True, "Jan 23 21:59:59", None),
		(False, False, "Sun Jan 23 21:59:59 2005", None),
		(False, False, "Sun Jan 23 21:59:59", None),
		(False, False, "Sun Jan 23 2005 21:59:59", None),
		(False, True, "2005/01/23 21:59:59", None),
		(False, True, "2005.01.23 21:59:59", None),
		(False, True, "23/01/2005 21:59:59", None),
		(False, True, "23/01/05 21:59:59", None),
		(False, True, "23/Jan/2005:21:59:59", None),
		(False, True, "23/Jan/2005:21:59:59 +0100", None),
		(False, True, "01/23/2005:21:59:59", None),
		(False, True, "2005-01-23 21:59:59", None),
		(False, True, "2005-01-23 21:59:59,000", None), # proftpd
		(False, True, "23-Jan-2005 21:59:59", None),
		(False, True, "23-Jan-2005 21:59:59.02", None),
		(False, True, "23-Jan-2005 21:59:59 +0100", None),
		(False, True, "23-01-2005 21:59:59", None),
		(True, True, "1106513999", None), # Portsetry
		(False, True, "01-23-2005 21:59:59.252", None), # reported on f2b, causes Feb29 fix to break
		(False, False, "@4000000041f4104f00000000", None), # TAI64N
		(False, True, "2005-01-23T20:59:59.252Z", None), #ISO 8601 (UTC)
		(False, True, "2005-01-23T15:59:59-05:00", None), #ISO 8601 with TZ
		(False, True, "2005-01-23 21:59:59", None), #ISO 8601 no TZ, assume local
		(False, True, "20050123T215959", None), #Short ISO with T
		(False, True, "20050123 215959", None), #Short ISO with space
		(True, True, "<01/23/05@21:59:59>", None),
		(False, True, "050123 21:59:59", None), # MySQL
		(True, True, "Jan-23-05 21:59:59", None), # ASSP like
		(False, True, "Jan 23, 2005 9:59:59 PM", None), # Apache Tomcat
		(True, True, "1106513999", None), # Regular epoch
		(True, True, "1106513999.000", None), # Regular epoch with millisec
		(True, True, "[1106513999.000]", "1106513999.000"), # epoch squared (brackets are not in match)
		(False, True, "audit(1106513999.000:987)", "1106513999.000"), # SELinux
		(True, True, "no date line", None), # no date in string
	):
		if rdate is None and sdate != "no date line": rdate = sdate
		logSys.debug('== test %r', (anchored, bound, sdate, rdate))
		for should_match, prefix in (
			(rdate is not None, ""),
			(not anchored, "bogus-prefix "),
			(False, "word-boundary")
		):
			log = prefix + sdate + "[sshd] error: PAM: Authentication failure"
			# if not allowed boundary test:
			if not bound and prefix == "word-boundary": continue
			logSys.debug(' -- test %-5s for %r', should_match, log)
			# with getTime:
			logtime = self.datedetector.getTime(log)
			if should_match:
				self.assertNotEqual(logtime, None,
					"getTime retrieved nothing: failure for %s by prefix %r, anchored: %r, log: %s" % ( sdate, prefix, anchored, log))
				( logUnix, logMatch ) = logtime
				self.assertEqual(logUnix, dateUnix,
					"getTime comparison failure for %s: by prefix %r \"%s\" is not \"%s\"" % (sdate, prefix, logUnix, dateUnix))
				self.assertEqual(logMatch.group(1), rdate)
			else:
				self.assertEqual(logtime, None,
					"getTime should have not matched for %r by prefix %r Got: %s" % (sdate, prefix, logtime))
			# with getTime(matchTime) - this combination used in filter:
			(timeMatch, template) = matchTime = self.datedetector.matchTime(log)
			logtime = self.datedetector.getTime(log, matchTime)
			logSys.debug(' -- found - %r', template.name if timeMatch else False)
			if should_match:
				self.assertNotEqual(logtime, None,
					"getTime retrieved nothing: failure for %s by prefix %r, anchored: %r, log: %s" % ( sdate, prefix, anchored, log))
				( logUnix, logMatch ) = logtime
				self.assertEqual(logUnix, dateUnix,
					"getTime comparison failure for %s by prefix %r: \"%s\" is not \"%s\"" % (sdate, prefix, logUnix, dateUnix))
				self.assertEqual(logMatch.group(1), rdate)
			else:
				self.assertEqual(logtime, None,
					"getTime should have not matched for %r by prefix %r Got: %s" % (sdate, prefix, logtime))
			logSys.debug(' -- OK')
||||
def testAllUniqueTemplateNames(self):
	"""Appending an already registered template must raise ValueError."""
	dd = self.datedetector
	self.assertRaises(ValueError, dd.appendTemplate, dd.templates[0])
||||
def testFullYearMatch_gh130(self):
	"""Year-first and year-last dates keep matching in any order (gh-130)."""
	# see https://github.com/fail2ban/fail2ban/pull/130
	# yoh: unfortunately this test is not really effective to reproduce the
	# situation but left in place to assure consistent behavior
	mu = time.mktime(datetime.datetime(2012, 10, 11, 2, 37, 17).timetuple())
	logdate = self.datedetector.getTime('2012/10/11 02:37:17 [error] 18434#0')
	self.assertNotEqual(logdate, None)
	( logTime, logMatch ) = logdate
	self.assertEqual(logTime, mu)
	self.assertEqual(logMatch.group(1), '2012/10/11 02:37:17')
	# confuse it with year being at the end (repeat to exercise template reordering)
	for i in range(10):
		( logTime, logMatch ) = self.datedetector.getTime('11/10/2012 02:37:17 [error] 18434#0')
		self.assertEqual(logTime, mu)
		self.assertEqual(logMatch.group(1), '11/10/2012 02:37:17')
	# and now back to the original
	( logTime, logMatch ) = self.datedetector.getTime('2012/10/11 02:37:17 [error] 18434#0')
	self.assertEqual(logTime, mu)
	self.assertEqual(logMatch.group(1), '2012/10/11 02:37:17')
||||
def testDateTemplate(self):
    """Exercise DateTemplate/DatePatternRegex boundary and flag handling.

    Covers: wrapping of a raw regex in a capture group, the `**` unbound
    markers, the `{^LN-BEG}` line-begin anchor, and automatic (?iu) flags.
    """
    t = DateTemplate()
    t.setRegex('^a{3,5}b?c*$')
    # setRegex wraps the whole expression into one capture group:
    self.assertEqual(t.regex, '^(a{3,5}b?c*)$')
    self.assertRaises(Exception, t.getDate, '')
    self.assertEqual(t.matchDate('aaaac').group(1), 'aaaac')

    ## no word boundaries left and right:
    t = DatePatternRegex()
    t.pattern = '(?iu)**time:%ExY%Exm%ExdT%ExH%ExM%ExS**'
    # ** was removed from end-regex:
    self.assertFalse('**' in t.regex)
    # match date:
    dt = 'TIME:20050102T010203'
    self.assertEqual(t.matchDate('X' + dt + 'X').group(1), dt)
    self.assertEqual(t.matchDate(dt).group(1), dt)
    # wrong year (for exact %ExY):
    dt = 'TIME:50050102T010203'
    self.assertFalse(t.matchDate(dt))

    ## start boundary left and word boundary right (automatically if not **):
    t = DatePatternRegex()
    t.pattern = '{^LN-BEG}time:%ExY%Exm%ExdT%ExH%ExM%ExS'
    self.assertTrue('^' in t.regex)
    # try match date:
    dt = 'time:20050102T010203'
    self.assertFalse(t.matchDate('X' + dt))
    self.assertFalse(t.matchDate(dt + 'X'))
    # up to a couple of leading non-alphanumeric chars are tolerated:
    self.assertEqual(t.matchDate('##' + dt + '...').group(1), dt)
    self.assertEqual(t.matchDate(dt).group(1), dt)
    # case sensitive:
    dt = 'TIME:20050102T010203'
    self.assertFalse(t.matchDate(dt))

    ## auto-switching "ignore case" and "unicode"
    t = DatePatternRegex()
    t.pattern = '^%Y %b %d'
    self.assertTrue('(?iu)' in t.regex)
    dt = '2005 jun 03'; self.assertEqual(t.matchDate(dt).group(1), dt)
    dt = '2005 Jun 03'; self.assertEqual(t.matchDate(dt).group(1), dt)
    dt = '2005 JUN 03'; self.assertEqual(t.matchDate(dt).group(1), dt)
def testNotAnchoredCollision(self):
    """Varying-width H:M:S timestamps must match regardless of position.

    All probe times resolve to the same unix time (1123970462.0 -- the
    mocked "now"-based date with time 00:01:02); only the matched text
    differs per probe.
    """
    # try for patterns with and without word boundaries:
    for dp in (r'%H:%M:%S', r'{UNB}%H:%M:%S'):
        dd = DateDetector()
        dd.appendTemplate(dp)
        # boundary of timestamp changes right and left (and time is left and right in line):
        for fmt in ('%s test', '%8s test', 'test %s', 'test %8s'):
            for dt in (
                '00:01:02',
                '00:01:2',
                '00:1:2',
                '0:1:2',
                '00:1:2',
                '00:01:2',
                '00:01:02',
                '0:1:2',
                '00:01:02',
            ):
                t = dd.getTime(fmt % dt)
                self.assertEqual((t[0], t[1].group()), (1123970462.0, dt))
def testAmbiguousInOrderedTemplates(self):
    """Repeated matching must keep finding the expected date (template sort stability).

    Each row is (expected-match, log line, repeat count); repeating `cnt`
    times exercises the detector's internal template reordering/statistics.
    """
    dd = self.datedetector
    for (debit, line, cnt) in (
        # shortest distance to datetime should win:
        ("030324 0:03:59", "some free text 030324 0:03:59 -- 2003-03-07 17:05:01 ...", 1),
        # some free text with datetime:
        ("2003-03-07 17:05:01", "some free text 2003-03-07 17:05:01 test ...", 15),
        # distance collision detection (date from foreign input should not be found):
        ("030324 0:04:00", "server mysqld[1000]: 030324 0:04:00 [Warning] Access denied ..."
            " foreign-input just some free text 2003-03-07 17:05:01 test", 10),
        # distance collision detection (first date should be found):
        ("Sep 16 21:30:26", "server mysqld[1020]: Sep 16 21:30:26 server mysqld: 030916 21:30:26 [Warning] Access denied", 15),
        # just to test sorting:
        ("2005-10-07 06:09:42", "server mysqld[5906]: 2005-10-07 06:09:42 5907 [Warning] Access denied", 20),
        ("2005-10-08T15:26:18.237955", "server mysqld[5906]: 2005-10-08T15:26:18.237955 6 [Note] Access denied", 20),
        # date format changed again:
        ("051009 10:05:30", "server mysqld[1000]: 051009 10:05:30 [Warning] Access denied ...", 50),
    ):
        logSys.debug('== test: %r', (debit, line, cnt))
        for i in range(cnt):
            logSys.debug('Line: %s', line)
            match, template = dd.matchTime(line)
            self.assertTrue(match)
            self.assertEqual(match.group(1), debit)
def testLowLevelLogging(self):
    """Coverage for the deep (heavy) debug messages of the datedetector module.

    Temporarily raises datedetector.logLevel above the effective logger
    level so the heavy-debug code paths execute; restored in `finally`.
    """
    # test coverage for the deep (heavy) debug messages:
    try:
        self.__old_eff_level = datedetector.logLevel
        if datedetector.logLevel < logSys.getEffectiveLevel()+1:
            datedetector.logLevel = logSys.getEffectiveLevel()+1
        dd = self.datedetector
        # `i` carries over between rows so each line gets fresh second-values
        i = 0
        for (line, cnt) in (
            ("server mysqld[5906]: 2005-10-07 06:09:%02i 5907 [Warning] Access denied", 2),
            ("server mysqld[5906]: 051007 06:10:%02i 5907 [Warning] Access denied", 5),
            ("server mysqld[5906]: 2005-10-07 06:09:%02i 5907 [Warning] Access denied", 10),
        ):
            # NOTE(review): range(i, i+cnt+1) iterates cnt+1 times -- presumably
            # intentional (overlap one value with the next row); confirm.
            for i in range(i, i+cnt+1):
                logSys.debug('== test: %r', (line % i, cnt))
                match, template = dd.matchTime(line % i)
                self.assertTrue(match)
    finally:
        datedetector.logLevel = self.__old_eff_level
def testWrongTemplate(self):
    """Invalid pattern compiles lazily and fails on first match; abstract base raises."""
    # unbalanced parenthesis -> invalid regex after translation:
    t = DatePatternRegex('(%ExY%Exm%Exd')
    # lazy compiling used, so try match:
    self.assertRaises(Exception, t.matchDate, '(20050101')
    self.assertLogged("Compile %r failed" % t.name)
    # abstract:
    t = DateTemplate()
    self.assertRaises(Exception, t.getDate, 'no date line')
# Shared ISO-8601 template (date, 'T' or space separator, time, optional
# fractional seconds and timezone) used by CustomDateFormatsTest below.
iso8601 = DatePatternRegex(r"%Y-%m-%d[T ]%H:%M:%S(?:\.%f)?%z")
class CustomDateFormatsTest(unittest.TestCase):
    """Tests for user-supplied / non-default date patterns."""

    def setUp(self):
        """Call before every test case."""
        unittest.TestCase.setUp(self)
        # freeze "now" so relative date computations are deterministic
        setUpMyTime()

    def tearDown(self):
        """Call after every test case."""
        unittest.TestCase.tearDown(self)
        tearDownMyTime()
def testIso8601(self):
    """ISO-8601 parsing: UTC marker, invalid inputs, and ±HH[:MM] offsets."""
    date = datetime.datetime.utcfromtimestamp(
        iso8601.getDate("2007-01-25T12:00:00Z")[0])
    self.assertEqual(
        date,
        datetime.datetime(2007, 1, 25, 12, 0))
    # non-string inputs are rejected:
    self.assertRaises(TypeError, iso8601.getDate, None)
    self.assertRaises(TypeError, iso8601.getDate, date)

    # empty / degenerate strings find no date:
    self.assertEqual(iso8601.getDate(""), None)
    self.assertEqual(iso8601.getDate("Z"), None)

    # out-of-range hour and month must not match:
    self.assertEqual(iso8601.getDate("2007-01-01T120:00:00Z"), None)
    self.assertEqual(iso8601.getDate("2007-13-01T12:00:00Z"), None)
    # +0400 offset -> 4 hours earlier in UTC:
    date = datetime.datetime.utcfromtimestamp(
        iso8601.getDate("2007-01-25T12:00:00+0400")[0])
    self.assertEqual(
        date,
        datetime.datetime(2007, 1, 25, 8, 0))
    # same offset with colon form:
    date = datetime.datetime.utcfromtimestamp(
        iso8601.getDate("2007-01-25T12:00:00+04:00")[0])
    self.assertEqual(
        date,
        datetime.datetime(2007, 1, 25, 8, 0))
    # negative offset -> later in UTC:
    date = datetime.datetime.utcfromtimestamp(
        iso8601.getDate("2007-01-25T12:00:00-0400")[0])
    self.assertEqual(
        date,
        datetime.datetime(2007, 1, 25, 16, 0))
    # hour-only offset form:
    date = datetime.datetime.utcfromtimestamp(
        iso8601.getDate("2007-01-25T12:00:00-04")[0])
    self.assertEqual(
        date,
        datetime.datetime(2007, 1, 25, 16, 0))
def testAmbiguousDatePattern(self):
    """Table-driven matching checks for ambiguous/bounded/unbound date patterns.

    Each row is (expected, pattern, line): `expected` is a matched string,
    a unix timestamp (float), None (found but value not pinned), or False
    (must not match). `pattern` None means use the default templates.
    """
    defDD = DateDetector()
    defDD.addDefaultTemplate()
    for (matched, dp, line) in (
        # positive case:
        ('Jan 23 21:59:59', None, 'Test failure Jan 23 21:59:59 for 192.0.2.1'),
        # ambiguous "unbound" patterns (missed):
        (False, None, 'Test failure TestJan 23 21:59:59.011 2015 for 192.0.2.1'),
        (False, None, 'Test failure Jan 23 21:59:59123456789 for 192.0.2.1'),
        # ambiguous "no optional year" patterns (matched):
        ('Aug 8 11:25:50', None, 'Aug 8 11:25:50 20030f2329b8 Authentication failed from 192.0.2.1'),
        ('Aug 8 11:25:50', None, '[Aug 8 11:25:50] 20030f2329b8 Authentication failed from 192.0.2.1'),
        ('Aug 8 11:25:50 2014', None, 'Aug 8 11:25:50 2014 20030f2329b8 Authentication failed from 192.0.2.1'),
        # direct specified patterns:
        ('20:00:00 01.02.2003', r'%H:%M:%S %d.%m.%Y$', '192.0.2.1 at 20:00:00 01.02.2003'),
        ('[20:00:00 01.02.2003]', r'\[%H:%M:%S %d.%m.%Y\]', '192.0.2.1[20:00:00 01.02.2003]'),
        ('[20:00:00 01.02.2003]', r'\[%H:%M:%S %d.%m.%Y\]', '[20:00:00 01.02.2003]192.0.2.1'),
        ('[20:00:00 01.02.2003]', r'\[%H:%M:%S %d.%m.%Y\]$', '192.0.2.1[20:00:00 01.02.2003]'),
        ('[20:00:00 01.02.2003]', r'^\[%H:%M:%S %d.%m.%Y\]', '[20:00:00 01.02.2003]192.0.2.1'),
        ('[17/Jun/2011 17:00:45]', r'^\[%d/%b/%Y %H:%M:%S\]', '[17/Jun/2011 17:00:45] Attempt, IP address 192.0.2.1'),
        ('[17/Jun/2011 17:00:45]', r'\[%d/%b/%Y %H:%M:%S\]', 'Attempt [17/Jun/2011 17:00:45] IP address 192.0.2.1'),
        ('[17/Jun/2011 17:00:45]', r'\[%d/%b/%Y %H:%M:%S\]', 'Attempt IP address 192.0.2.1, date: [17/Jun/2011 17:00:45]'),
        # direct specified patterns (begin/end, missed):
        (False, r'%H:%M:%S %d.%m.%Y', '192.0.2.1x20:00:00 01.02.2003'),
        (False, r'%H:%M:%S %d.%m.%Y', '20:00:00 01.02.2003x192.0.2.1'),
        # direct specified unbound patterns (no begin/end boundary):
        ('20:00:00 01.02.2003', r'**%H:%M:%S %d.%m.%Y**', '192.0.2.1x20:00:00 01.02.2003'),
        ('20:00:00 01.02.2003', r'**%H:%M:%S %d.%m.%Y**', '20:00:00 01.02.2003x192.0.2.1'),
        # pattern enclosed with stars (in comparison to example above):
        ('*20:00:00 01.02.2003*', r'\**%H:%M:%S %d.%m.%Y\**', 'test*20:00:00 01.02.2003*test'),
        # direct specified patterns (begin/end, matched):
        ('20:00:00 01.02.2003', r'%H:%M:%S %d.%m.%Y', '192.0.2.1 20:00:00 01.02.2003'),
        ('20:00:00 01.02.2003', r'%H:%M:%S %d.%m.%Y', '20:00:00 01.02.2003 192.0.2.1'),
        # wrong year in 1st date, so failed by convert using not precise year (filter used last known date),
        # in the 2nd and 3th tests (with precise year) it should find correct the 2nd date:
        (None, r'%Y-%Exm-%Exd %ExH:%ExM:%ExS', "0000-12-30 00:00:00 - 2003-12-30 00:00:00"),
        ('2003-12-30 00:00:00', r'%ExY-%Exm-%Exd %ExH:%ExM:%ExS', "0000-12-30 00:00:00 - 2003-12-30 00:00:00"),
        ('2003-12-30 00:00:00', None, "0000-12-30 00:00:00 - 2003-12-30 00:00:00"),
        # wrong date recognized short month/day (unbounded date pattern without separator between parts),
        # in the 2nd and 3th tests (with precise month and day) it should find correct the 2nd date:
        ('200333 010203', r'%Y%m%d %H%M%S', "text:200333 010203 | date:20031230 010203"),
        ('20031230 010203', r'%ExY%Exm%Exd %ExH%ExM%ExS', "text:200333 010203 | date:20031230 010203"),
        ('20031230 010203', None, "text:200333 010203 | date:20031230 010203"),
        # Explicit bound in start of the line using {^LN-BEG} key,
        # (negative) in the 1st case without line begin boundary - wrong date may be found,
        # (positive) in the 2nd case with line begin boundary - unexpected date / log line (not found)
        # (positive) and in 3th case with line begin boundary - find the correct date
        ("20030101 000000", "%ExY%Exm%Exd %ExH%ExM%ExS", "00001230 010203 - 20030101 000000"),
        (None, "{^LN-BEG}%ExY%Exm%Exd %ExH%ExM%ExS", "00001230 010203 - 20030101 000000"),
        ("20031230 010203", "{^LN-BEG}%ExY%Exm%Exd %ExH%ExM%ExS", "20031230 010203 - 20030101 000000"),
        # Explicit bound in start of the line using {^LN-BEG} key,
        # up to 2 non-alphanumeric chars front, ** - no word boundary on the right
        ("20031230010203", "{^LN-BEG}%ExY%Exm%Exd%ExH%ExM%ExS**", "2003123001020320030101000000"),
        ("20031230010203", "{^LN-BEG}%ExY%Exm%Exd%ExH%ExM%ExS**", "#2003123001020320030101000000"),
        ("20031230010203", "{^LN-BEG}%ExY%Exm%Exd%ExH%ExM%ExS**", "##2003123001020320030101000000"),
        ("20031230010203", "{^LN-BEG}%ExY%Exm%Exd%ExH%ExM%ExS", "[20031230010203]20030101000000"),
        # UTC/GMT time zone offset (with %z and %Z):
        (1072746123.0 - 3600, "{^LN-BEG}%ExY-%Exm-%Exd %ExH:%ExM:%ExS(?: %z)?", "[2003-12-30 01:02:03] server ..."),
        (1072746123.0 - 3600, "{^LN-BEG}%ExY-%Exm-%Exd %ExH:%ExM:%ExS(?: %Z)?", "[2003-12-30 01:02:03] server ..."),
        (1072746123.0, "{^LN-BEG}%ExY-%Exm-%Exd %ExH:%ExM:%ExS(?: %z)?", "[2003-12-30 01:02:03 UTC] server ..."),
        (1072746123.0, "{^LN-BEG}%ExY-%Exm-%Exd %ExH:%ExM:%ExS(?: %Z)?", "[2003-12-30 01:02:03 UTC] server ..."),
        (1072746123.0, "{^LN-BEG}%ExY-%Exm-%Exd %ExH:%ExM:%ExS(?: %z)?", "[2003-12-30 01:02:03 Z] server ..."),
        (1072746123.0, "{^LN-BEG}%ExY-%Exm-%Exd %ExH:%ExM:%ExS(?: %z)?", "[2003-12-30 01:02:03 +0000] server ..."),
        (1072746123.0, "{^LN-BEG}%ExY-%Exm-%Exd %ExH:%ExM:%ExS(?: %Z)?", "[2003-12-30 01:02:03 Z] server ..."),
    ):
        logSys.debug('== test: %r', (matched, dp, line))
        if dp is None:
            dd = defDD
        else:
            dd = DateDetector()
            dd.appendTemplate(dp)
        date = dd.getTime(line)
        if matched:
            self.assertTrue(date)
            if isinstance(matched, str):
                self.assertEqual(matched, date[1].group(1))
            else:
                self.assertEqual(matched, date[0])
        else:
            self.assertEqual(date, None)
def testVariousFormatSpecs(self):
    """Coverage for less-common strptime-like specifiers (%B, %I, %U, %W, %A, %w, ...).

    Expected values are unix timestamps computed against the mocked test
    clock (now == 14 Aug 2005 12:00, see setUpMyTime).
    """
    for (matched, dp, line) in (
        # cover %B (full-month-name) and %I (as 12 == 0):
        (1106438399.0, "^%B %Exd %I:%ExM:%ExS**", 'January 23 12:59:59'),
        # cover %U (week of year starts on sunday) and %A (weekday):
        (985208399.0, "^%y %U %A %ExH:%ExM:%ExS**", '01 11 Wednesday 21:59:59'),
        # cover %W (week of year starts on monday) and %A (weekday):
        (984603599.0, "^%y %W %A %ExH:%ExM:%ExS**", '01 11 Wednesday 21:59:59'),
        # cover %W (week of year starts on monday) and %w (weekday, 0 - sunday):
        (984949199.0, "^%y %W %w %ExH:%ExM:%ExS**", '01 11 0 21:59:59'),
        # cover %W (week of year starts on monday) and %w (weekday, 6 - saturday):
        (984862799.0, "^%y %W %w %ExH:%ExM:%ExS**", '01 11 6 21:59:59'),
        # cover time only, current date, in test cases now == 14 Aug 2005 12:00 -> back to yesterday (13 Aug):
        (1123963199.0, "^%ExH:%ExM:%ExS**", '21:59:59'),
        # cover time only, current date, in test cases now == 14 Aug 2005 12:00 -> today (14 Aug):
        (1123970401.0, "^%ExH:%ExM:%ExS**", '00:00:01'),
        # cover date with current year, in test cases now == Aug 2005 -> back to last year (Sep 2004):
        (1094068799.0, "^%m/%d %ExH:%ExM:%ExS**", '09/01 21:59:59'),
        # no time (only date) in pattern, assume local 00:00:00 for H:M:S :
        (1093989600.0, "^%Y-%m-%d**", '2004-09-01'),
        (1093996800.0, "^%Y-%m-%d%z**", '2004-09-01Z'),
    ):
        logSys.debug('== test: %r', (matched, dp, line))
        dd = DateDetector()
        dd.appendTemplate(dp)
        date = dd.getTime(line)
        if matched:
            self.assertTrue(date)
            if isinstance(matched, str): # pragma: no cover
                self.assertEqual(matched, date[1].group(1))
            else:
                self.assertEqual(matched, date[0])
        else: # pragma: no cover
            self.assertEqual(date, None)
# def testDefaultTemplate(self):
|
||||
# self.__datedetector.setDefaultRegex("^\S{3}\s{1,2}\d{1,2} \d{2}:\d{2}:\d{2}")
|
||||
# self.__datedetector.setDefaultPattern("%b %d %H:%M:%S")
|
||||
#
|
||||
# log = "Jan 23 21:59:59 [sshd] error: PAM: Authentication failure"
|
||||
# date = [2005, 1, 23, 21, 59, 59, 1, 23, -1]
|
||||
# dateUnix = 1106513999.0
|
||||
#
|
||||
# self.assertEqual(self.__datedetector.getTime(log), date)
|
||||
# self.assertEqual(self.__datedetector.getUnixTime(log), dateUnix)
|
||||
|
||||
85
fail2ban-master/fail2ban/tests/dummyjail.py
Normal file
85
fail2ban-master/fail2ban/tests/dummyjail.py
Normal file
@@ -0,0 +1,85 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Fail2Ban developers
|
||||
|
||||
__copyright__ = "Copyright (c) 2012 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
from threading import Lock
|
||||
|
||||
from ..server.jail import Jail
|
||||
from ..server.actions import Actions
|
||||
|
||||
|
||||
class DummyActions(Actions):
    """Actions subclass exposing the private ban check for tests."""
    def checkBan(self):
        # delegate to the name-mangled private method of the Actions base
        # class (Actions.__checkBan -> _Actions__checkBan)
        return self._Actions__checkBan()
class DummyJail(Jail):
    """A simple 'jail' to suck in all the tickets generated by Filter's.

    Tickets are collected into a plain list protected by a Lock; the jail
    itself never bans anything.
    """
    def __init__(self, name='DummyJail', backend=None):
        self.lock = Lock()
        self.queue = []
        super(DummyJail, self).__init__(name=name, backend=backend)
        self.__actions = DummyActions(self)

    def __len__(self):
        """Number of tickets currently queued."""
        with self.lock:
            return len(self.queue)

    def isEmpty(self):
        """True if no tickets are queued."""
        with self.lock:
            return not self.queue

    def isFilled(self):
        """True if at least one ticket is queued."""
        with self.lock:
            return bool(self.queue)

    @property
    def hasFailTickets(self):
        # NOTE(review): reads the queue without taking self.lock (unlike
        # isEmpty/isFilled) -- presumably acceptable for test usage; confirm
        # if ever used across threads.
        return bool(self.queue)

    def putFailTicket(self, ticket):
        """Queue a ticket produced by a Filter."""
        with self.lock:
            self.queue.append(ticket)

    def getFailTicket(self):
        """Pop the most recently queued ticket (LIFO); False when empty."""
        with self.lock:
            try:
                return self.queue.pop()
            except IndexError:
                return False

    @property
    def idle(self):
        # a dummy jail is never idle
        return False

    @idle.setter
    def idle(self, value):
        # ignore attempts to change idleness
        pass

    @property
    def actions(self):
        return self.__actions

    def isAlive(self):
        # pretend to be a running jail so filters keep feeding us
        return True
1724
fail2ban-master/fail2ban/tests/fail2banclienttestcase.py
Normal file
1724
fail2ban-master/fail2ban/tests/fail2banclienttestcase.py
Normal file
File diff suppressed because it is too large
Load Diff
717
fail2ban-master/fail2ban/tests/fail2banregextestcase.py
Normal file
717
fail2ban-master/fail2ban/tests/fail2banregextestcase.py
Normal file
@@ -0,0 +1,717 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Fail2Ban developers
|
||||
|
||||
__author__ = "Serg Brester"
|
||||
__copyright__ = "Copyright (c) 2015 Serg G. Brester (sebres), 2008- Fail2Ban Contributors"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
from ..client import fail2banregex
|
||||
from ..client.fail2banregex import Fail2banRegex, get_opt_parser, exec_command_line, output, str2LogLevel
|
||||
from .utils import setUpMyTime, tearDownMyTime, LogCaptureTestCase, logSys
|
||||
from .utils import CONFIG_DIR
|
||||
|
||||
|
||||
# Redirect the fail2banregex module's logger to the test logger so its
# messages are captured by LogCaptureTestCase:
fail2banregex.logSys = logSys
def _test_output(*args):
    """Replacement for fail2banregex.output: route printed lines into the test log."""
    logSys.notice('output: %s', args[0])

# monkey-patch the module's output function for the whole test run:
fail2banregex.output = _test_output
# Locations of the test-only config and log fixtures shipped with the suite:
TEST_CONFIG_DIR = os.path.join(os.path.dirname(__file__), "config")
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "files")

# Lazily opened os.devnull handle, shared by _test_exec_command_line:
DEV_NULL = None
def _Fail2banRegex(*args):
    """Parse CLI-style args and build a Fail2banRegex instance.

    Returns (opts, remaining_args, Fail2banRegex(opts)).
    """
    parser = get_opt_parser()
    (opts, args) = parser.parse_args(list(args))
    # put down log-level if expected, because of too many debug-messages:
    if opts.log_level in ("notice", "warning"):
        logSys.setLevel(str2LogLevel(opts.log_level))
    return (opts, args, Fail2banRegex(opts))
def _test_exec(*args):
    """Run fail2ban-regex with the given CLI-style arguments; return its success flag."""
    _opts, rest, regexTool = _Fail2banRegex(*args)
    return regexTool.start(rest)
class ExitException(Exception):
    """Raised by the patched sys.exit() to capture the requested exit code.

    Attributes:
        code: the numeric exit status passed to sys.exit().
        msg:  human-readable description of the exit.
    """
    def __init__(self, code):
        self.code = code
        self.msg = 'Exit with code: %s' % code
        # also initialize the Exception base, so str(exc) and exc.args
        # carry the message (the original left them empty):
        super(ExitException, self).__init__(self.msg)
def _test_exec_command_line(*args):
    """Run exec_command_line() with sys.exit/stdout/stderr patched; return exit code.

    sys.exit is replaced to raise ExitException so the exit code can be
    captured; stdout/stderr are silenced via a shared devnull handle.
    All globals are restored in the finally block.
    """
    def _exit(code=0):
        raise ExitException(code)
    global DEV_NULL
    # remember originals so they can be restored even on failure:
    _org = {'exit': sys.exit, 'stdout': sys.stdout, 'stderr': sys.stderr}
    _exit_code = 0
    sys.exit = _exit
    # open devnull once and reuse it across calls (never closed on purpose):
    if not DEV_NULL: DEV_NULL = open(os.devnull, "w")
    sys.stderr = sys.stdout = DEV_NULL
    try:
        exec_command_line(list(args))
    except ExitException as e:
        _exit_code = e.code
    finally:
        sys.exit = _org['exit']
        sys.stdout = _org['stdout']
        sys.stderr = _org['stderr']
    return _exit_code
def _reset():
    """Reset cross-test global state of the filter module."""
    # reset global warn-counter:
    from ..server.filter import _decode_line_warn
    _decode_line_warn.clear()
||||
# Single-line log samples (with and without a leading datetime):
STR_00 = "Dec 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 192.0.2.0"
STR_00_NODT = "[sshd] error: PAM: Authentication failure for kevin from 192.0.2.0"

# Failregexes used across the direct-matching tests:
RE_00 = r"(?:(?:Authentication failure|Failed [-/\w+]+) for(?: [iI](?:llegal|nvalid) user)?|[Ii](?:llegal|nvalid) user|ROOT LOGIN REFUSED) .*(?: from|FROM) <HOST>"
RE_00_ID = r"Authentication failure for <F-ID>.*?</F-ID> from <ADDR>$"
RE_00_USER = r"Authentication failure for <F-USER>.*?</F-USER> from <ADDR>$"

FILENAME_01 = os.path.join(TEST_FILES_DIR, "testcase01.log")
FILENAME_02 = os.path.join(TEST_FILES_DIR, "testcase02.log")
FILENAME_WRONGCHAR = os.path.join(TEST_FILES_DIR, "testcase-wrong-char.log")

# STR_ML_SSHD -- multiline log-excerpt with two sessions:
# 192.0.2.1 (sshd[32307]) makes 2 failed attempts using public keys (without "Disconnecting: Too many authentication"),
# and delayed success on accepted (STR_ML_SSHD_OK) or no success by close on preauth phase (STR_ML_SSHD_FAIL)
# 192.0.2.2 (sshd[32310]) makes 2 failed attempts using public keys (with "Disconnecting: Too many authentication"),
# and closed on preauth phase
STR_ML_SSHD = """Nov 28 09:16:03 srv sshd[32307]: Failed publickey for git from 192.0.2.1 port 57904 ssh2: ECDSA 0e:ff:xx:xx:xx:xx:xx:xx:xx:xx:xx:...
Nov 28 09:16:03 srv sshd[32307]: Failed publickey for git from 192.0.2.1 port 57904 ssh2: RSA 04:bc:xx:xx:xx:xx:xx:xx:xx:xx:xx:...
Nov 28 09:16:03 srv sshd[32307]: Postponed publickey for git from 192.0.2.1 port 57904 ssh2 [preauth]
Nov 28 09:16:05 srv sshd[32310]: Failed publickey for git from 192.0.2.2 port 57910 ssh2: ECDSA 1e:fe:xx:xx:xx:xx:xx:xx:xx:xx:xx:...
Nov 28 09:16:05 srv sshd[32310]: Failed publickey for git from 192.0.2.2 port 57910 ssh2: RSA 14:ba:xx:xx:xx:xx:xx:xx:xx:xx:xx:...
Nov 28 09:16:05 srv sshd[32310]: Disconnecting: Too many authentication failures for git [preauth]
Nov 28 09:16:05 srv sshd[32310]: Connection closed by 192.0.2.2 [preauth]"""
STR_ML_SSHD_OK = "Nov 28 09:16:06 srv sshd[32307]: Accepted publickey for git from 192.0.2.1 port 57904 ssh2: DSA 36:48:xx:xx:xx:xx:xx:xx:xx:xx:xx:..."
STR_ML_SSHD_FAIL = "Nov 28 09:16:06 srv sshd[32307]: Connection closed by 192.0.2.1 [preauth]"


# Real filter/log fixtures used by the filter-level tests:
FILENAME_SSHD = os.path.join(TEST_FILES_DIR, "logs", "sshd")
FILTER_SSHD = os.path.join(CONFIG_DIR, 'filter.d', 'sshd.conf')
FILENAME_ZZZ_SSHD = os.path.join(TEST_FILES_DIR, 'zzz-sshd-obsolete-multiline.log')
FILTER_ZZZ_SSHD = os.path.join(TEST_CONFIG_DIR, 'filter.d', 'zzz-sshd-obsolete-multiline.conf')

FILENAME_ZZZ_GEN = os.path.join(TEST_FILES_DIR, "logs", "zzz-generic-example")
FILTER_ZZZ_GEN = os.path.join(TEST_CONFIG_DIR, 'filter.d', 'zzz-generic-example.conf')
class Fail2banRegexTest(LogCaptureTestCase):
    """End-to-end tests driving the fail2ban-regex command-line tool."""

    def setUp(self):
        """Call before every test case."""
        LogCaptureTestCase.setUp(self)
        # freeze "now" and clear cross-test filter state:
        setUpMyTime()
        _reset()

    def tearDown(self):
        """Call after every test case."""
        LogCaptureTestCase.tearDown(self)
        tearDownMyTime()
def testWrongRE(self):
    """Invalid failregexes must fail with a compile error and logged details."""
    self.assertFalse(_test_exec(
        "test", r".** from <HOST>$"
    ))
    self.assertLogged("Unable to compile regular expression")
    self.assertLogged("multiple repeat", "at position 2", all=False); # details of failed compilation
    self.pruneLog()
    self.assertFalse(_test_exec(
        "test", r"^(?:(?P<type>A)|B)? (?(typo)...) from <ADDR>"
    ))
    self.assertLogged("Unable to compile regular expression")
    self.assertLogged("unknown group name", "at position 23", all=False); # details of failed compilation
def testWrongIgnoreRE(self):
    """An invalid ignoreregex must also fail compilation, not only failregex."""
    self.assertFalse(_test_exec(
        "--datepattern", "{^LN-BEG}EPOCH",
        "test", r".*? from <HOST>$", r".**"
    ))
    self.assertLogged("Unable to compile regular expression")
    self.assertLogged("multiple repeat", "at position 2", all=False); # details of failed compilation
def testWrongFilterOptions(self):
    """Malformed filter option syntax must be reported with its position."""
    self.assertFalse(_test_exec(
        "test", "flt[a='x,y,z',b=z,y,x]"
    ))
    self.assertLogged("Wrong filter name or options", "wrong syntax at 14: y,x", all=True)
def testDirectFound(self):
    """Single line given directly on the command line matches the regex."""
    self.assertTrue(_test_exec(
        "--datepattern", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
        "--print-all-matched", "--print-no-missed",
        STR_00,
        r"Authentication failure for .*? from <HOST>$"
    ))
    self.assertLogged('Lines: 1 lines, 0 ignored, 1 matched, 0 missed')
def testDirectNotFound(self):
    """Non-matching regex reports the line as missed."""
    self.assertTrue(_test_exec(
        "--print-all-missed",
        STR_00,
        r"XYZ from <HOST>$"
    ))
    self.assertLogged('Lines: 1 lines, 0 ignored, 0 matched, 1 missed')
def testDirectIgnored(self):
    """A matching ignoreregex wins over the failregex (line counted as ignored)."""
    self.assertTrue(_test_exec(
        "--print-all-ignored",
        STR_00,
        r"Authentication failure for .*? from <HOST>$",
        r"kevin from 192.0.2.0$"
    ))
    self.assertLogged('Lines: 1 lines, 1 ignored, 0 matched, 0 missed')
def testDirectRE_1(self):
    """Whole log file processing: match counts plus decode-error handling."""
    self.assertTrue(_test_exec(
        "--datepattern", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
        "--print-all-matched",
        FILENAME_01, RE_00
    ))
    self.assertLogged('Lines: 19 lines, 0 ignored, 16 matched, 3 missed')

    # the fixture contains an invalid-encoding line -> warning but continue:
    self.assertLogged('Error decoding line');
    self.assertLogged('Continuing to process line ignoring invalid characters')

    self.assertLogged('Dez 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 193.168.0.128')
    self.assertLogged('Dec 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 87.142.124.10')
def testDirectRE_1raw(self):
    """--raw skips DNS resolution, so all 19 lines match."""
    self.assertTrue(_test_exec(
        "--datepattern", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
        "--print-all-matched", "--raw",
        FILENAME_01, RE_00
    ))
    self.assertLogged('Lines: 19 lines, 0 ignored, 19 matched, 0 missed')
def testDirectRE_1raw_noDns(self):
    """--usedns=no behaves like non-raw for hostnames; <F-ID> implies raw mode."""
    self.assertTrue(_test_exec(
        "--datepattern", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
        "--print-all-matched", "--raw", "--usedns=no",
        FILENAME_01, RE_00
    ))
    self.assertLogged('Lines: 19 lines, 0 ignored, 16 matched, 3 missed')
    # usage of <F-ID>\S+</F-ID> causes raw handling automatically:
    self.pruneLog()
    self.assertTrue(_test_exec(
        "-d", "^Epoch",
        "1490349000 test failed.dns.ch", r"^\s*test <F-ID>\S+</F-ID>"
    ))
    self.assertLogged('Lines: 1 lines, 0 ignored, 1 matched, 0 missed', all=True)
    self.assertNotLogged('Unable to find a corresponding IP address')
    # no confusion to IP/CIDR
    self.pruneLog()
    self.assertTrue(_test_exec(
        "-d", "^Epoch", "-o", "id",
        "1490349000 test this/is/some/path/32", r"^\s*test <F-ID>\S+</F-ID>"
    ))
    self.assertLogged('this/is/some/path/32', all=True)
def testDirectRE_2(self):
    """Second fixture file: expected match/miss counts."""
    self.assertTrue(_test_exec(
        "--datepattern", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
        "--print-all-matched",
        FILENAME_02, RE_00
    ))
    self.assertLogged('Lines: 13 lines, 0 ignored, 5 matched, 8 missed')
def testVerbose(self):
    """--verbose/--verbose-date print per-IP timestamps (with timezone applied)."""
    self.assertTrue(_test_exec(
        "--datepattern", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
        "--timezone", "UTC+0200",
        "--verbose", "--verbose-date", "--print-no-missed",
        FILENAME_02, RE_00
    ))
    self.assertLogged('Lines: 13 lines, 0 ignored, 5 matched, 8 missed')

    self.assertLogged('141.3.81.106 Sun Aug 14 11:53:59 2005')
    self.assertLogged('141.3.81.106 Sun Aug 14 11:54:59 2005')
def testVerboseFullSshd(self):
    """Full sshd filter over the sshd fixture; -vv prints the real filter options."""
    self.assertTrue(_test_exec(
        "-l", "notice", # put down log-level, because of too many debug-messages
        "-v", "--verbose-date", "--print-all-matched", "--print-all-ignored",
        "-c", CONFIG_DIR,
        FILENAME_SSHD, "sshd.conf"
    ))
    # test failure line and not-failure lines both presents:
    self.assertLogged("[29116]: User root not allowed because account is locked",
        "[29116]: Received disconnect from 1.2.3.4", all=True)
    self.pruneLog()
    # show real options:
    self.assertTrue(_test_exec(
        "-l", "notice", # put down log-level, because of too many debug-messages
        "-vv", "-c", CONFIG_DIR,
        "Dec 31 11:59:59 [sshd] error: PAM: Authentication failure for kevin from 192.0.2.1",
        "filter.d/sshd[logtype=short]"
    ))
    # tet logtype is specified and set in real options:
    self.assertLogged("Real filter options :", "'logtype': 'short'", all=True)
    self.assertNotLogged("'logtype': 'file'", "'logtype': 'journal'", all=True)
def testFastSshd(self):
    """sshd filter in mode=normal over the obsolete-multiline fixture."""
    self.assertTrue(_test_exec(
        "-l", "notice", # put down log-level, because of too many debug-messages
        "--print-all-matched",
        "-c", CONFIG_DIR,
        FILENAME_ZZZ_SSHD, "sshd.conf[mode=normal]"
    ))
    # test failure line and all not-failure lines presents:
    self.assertLogged(
        "[29116]: Connection from 192.0.2.4",
        "[29116]: User root not allowed because account is locked",
        "[29116]: Received disconnect from 192.0.2.4", all=True)
def testLoadFromJail(self):
    """Naming a jail (not a filter file) must load the filter via the jail config."""
    self.assertTrue(_test_exec(
        "-l", "notice", # put down log-level, because of too many debug-messages
        "-c", CONFIG_DIR, '-vv',
        FILENAME_ZZZ_SSHD, "sshd[logtype=short]"
    ))
    # test it was jail not filter:
    self.assertLogged(
        "Use %11s jail : %s" % ('','sshd'))
def testMultilineSshd(self):
|
||||
# by the way test of missing lines by multiline in `for bufLine in orgLineBuffer[int(fullBuffer):]`
|
||||
self.assertTrue(_test_exec(
|
||||
"-l", "notice", # put down log-level, because of too many debug-messages
|
||||
"--print-all-matched", "--print-all-missed",
|
||||
"-c", os.path.dirname(FILTER_ZZZ_SSHD),
|
||||
FILENAME_ZZZ_SSHD, os.path.basename(FILTER_ZZZ_SSHD)
|
||||
))
|
||||
# test "failure" line presents (2nd part only, because multiline fewer precise):
|
||||
self.assertLogged(
|
||||
"[29116]: Received disconnect from 192.0.2.4", all=True)
|
||||
|
||||
def testFullGeneric(self):
|
||||
# by the way test of ignoreregex (specified in filter file)...
|
||||
self.assertTrue(_test_exec(
|
||||
"-l", "notice", # put down log-level, because of too many debug-messages
|
||||
FILENAME_ZZZ_GEN, FILTER_ZZZ_GEN+"[mode=test]"
|
||||
))
|
||||
self.assertLogged("Ignoreregex: 2 total",
|
||||
"Lines: 23 lines, 2 ignored, 16 matched, 5 missed", all=True)
|
||||
# cover filter ignoreregex gets overwritten by command argument:
|
||||
self.pruneLog("[test-phase 2]")
|
||||
self.assertTrue(_test_exec(
|
||||
"-l", "notice", # put down log-level, because of too many debug-messages
|
||||
"[Jun 21 16:56:03] machine test-demo(pam_unix)[13709] F2B: error from 192.0.2.251\n"
|
||||
"[Jun 21 16:56:04] machine test-demo(pam_unix)[13709] F2B: error from 192.0.2.252\n"
|
||||
"[Jun 21 16:56:05] machine test-demo(pam_unix)[13709] F2B: error from 192.0.2.255\n",
|
||||
FILTER_ZZZ_GEN+"[mode=test]",
|
||||
"F2B: error from 192.0.2.255$"
|
||||
))
|
||||
self.assertLogged("Use ignoreregex line", "Ignoreregex: 1 total",
|
||||
"Lines: 3 lines, 1 ignored, 2 matched, 0 missed", all=True)
|
||||
|
||||
def testDirectMultilineBuf(self):
|
||||
# test it with some pre-lines also to cover correct buffer scrolling (all multi-lines printed):
|
||||
for preLines in (0, 20):
|
||||
self.pruneLog("[test-phase %s]" % preLines)
|
||||
self.assertTrue(_test_exec(
|
||||
"--usedns", "no", "-d", "^Epoch", "--print-all-matched", "--maxlines", "5",
|
||||
("1490349000 TEST-NL\n"*preLines) +
|
||||
"1490349000 FAIL\n1490349000 TEST1\n1490349001 TEST2\n1490349001 HOST 192.0.2.34",
|
||||
r"^\s*FAIL\s*$<SKIPLINES>^\s*HOST <HOST>\s*$"
|
||||
))
|
||||
self.assertLogged('Lines: %s lines, 0 ignored, 2 matched, %s missed' % (preLines+4, preLines+2))
|
||||
# both matched lines were printed:
|
||||
self.assertLogged("| 1490349000 FAIL", "| 1490349001 HOST 192.0.2.34", all=True)
|
||||
|
||||
|
||||
def testDirectMultilineBufDebuggex(self):
|
||||
self.assertTrue(_test_exec(
|
||||
"--usedns", "no", "-d", "^Epoch", "--debuggex", "--print-all-matched", "--maxlines", "5",
|
||||
"1490349000 FAIL\n1490349000 TEST1\n1490349001 TEST2\n1490349001 HOST 192.0.2.34",
|
||||
r"^\s*FAIL\s*$<SKIPLINES>^\s*HOST <HOST>\s*$"
|
||||
))
|
||||
self.assertLogged('Lines: 4 lines, 0 ignored, 2 matched, 2 missed')
|
||||
# the sequence in args-dict is currently undefined (so can be 1st argument)
|
||||
self.assertLogged("&flags=m", "?flags=m")
|
||||
|
||||
def testSinglelineWithNLinContent(self):
|
||||
#
|
||||
self.assertTrue(_test_exec(
|
||||
"--usedns", "no", "-d", "^Epoch", "--print-all-matched",
|
||||
"-L", "2", "1490349000 FAIL: failure\nhost: 192.0.2.35",
|
||||
r"^\s*FAIL:\s*.*\nhost:\s+<HOST>$"
|
||||
))
|
||||
self.assertLogged('Lines: 2 lines, 0 ignored, 2 matched, 0 missed')
|
||||
|
||||
def testRegexEpochPatterns(self):
|
||||
self.assertTrue(_test_exec(
|
||||
"-r", "-d", r"^\[{LEPOCH}\]\s+", "--maxlines", "5",
|
||||
"[1516469849] 192.0.2.1 FAIL: failure\n"
|
||||
"[1516469849551] 192.0.2.2 FAIL: failure\n"
|
||||
"[1516469849551000] 192.0.2.3 FAIL: failure\n"
|
||||
"[1516469849551.000] 192.0.2.4 FAIL: failure",
|
||||
r"^<HOST> FAIL\b"
|
||||
))
|
||||
self.assertLogged('Lines: 4 lines, 0 ignored, 4 matched, 0 missed')
|
||||
|
||||
def testRegexSubnet(self):
|
||||
self.assertTrue(_test_exec(
|
||||
"-vv", "-d", r"^\[{LEPOCH}\]\s+", "--maxlines", "5",
|
||||
"[1516469849] 192.0.2.1 FAIL: failure\n"
|
||||
"[1516469849] 192.0.2.1/24 FAIL: failure\n"
|
||||
"[1516469849] 2001:DB8:FF:FF::1 FAIL: failure\n"
|
||||
"[1516469849] 2001:DB8:FF:FF::1/60 FAIL: failure\n",
|
||||
r"^<SUBNET> FAIL\b"
|
||||
))
|
||||
self.assertLogged('Lines: 4 lines, 0 ignored, 4 matched, 0 missed')
|
||||
self.assertLogged('192.0.2.0/24', '2001:db8:ff:f0::/60', all=True)
|
||||
|
||||
def testFrmtOutput(self):
|
||||
# id/ip only:
|
||||
self.assertTrue(_test_exec('-o', 'id', STR_00, RE_00_ID))
|
||||
self.assertLogged('output: %s' % 'kevin')
|
||||
self.pruneLog()
|
||||
# multiple id combined to a tuple (id, tuple_id):
|
||||
self.assertTrue(_test_exec('-o', 'id', '-d', '{^LN-BEG}EPOCH',
|
||||
'1591983743.667 192.0.2.1 192.0.2.2',
|
||||
r'^\s*<F-ID/> <F-TUPLE_ID>\S+</F-TUPLE_ID>'))
|
||||
self.assertLogged('output: %s' % str(('192.0.2.1', '192.0.2.2')))
|
||||
self.pruneLog()
|
||||
# multiple id combined to a tuple, id first - (id, tuple_id_1, tuple_id_2):
|
||||
self.assertTrue(_test_exec('-o', 'id', '-d', '{^LN-BEG}EPOCH',
|
||||
'1591983743.667 left 192.0.2.3 right',
|
||||
r'^\s*<F-TUPLE_ID_1>\S+</F-TUPLE_ID_1> <F-ID/> <F-TUPLE_ID_2>\S+</F-TUPLE_ID_2>'))
|
||||
self.assertLogged('output: %s' % str(('192.0.2.3', 'left', 'right')))
|
||||
self.pruneLog()
|
||||
# id had higher precedence as ip-address:
|
||||
self.assertTrue(_test_exec('-o', 'id', '-d', '{^LN-BEG}EPOCH',
|
||||
'1591983743.667 left [192.0.2.4]:12345 right',
|
||||
r'^\s*<F-TUPLE_ID_1>\S+</F-TUPLE_ID_1> <F-ID><ADDR>:<F-PORT/></F-ID> <F-TUPLE_ID_2>\S+</F-TUPLE_ID_2>'))
|
||||
self.assertLogged('output: %s' % str(('[192.0.2.4]:12345', 'left', 'right')))
|
||||
self.pruneLog()
|
||||
# ip is not id anymore (if IP-address deviates from ID):
|
||||
self.assertTrue(_test_exec('-o', 'ip', '-d', '{^LN-BEG}EPOCH',
|
||||
'1591983743.667 left [192.0.2.4]:12345 right',
|
||||
r'^\s*<F-TUPLE_ID_1>\S+</F-TUPLE_ID_1> <F-ID><ADDR>:<F-PORT/></F-ID> <F-TUPLE_ID_2>\S+</F-TUPLE_ID_2>'))
|
||||
self.assertNotLogged('output: %s' % str(('[192.0.2.4]:12345', 'left', 'right')))
|
||||
self.assertLogged('output: %s' % '192.0.2.4')
|
||||
self.pruneLog()
|
||||
self.assertTrue(_test_exec('-o', 'ID:<fid> | IP:<ip>', '-d', '{^LN-BEG}EPOCH',
|
||||
'1591983743.667 left [192.0.2.4]:12345 right',
|
||||
r'^\s*<F-TUPLE_ID_1>\S+</F-TUPLE_ID_1> <F-ID><ADDR>:<F-PORT/></F-ID> <F-TUPLE_ID_2>\S+</F-TUPLE_ID_2>'))
|
||||
self.assertLogged('output: %s' % 'ID:'+str(('[192.0.2.4]:12345', 'left', 'right'))+' | IP:192.0.2.4')
|
||||
self.pruneLog()
|
||||
# row with id :
|
||||
self.assertTrue(_test_exec('-o', 'row', STR_00, RE_00_ID))
|
||||
self.assertLogged('output: %s' % "['kevin'", "'ip4': '192.0.2.0'", "'fid': 'kevin'", all=True)
|
||||
self.pruneLog()
|
||||
# row with ip :
|
||||
self.assertTrue(_test_exec('-o', 'row', STR_00, RE_00_USER))
|
||||
self.assertLogged('output: %s' % "['192.0.2.0'", "'ip4': '192.0.2.0'", "'user': 'kevin'", all=True)
|
||||
self.pruneLog()
|
||||
# log msg :
|
||||
nmline = "Dec 31 12:00:00 [sshd] error: PAM: No failure for user from 192.0.2.123"
|
||||
lines = STR_00+"\n"+nmline
|
||||
self.assertTrue(_test_exec('-o', 'msg', lines, RE_00_USER))
|
||||
self.assertLogged('output: %s' % STR_00)
|
||||
self.assertNotLogged('output: %s' % nmline)
|
||||
self.pruneLog()
|
||||
# log msg (inverted) :
|
||||
self.assertTrue(_test_exec('-o', 'msg', '-i', lines, RE_00_USER))
|
||||
self.assertLogged('output: %s' % nmline)
|
||||
self.assertNotLogged('output: %s' % STR_00)
|
||||
self.pruneLog()
|
||||
# item of match (user):
|
||||
self.assertTrue(_test_exec('-o', 'user', STR_00, RE_00_USER))
|
||||
self.assertLogged('output: %s' % 'kevin')
|
||||
self.pruneLog()
|
||||
# complex substitution using tags (ip, user, family):
|
||||
self.assertTrue(_test_exec('-o', '<ip>, <F-USER>, <family>', STR_00, RE_00_USER))
|
||||
self.assertLogged('output: %s' % '192.0.2.0, kevin, inet4')
|
||||
self.pruneLog()
|
||||
# log msg :
|
||||
lines = nmline+"\n"+STR_00; # just reverse lines (to cover possible order dependencies)
|
||||
self.assertTrue(_test_exec('-o', '<time> : <msg>', lines, RE_00_USER))
|
||||
self.assertLogged('output: %s : %s' % (1104490799.0, STR_00))
|
||||
self.assertNotLogged('output: %s' % nmline)
|
||||
self.pruneLog()
|
||||
# log msg (inverted) :
|
||||
self.assertTrue(_test_exec('-o', '<time> : <msg>', '-i', lines, RE_00_USER))
|
||||
self.assertLogged('output: %s : %s' % (1104490800.0, nmline))
|
||||
self.assertNotLogged('output: %s' % STR_00)
|
||||
self.pruneLog()
|
||||
|
||||
def testStalledIPByNoFailFrmtOutput(self):
|
||||
opts = (
|
||||
'-c', CONFIG_DIR,
|
||||
"-d", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
|
||||
)
|
||||
log = (
|
||||
'May 27 00:16:33 host sshd[2364]: User root not allowed because account is locked\n'
|
||||
'May 27 00:16:33 host sshd[2364]: Received disconnect from 192.0.2.76 port 58846:11: Bye Bye [preauth]'
|
||||
)
|
||||
_test = lambda *args: _test_exec(*(opts + args))
|
||||
# with MLFID from prefregex and IP after failure obtained from F-NOFAIL RE:
|
||||
self.assertTrue(_test('-o', 'IP:<ip>', log, 'sshd.conf'))
|
||||
self.assertLogged('IP:192.0.2.76')
|
||||
self.pruneLog()
|
||||
# test diverse ID/IP constellations:
|
||||
def _test_variants(flt="sshd.conf", prefix=""):
|
||||
# with different ID/IP from failregex (ID/User from first, IP from second message):
|
||||
self.assertTrue(_test('-o', 'ID:"<fid>" | IP:<ip> | U:<F-USER>', log,
|
||||
flt+'[failregex="'
|
||||
'^'+prefix+r'<F-ID>User <F-USER>\S+</F-USER></F-ID> not allowed'+'\n'
|
||||
'^'+prefix+r'Received disconnect from <ADDR>'
|
||||
'"]'))
|
||||
self.assertLogged('ID:"User root" | IP:192.0.2.76 | U:root')
|
||||
self.pruneLog()
|
||||
# with different ID/IP from failregex (User from first, ID and IP from second message):
|
||||
self.assertTrue(_test('-o', 'ID:"<fid>" | IP:<ip> | U:<F-USER>', log,
|
||||
flt+'[failregex="'
|
||||
'^'+prefix+r'User <F-USER>\S+</F-USER> not allowed'+'\n'
|
||||
'^'+prefix+r'Received disconnect from <F-ID><ADDR> port \d+</F-ID>'
|
||||
'"]'))
|
||||
self.assertLogged('ID:"192.0.2.76 port 58846" | IP:192.0.2.76 | U:root')
|
||||
self.pruneLog()
|
||||
# first with sshd and prefregex:
|
||||
_test_variants()
|
||||
# the same without prefregex and MLFID directly in failregex (no merge with prefregex groups):
|
||||
_test_variants('common.conf', prefix=r"\s*\S+ sshd\[<F-MLFID>\d+</F-MLFID>\]:\s+")
|
||||
|
||||
def testNoDateTime(self):
|
||||
# datepattern doesn't match:
|
||||
self.assertTrue(_test_exec('-d', '{^LN-BEG}EPOCH', '-o', 'Found-ID:<F-ID>', STR_00_NODT, RE_00_ID))
|
||||
self.assertLogged(
|
||||
"Found a match but no valid date/time found",
|
||||
"Match without a timestamp:",
|
||||
"Found-ID:kevin", all=True)
|
||||
self.pruneLog()
|
||||
# explicitly no datepattern:
|
||||
self.assertTrue(_test_exec('-d', '{NONE}', '-o', 'Found-ID:<F-ID>', STR_00_NODT, RE_00_ID))
|
||||
self.assertLogged(
|
||||
"Found-ID:kevin", all=True)
|
||||
self.assertNotLogged(
|
||||
"Found a match but no valid date/time found",
|
||||
"Match without a timestamp:", all=True)
|
||||
|
||||
def testIncompleteDateTime(self):
|
||||
# datepattern in followed lines doesn't match previously known pattern + line is too short
|
||||
# (logging break-off, no flush, etc):
|
||||
self.assertTrue(_test_exec(
|
||||
'-o', 'Found-ADDR:<ip>',
|
||||
'192.0.2.1 - - [02/May/2021:18:40:55 +0100] "GET / HTTP/1.1" 302 328 "-" "Mozilla/5.0" "-"\n'
|
||||
'192.0.2.2 - - [02/May/2021:18:40:55 +0100\n'
|
||||
'192.0.2.3 - - [02/May/2021:18:40:55',
|
||||
'^<ADDR>'))
|
||||
self.assertLogged(
|
||||
"Found-ADDR:192.0.2.1", "Found-ADDR:192.0.2.2", "Found-ADDR:192.0.2.3", all=True)
|
||||
|
||||
def testFrmtOutputWrapML(self):
|
||||
unittest.F2B.SkipIfCfgMissing(stock=True)
|
||||
# complex substitution using tags and message (ip, user, msg):
|
||||
self.assertTrue(_test_exec('-o', '<ip>, <F-USER>, <msg>',
|
||||
'-c', CONFIG_DIR, '--usedns', 'no',
|
||||
STR_ML_SSHD + "\n" + STR_ML_SSHD_OK, 'sshd.conf[logtype=short, publickey=invalid]'))
|
||||
# be sure we don't have IP in one line and have it in another:
|
||||
lines = STR_ML_SSHD.split("\n")
|
||||
self.assertTrue('192.0.2.2' not in lines[-2] and '192.0.2.2' in lines[-1])
|
||||
# but both are in output "merged" with IP and user:
|
||||
self.assertLogged(
|
||||
'192.0.2.2, git, '+lines[-2],
|
||||
'192.0.2.2, git, '+lines[-1],
|
||||
all=True)
|
||||
# nothing should be found for 192.0.2.1 (mode is not aggressive):
|
||||
self.assertNotLogged('192.0.2.1, git, ')
|
||||
|
||||
# test with publickey (nofail) - would not produce output for 192.0.2.1 because accepted:
|
||||
self.pruneLog("[test-phase 1] mode=aggressive & publickey=nofail + OK (accepted)")
|
||||
self.assertTrue(_test_exec('-o', '<ip>, <F-USER>, <msg>',
|
||||
'-c', CONFIG_DIR, '--usedns', 'no',
|
||||
STR_ML_SSHD + "\n" + STR_ML_SSHD_OK, 'sshd.conf[logtype=short, mode=aggressive]'))
|
||||
self.assertLogged(
|
||||
'192.0.2.2, git, '+lines[-4],
|
||||
'192.0.2.2, git, '+lines[-3],
|
||||
'192.0.2.2, git, '+lines[-2],
|
||||
'192.0.2.2, git, '+lines[-1],
|
||||
all=True)
|
||||
# nothing should be found for 192.0.2.1 (access gained so failures ignored):
|
||||
self.assertNotLogged('192.0.2.1, git, ')
|
||||
|
||||
# now same test but "accepted" replaced with "closed" on preauth phase:
|
||||
self.pruneLog("[test-phase 2] mode=aggressive & publickey=nofail + FAIL (closed on preauth)")
|
||||
self.assertTrue(_test_exec('-o', '<ip>, <F-USER>, <msg>',
|
||||
'-c', CONFIG_DIR, '--usedns', 'no',
|
||||
STR_ML_SSHD + "\n" + STR_ML_SSHD_FAIL, 'sshd.conf[logtype=short, mode=aggressive]'))
|
||||
# 192.0.2.1 should be found for every failure (2x failed key + 1x closed):
|
||||
lines = STR_ML_SSHD.split("\n")[0:2] + STR_ML_SSHD_FAIL.split("\n")[-1:]
|
||||
self.assertLogged(
|
||||
'192.0.2.1, git, '+lines[-3],
|
||||
'192.0.2.1, git, '+lines[-2],
|
||||
'192.0.2.1, git, '+lines[-1],
|
||||
all=True)
|
||||
|
||||
def testOutputNoPendingFailuresAfterGained(self):
|
||||
unittest.F2B.SkipIfCfgMissing(stock=True)
|
||||
# connect finished without authorization must generate a failure, because
|
||||
# connect started will produce pending failure which gets reset by gained
|
||||
# connect authorized.
|
||||
self.assertTrue(_test_exec('-o', 'failure from == <ip> ==',
|
||||
'-c', CONFIG_DIR, '-d', '{NONE}',
|
||||
'svc[1] connect started 192.0.2.3\n'
|
||||
'svc[1] connect finished 192.0.2.3\n'
|
||||
'svc[2] connect started 192.0.2.4\n'
|
||||
'svc[2] connect authorized 192.0.2.4\n'
|
||||
'svc[2] connect finished 192.0.2.4\n',
|
||||
r'common.conf[prefregex="^svc\[<F-MLFID>\d+</F-MLFID>\] connect <F-CONTENT>.+</F-CONTENT>$"'
|
||||
', failregex="'
|
||||
'^started\n'
|
||||
'^<F-NOFAIL><F-MLFFORGET>finished</F-MLFFORGET></F-NOFAIL> <ADDR>\n'
|
||||
'^<F-MLFGAINED>authorized</F-MLFGAINED> <ADDR>'
|
||||
'", maxlines=1]'
|
||||
))
|
||||
self.assertLogged('failure from == 192.0.2.3 ==')
|
||||
self.assertNotLogged('failure from == 192.0.2.4 ==')
|
||||
|
||||
def testWrongFilterFile(self):
|
||||
# use test log as filter file to cover error cases...
|
||||
self.assertFalse(_test_exec(
|
||||
FILENAME_ZZZ_GEN, FILENAME_ZZZ_GEN
|
||||
))
|
||||
|
||||
def testWrongChar(self):
|
||||
unittest.F2B.SkipIfCfgMissing(stock=True)
|
||||
self.assertTrue(_test_exec(
|
||||
"-l", "notice", # put down log-level, because of too many debug-messages
|
||||
"--datepattern", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
|
||||
FILENAME_WRONGCHAR, FILTER_SSHD
|
||||
))
|
||||
self.assertLogged('Lines: 4 lines, 0 ignored, 2 matched, 2 missed')
|
||||
|
||||
self.assertLogged('Error decoding line')
|
||||
self.assertLogged('Continuing to process line ignoring invalid characters:')
|
||||
|
||||
self.assertLogged('Nov 8 00:16:12 main sshd[32548]: input_userauth_request: invalid user llinco')
|
||||
self.assertLogged('Nov 8 00:16:12 main sshd[32547]: pam_succeed_if(sshd:auth): error retrieving information about user llinco')
|
||||
|
||||
def testWrongCharDebuggex(self):
|
||||
unittest.F2B.SkipIfCfgMissing(stock=True)
|
||||
self.assertTrue(_test_exec(
|
||||
"-l", "notice", # put down log-level, because of too many debug-messages
|
||||
"--datepattern", r"^(?:%a )?%b %d %H:%M:%S(?:\.%f)?(?: %ExY)?",
|
||||
"--debuggex", "--print-all-matched",
|
||||
FILENAME_WRONGCHAR, FILTER_SSHD,
|
||||
r"llinco[^\\]"
|
||||
))
|
||||
self.assertLogged('Error decoding line')
|
||||
self.assertLogged('Lines: 4 lines, 1 ignored, 2 matched, 1 missed')
|
||||
|
||||
self.assertLogged('https://')
|
||||
|
||||
def testNLCharAsPartOfUniChar(self):
|
||||
fname = tempfile.mktemp(prefix='tmp_fail2ban', suffix='uni')
|
||||
# test two multi-byte encodings (both contains `\x0A` in either \x02\x0A or \x0A\x02):
|
||||
for enc in ('utf-16be', 'utf-16le'):
|
||||
self.pruneLog("[test-phase encoding=%s]" % enc)
|
||||
try:
|
||||
fout = open(fname, 'wb')
|
||||
# test on unicode string containing \x0A as part of uni-char,
|
||||
# it must produce exactly 2 lines (both are failures):
|
||||
for l in (
|
||||
'1490349000 \u20AC Failed auth: invalid user Test\u020A from 192.0.2.1\n',
|
||||
'1490349000 \u20AC Failed auth: invalid user TestI from 192.0.2.2\n'
|
||||
):
|
||||
fout.write(l.encode(enc))
|
||||
fout.close()
|
||||
|
||||
self.assertTrue(_test_exec(
|
||||
"-l", "notice", # put down log-level, because of too many debug-messages
|
||||
"--encoding", enc,
|
||||
"--datepattern", r"^EPOCH",
|
||||
fname, r"Failed .* from <HOST>",
|
||||
))
|
||||
|
||||
self.assertLogged(" encoding : %s" % enc,
|
||||
"Lines: 2 lines, 0 ignored, 2 matched, 0 missed", all=True)
|
||||
self.assertNotLogged("Missed line(s)")
|
||||
finally:
|
||||
fout.close()
|
||||
os.unlink(fname)
|
||||
|
||||
def testExecCmdLine_Usage(self):
|
||||
self.assertNotEqual(_test_exec_command_line(), 0)
|
||||
self.pruneLog()
|
||||
self.assertEqual(_test_exec_command_line('-V'), 0)
|
||||
self.assertLogged(fail2banregex.normVersion())
|
||||
self.pruneLog()
|
||||
self.assertEqual(_test_exec_command_line('--version'), 0)
|
||||
|
||||
def testExecCmdLine_Direct(self):
|
||||
self.assertEqual(_test_exec_command_line(
|
||||
'-l', 'info',
|
||||
STR_00, r"Authentication failure for .*? from <HOST>$"
|
||||
), 0)
|
||||
self.assertLogged('Lines: 1 lines, 0 ignored, 1 matched, 0 missed')
|
||||
|
||||
def testExecCmdLine_MissFailID(self):
|
||||
self.assertNotEqual(_test_exec_command_line(
|
||||
'-l', 'info',
|
||||
STR_00, r"Authentication failure"
|
||||
), 0)
|
||||
self.assertLogged('No failure-id group in ')
|
||||
|
||||
def testExecCmdLine_ErrorParam(self):
|
||||
# single line error:
|
||||
self.assertNotEqual(_test_exec_command_line(
|
||||
'-l', 'notice', '-d', '%:%.%-', 'LOG', 'RE'
|
||||
), 0)
|
||||
self.assertLogged('ERROR: Failed to set datepattern')
|
||||
# verbose (traceback/callstack):
|
||||
self.pruneLog()
|
||||
self.assertNotEqual(_test_exec_command_line(
|
||||
'-v', '-d', '%:%.%-', 'LOG', 'RE'
|
||||
), 0)
|
||||
self.assertLogged('Failed to set datepattern')
|
||||
|
||||
def testLogtypeSystemdJournal(self): # pragma: no cover
|
||||
if not fail2banregex.FilterSystemd:
|
||||
raise unittest.SkipTest('Skip test because no systemd backend available')
|
||||
self.assertTrue(_test_exec(
|
||||
"systemd-journal", FILTER_ZZZ_GEN
|
||||
+'[journalmatch="SYSLOG_IDENTIFIER=\x01\x02dummy\x02\x01",'
|
||||
+' failregex="^\x00\x01\x02dummy regex, never match <F-ID>xxx</F-ID>"]'
|
||||
))
|
||||
self.assertLogged("'logtype': 'journal'")
|
||||
self.assertNotLogged("'logtype': 'file'")
|
||||
self.assertLogged('Lines: 0 lines, 0 ignored, 0 matched, 0 missed')
|
||||
self.pruneLog()
|
||||
# logtype specified explicitly (should win in filter):
|
||||
self.assertTrue(_test_exec(
|
||||
"systemd-journal", FILTER_ZZZ_GEN
|
||||
+'[logtype=file,'
|
||||
+' journalmatch="SYSLOG_IDENTIFIER=\x01\x02dummy\x02\x01",'
|
||||
+' failregex="^\x00\x01\x02dummy regex, never match <F-ID>xxx</F-ID>"]'
|
||||
))
|
||||
self.assertLogged("'logtype': 'file'")
|
||||
self.assertNotLogged("'logtype': 'journal'")
|
||||
262
fail2ban-master/fail2ban/tests/failmanagertestcase.py
Normal file
262
fail2ban-master/fail2ban/tests/failmanagertestcase.py
Normal file
@@ -0,0 +1,262 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import unittest
|
||||
|
||||
from ..server import failmanager
|
||||
from ..server.failmanager import FailManager, FailManagerEmpty
|
||||
from ..server.ipdns import IPAddr
|
||||
from ..server.ticket import FailTicket
|
||||
|
||||
|
||||
class AddFailure(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
"""Call before every test case."""
|
||||
super(AddFailure, self).setUp()
|
||||
self.__items = None
|
||||
self.__failManager = FailManager()
|
||||
|
||||
def tearDown(self):
|
||||
"""Call after every test case."""
|
||||
super(AddFailure, self).tearDown()
|
||||
|
||||
def _addDefItems(self):
|
||||
self.__items = [['193.168.0.128', 1167605999.0],
|
||||
['193.168.0.128', 1167605999.0],
|
||||
['193.168.0.128', 1167605999.0],
|
||||
['193.168.0.128', 1167605999.0],
|
||||
['193.168.0.128', 1167605999.0],
|
||||
['87.142.124.10', 1167605999.0],
|
||||
['87.142.124.10', 1167605999.0],
|
||||
['87.142.124.10', 1167605999.0],
|
||||
['100.100.10.10', 1000000000.0],
|
||||
['100.100.10.10', 1000000500.0],
|
||||
['100.100.10.10', 1000001000.0],
|
||||
['100.100.10.10', 1000001500.0],
|
||||
['100.100.10.10', 1000002000.0]]
|
||||
for i in self.__items:
|
||||
self.__failManager.addFailure(FailTicket(i[0], i[1]))
|
||||
|
||||
def testFailManagerAdd(self):
|
||||
self._addDefItems()
|
||||
self.assertEqual(self.__failManager.size(), 3)
|
||||
self.assertEqual(self.__failManager.getFailTotal(), 13)
|
||||
self.__failManager.setFailTotal(0)
|
||||
self.assertEqual(self.__failManager.getFailTotal(), 0)
|
||||
self.__failManager.setFailTotal(13)
|
||||
|
||||
def testFailManagerAdd_MaxMatches(self):
|
||||
maxMatches = 2
|
||||
self.__failManager.maxMatches = maxMatches
|
||||
failures = ["abc\n", "123\n", "ABC\n", "1234\n"]
|
||||
# add failures sequential:
|
||||
i = 80
|
||||
for f in failures:
|
||||
i -= 10
|
||||
ticket = FailTicket("127.0.0.1", 1000002000 - i, [f])
|
||||
ticket.setAttempt(1)
|
||||
self.__failManager.addFailure(ticket)
|
||||
#
|
||||
manFailList = self.__failManager._FailManager__failList
|
||||
self.assertEqual(len(manFailList), 1)
|
||||
ticket = manFailList["127.0.0.1"]
|
||||
# should retrieve 2 matches only, but count of all attempts (4):
|
||||
self.assertEqual(ticket.getAttempt(), len(failures))
|
||||
self.assertEqual(len(ticket.getMatches()), maxMatches)
|
||||
self.assertEqual(ticket.getMatches(), failures[len(failures) - maxMatches:])
|
||||
# add more failures at once:
|
||||
ticket = FailTicket("127.0.0.1", 1000002000 - 10, failures)
|
||||
ticket.setAttempt(len(failures))
|
||||
self.__failManager.addFailure(ticket)
|
||||
#
|
||||
manFailList = self.__failManager._FailManager__failList
|
||||
self.assertEqual(len(manFailList), 1)
|
||||
ticket = manFailList["127.0.0.1"]
|
||||
# should retrieve 2 matches only, but count of all attempts (8):
|
||||
self.assertEqual(ticket.getAttempt(), 2 * len(failures))
|
||||
self.assertEqual(len(ticket.getMatches()), maxMatches)
|
||||
self.assertEqual(ticket.getMatches(), failures[len(failures) - maxMatches:])
|
||||
# add self ticket again:
|
||||
self.__failManager.addFailure(ticket)
|
||||
#
|
||||
manFailList = self.__failManager._FailManager__failList
|
||||
self.assertEqual(len(manFailList), 1)
|
||||
ticket = manFailList["127.0.0.1"]
|
||||
# same matches, but +1 attempt (9)
|
||||
self.assertEqual(ticket.getAttempt(), 2 * len(failures) + 1)
|
||||
self.assertEqual(len(ticket.getMatches()), maxMatches)
|
||||
self.assertEqual(ticket.getMatches(), failures[len(failures) - maxMatches:])
|
||||
# no matches by maxMatches == 0 :
|
||||
self.__failManager.maxMatches = 0
|
||||
self.__failManager.addFailure(ticket)
|
||||
manFailList = self.__failManager._FailManager__failList
|
||||
ticket = manFailList["127.0.0.1"]
|
||||
self.assertEqual(len(ticket.getMatches()), 0)
|
||||
# test set matches None to None:
|
||||
ticket.setMatches(None)
|
||||
|
||||
def testFailManagerMaxTime(self):
|
||||
self._addDefItems()
|
||||
self.assertEqual(self.__failManager.getMaxTime(), 600)
|
||||
self.__failManager.setMaxTime(13)
|
||||
self.assertEqual(self.__failManager.getMaxTime(), 13)
|
||||
self.__failManager.setMaxTime(600)
|
||||
|
||||
def testDel(self):
|
||||
self._addDefItems()
|
||||
self.__failManager.delFailure('193.168.0.128')
|
||||
self.__failManager.delFailure('111.111.1.111')
|
||||
|
||||
self.assertEqual(self.__failManager.size(), 2)
|
||||
|
||||
def testCleanupOK(self):
|
||||
self._addDefItems()
|
||||
timestamp = 1167606999.0
|
||||
self.__failManager.cleanup(timestamp)
|
||||
self.assertEqual(self.__failManager.size(), 0)
|
||||
|
||||
def testCleanupNOK(self):
|
||||
self._addDefItems()
|
||||
timestamp = 1167605990.0
|
||||
self.__failManager.cleanup(timestamp)
|
||||
self.assertEqual(self.__failManager.size(), 2)
|
||||
|
||||
def testbanOK(self):
|
||||
self._addDefItems()
|
||||
self.__failManager.setMaxRetry(5)
|
||||
#ticket = FailTicket('193.168.0.128', None)
|
||||
ticket = self.__failManager.toBan()
|
||||
self.assertEqual(ticket.getID(), "193.168.0.128")
|
||||
self.assertTrue(isinstance(ticket.getID(), (str, IPAddr)))
|
||||
|
||||
# finish with rudimentary tests of the ticket
|
||||
# verify consistent str
|
||||
ticket_str = str(ticket)
|
||||
ticket_repr = repr(ticket)
|
||||
self.assertEqual(
|
||||
ticket_str,
|
||||
'FailTicket: ip=193.168.0.128 time=1167605999.0 bantime=None bancount=0 #attempts=5 matches=[]')
|
||||
self.assertEqual(
|
||||
ticket_repr,
|
||||
'FailTicket: ip=193.168.0.128 time=1167605999.0 bantime=None bancount=0 #attempts=5 matches=[]')
|
||||
self.assertFalse(not ticket)
|
||||
# and some get/set-ers otherwise not tested
|
||||
ticket.setTime(1000002000.0)
|
||||
self.assertEqual(ticket.getTime(), 1000002000.0)
|
||||
# and str() adjusted correspondingly
|
||||
self.assertEqual(
|
||||
str(ticket),
|
||||
'FailTicket: ip=193.168.0.128 time=1000002000.0 bantime=None bancount=0 #attempts=5 matches=[]')
|
||||
|
||||
def testbanNOK(self):
|
||||
self._addDefItems()
|
||||
self.__failManager.setMaxRetry(10)
|
||||
self.assertRaises(FailManagerEmpty, self.__failManager.toBan)
|
||||
|
||||
def testWindow(self):
|
||||
self._addDefItems()
|
||||
ticket = self.__failManager.toBan()
|
||||
self.assertNotEqual(ticket.getID(), "100.100.10.10")
|
||||
ticket = self.__failManager.toBan()
|
||||
self.assertNotEqual(ticket.getID(), "100.100.10.10")
|
||||
self.assertRaises(FailManagerEmpty, self.__failManager.toBan)
|
||||
|
||||
def testBgService(self):
|
||||
bgSvc = self.__failManager._FailManager__bgSvc
|
||||
failManager2nd = FailManager()
|
||||
# test singleton (same object):
|
||||
bgSvc2 = failManager2nd._FailManager__bgSvc
|
||||
self.assertTrue(id(bgSvc) == id(bgSvc2))
|
||||
bgSvc2 = None
|
||||
# test service :
|
||||
self.assertTrue(bgSvc.service(True, True))
|
||||
self.assertFalse(bgSvc.service())
|
||||
# bypass threshold and time:
|
||||
for i in range(1, bgSvc._BgService__threshold):
|
||||
self.assertFalse(bgSvc.service())
|
||||
# bypass time check:
|
||||
bgSvc._BgService__serviceTime = -0x7fffffff
|
||||
self.assertTrue(bgSvc.service())
|
||||
# bypass threshold and time:
|
||||
bgSvc._BgService__serviceTime = -0x7fffffff
|
||||
for i in range(1, bgSvc._BgService__threshold):
|
||||
self.assertFalse(bgSvc.service())
|
||||
self.assertTrue(bgSvc.service(False, True))
|
||||
self.assertFalse(bgSvc.service(False, True))
|
||||
|
||||
|
||||
class FailmanagerComplex(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
"""Call before every test case."""
|
||||
super(FailmanagerComplex, self).setUp()
|
||||
self.__failManager = FailManager()
|
||||
# down logging level for all this tests, because of extremely large failure count (several GB on heavydebug)
|
||||
self.__saved_ll = failmanager.logLevel
|
||||
failmanager.logLevel = 3
|
||||
|
||||
def tearDown(self):
|
||||
super(FailmanagerComplex, self).tearDown()
|
||||
# restore level
|
||||
failmanager.logLevel = self.__saved_ll
|
||||
|
||||
@staticmethod
|
||||
def _ip_range(maxips):
|
||||
class _ip(list):
|
||||
def __str__(self):
|
||||
return '.'.join(map(str, self))
|
||||
def __repr__(self):
|
||||
return str(self)
|
||||
def __key__(self):
|
||||
return str(self)
|
||||
def __hash__(self):
|
||||
#return (int)(struct.unpack('I', struct.pack("BBBB",*self))[0])
|
||||
return (int)(self[0] << 24 | self[1] << 16 | self[2] << 8 | self[3])
|
||||
i = 0
|
||||
c = [127,0,0,0]
|
||||
while i < maxips:
|
||||
for n in range(3,0,-1):
|
||||
if c[n] < 255:
|
||||
c[n] += 1
|
||||
break
|
||||
c[n] = 0
|
||||
yield (i, _ip(c))
|
||||
i += 1
|
||||
|
||||
def testCheckIPGenerator(self):
|
||||
for i, ip in self._ip_range(65536 if not unittest.F2B.fast else 1000):
|
||||
if i == 254:
|
||||
self.assertEqual(str(ip), '127.0.0.255')
|
||||
elif i == 255:
|
||||
self.assertEqual(str(ip), '127.0.1.0')
|
||||
elif i == 1000:
|
||||
self.assertEqual(str(ip), '127.0.3.233')
|
||||
elif i == 65534:
|
||||
self.assertEqual(str(ip), '127.0.255.255')
|
||||
elif i == 65535:
|
||||
self.assertEqual(str(ip), '127.1.0.0')
|
||||
|
||||
29
fail2ban-master/fail2ban/tests/files/action.d/action.py
Normal file
29
fail2ban-master/fail2ban/tests/files/action.d/action.py
Normal file
@@ -0,0 +1,29 @@
|
||||
|
||||
from fail2ban.server.action import ActionBase
|
||||
|
||||
|
||||
class TestAction(ActionBase):
|
||||
|
||||
def __init__(self, jail, name, opt1, opt2=None):
|
||||
super(TestAction, self).__init__(jail, name)
|
||||
self._logSys.debug("%s initialised" % self.__class__.__name__)
|
||||
self.opt1 = opt1
|
||||
self.opt2 = opt2
|
||||
self._opt3 = "Hello"
|
||||
|
||||
def start(self):
|
||||
self._logSys.debug("%s action start" % self.__class__.__name__)
|
||||
|
||||
def stop(self):
|
||||
self._logSys.debug("%s action stop" % self.__class__.__name__)
|
||||
|
||||
def ban(self, aInfo):
|
||||
self._logSys.debug("%s action ban" % self.__class__.__name__)
|
||||
|
||||
def unban(self, aInfo):
|
||||
self._logSys.debug("%s action unban" % self.__class__.__name__)
|
||||
|
||||
def testmethod(self, text):
|
||||
return "%s %s %s" % (self._opt3, text, self.opt1)
|
||||
|
||||
Action = TestAction
|
||||
@@ -0,0 +1,18 @@
|
||||
|
||||
from fail2ban.server.action import ActionBase
|
||||
|
||||
|
||||
class TestAction(ActionBase):
|
||||
|
||||
def ban(self, aInfo):
|
||||
self._logSys.info("ban ainfo %s, %s, %s, %s",
|
||||
aInfo["ipmatches"] != '', aInfo["ipjailmatches"] != '', aInfo["ipfailures"] > 0, aInfo["ipjailfailures"] > 0
|
||||
)
|
||||
self._logSys.info("jail info %d, %d, %d, %d",
|
||||
aInfo["jail.banned"], aInfo["jail.banned_total"], aInfo["jail.found"], aInfo["jail.found_total"]
|
||||
)
|
||||
|
||||
def unban(self, aInfo):
|
||||
pass
|
||||
|
||||
Action = TestAction
|
||||
@@ -0,0 +1,22 @@
|
||||
|
||||
from fail2ban.server.action import ActionBase
|
||||
|
||||
|
||||
class TestAction(ActionBase):
|
||||
|
||||
def __init__(self, jail, name):
|
||||
super(TestAction, self).__init__(jail, name)
|
||||
|
||||
def start(self):
|
||||
raise Exception()
|
||||
|
||||
def stop(self):
|
||||
raise Exception()
|
||||
|
||||
def ban(self):
|
||||
raise Exception()
|
||||
|
||||
def unban(self):
|
||||
raise Exception()
|
||||
|
||||
Action = TestAction
|
||||
@@ -0,0 +1,20 @@
|
||||
|
||||
from fail2ban.server.action import ActionBase
|
||||
|
||||
|
||||
class TestAction(ActionBase):
|
||||
|
||||
def ban(self, aInfo):
|
||||
del aInfo['ip']
|
||||
self._logSys.info("%s ban deleted aInfo IP", self._name)
|
||||
|
||||
def unban(self, aInfo):
|
||||
del aInfo['ip']
|
||||
self._logSys.info("%s unban deleted aInfo IP", self._name)
|
||||
|
||||
def flush(self):
|
||||
# intended error to cover no unhandled exception occurs in flush
|
||||
# as well as unbans are done individually after errored flush.
|
||||
raise ValueError("intended error")
|
||||
|
||||
Action = TestAction
|
||||
@@ -0,0 +1,6 @@
|
||||
|
||||
from fail2ban.server.action import ActionBase
|
||||
|
||||
|
||||
class TestAction(ActionBase):
|
||||
pass
|
||||
@@ -0,0 +1,10 @@
|
||||
|
||||
class TestAction():
|
||||
|
||||
def __init__(self, jail, name):
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
Action = TestAction
|
||||
@@ -0,0 +1,13 @@
|
||||
|
||||
Apache Auth.
|
||||
|
||||
This directory contains the configuration file of Apache's Web Server to
|
||||
simulate authentication files.
|
||||
|
||||
These assumed that /var/www/html is the web root and AllowOverrides is "All".
|
||||
|
||||
The subdirectories here are copied to the /var/www/html directory.
|
||||
|
||||
Commands executed are in testcases/files/log/apache-auth with their
|
||||
corresponding failure mechanism.
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
AuthType basic
|
||||
AuthName "private area"
|
||||
AuthBasicProvider file
|
||||
AuthUserFile /var/www/html/basic/authz_owner/.htpasswd
|
||||
Require file-owner
|
||||
@@ -0,0 +1 @@
|
||||
username:$apr1$1f5oQUl4$21lLXSN7xQOPtNsj5s4Nk/
|
||||
@@ -0,0 +1,5 @@
|
||||
AuthType basic
|
||||
AuthName "private area"
|
||||
AuthBasicProvider file
|
||||
AuthUserFile /var/www/html/basic/file/.htpasswd
|
||||
Require valid-user
|
||||
@@ -0,0 +1 @@
|
||||
username:$apr1$uUMsOjCQ$.BzXClI/B/vZKddgIAJCR.
|
||||
@@ -0,0 +1,168 @@
|
||||
#!/usr/bin/env fail2ban-python
|
||||
import requests
|
||||
|
||||
try:
|
||||
import hashlib
|
||||
md5sum = hashlib.md5
|
||||
except ImportError: # pragma: no cover
|
||||
# hashlib was introduced in Python 2.5. For compatibility with those
|
||||
# elderly Pythons, import from md5
|
||||
import md5
|
||||
md5sum = md5.new
|
||||
|
||||
|
||||
def auth(v):
|
||||
|
||||
ha1 = md5sum(username + ':' + realm + ':' + password).hexdigest()
|
||||
ha2 = md5sum("GET:" + url).hexdigest()
|
||||
|
||||
#response = md5sum(ha1 + ':' + v['nonce'][1:-1] + ':' + v['nc'] + ':' + v['cnonce'][1:-1]
|
||||
# + ':' + v['qop'][1:-1] + ':' + ha2).hexdigest()
|
||||
|
||||
nonce = v['nonce'][1:-1]
|
||||
nc=v.get('nc') or ''
|
||||
cnonce = v.get('cnonce') or ''
|
||||
#opaque = v.get('opaque') or ''
|
||||
qop = v['qop'][1:-1]
|
||||
algorithm = v['algorithm']
|
||||
response = md5sum(ha1 + ':' + nonce + ':' + nc + ':' + cnonce + ':' + qop + ':' + ha2).hexdigest()
|
||||
|
||||
p = requests.Request('GET', host + url).prepare()
|
||||
#p.headers['Authentication-Info'] = response
|
||||
p.headers['Authorization'] = """
|
||||
Digest username="%s",
|
||||
algorithm="%s",
|
||||
realm="%s",
|
||||
uri="%s",
|
||||
nonce="%s",
|
||||
cnonce="",
|
||||
nc="",
|
||||
qop=%s,
|
||||
response="%s"
|
||||
""" % ( username, algorithm, realm, url, nonce, qop, response )
|
||||
# opaque="%s",
|
||||
print((p.method, p.url, p.headers))
|
||||
s = requests.Session()
|
||||
return s.send(p)
|
||||
|
||||
|
||||
def preauth():
|
||||
r = requests.get(host + url)
|
||||
print(r)
|
||||
r.headers['www-authenticate'].split(', ')
|
||||
return dict([ a.split('=',1) for a in r.headers['www-authenticate'].split(', ') ])
|
||||
|
||||
|
||||
url='/digest/'
|
||||
host = 'http://localhost:801'
|
||||
|
||||
v = preauth()
|
||||
|
||||
username="username"
|
||||
password = "password"
|
||||
print(v)
|
||||
|
||||
realm = 'so far away'
|
||||
r = auth(v)
|
||||
|
||||
realm = v['Digest realm'][1:-1]
|
||||
|
||||
# [Sun Jul 28 21:27:56.549667 2013] [auth_digest:error] [pid 24835:tid 139895297222400] [client 127.0.0.1:57052] AH01788: realm mismatch - got `so far away' but expected `digest private area'
|
||||
|
||||
|
||||
algorithm = v['algorithm']
|
||||
v['algorithm'] = 'super funky chicken'
|
||||
r = auth(v)
|
||||
|
||||
# [Sun Jul 28 21:41:20 2013] [error] [client 127.0.0.1] Digest: unknown algorithm `super funky chicken' received: /digest/
|
||||
|
||||
print((r.status_code,r.headers, r.text))
|
||||
v['algorithm'] = algorithm
|
||||
|
||||
|
||||
r = auth(v)
|
||||
print((r.status_code,r.headers, r.text))
|
||||
|
||||
nonce = v['nonce']
|
||||
v['nonce']=v['nonce'][5:-5]
|
||||
|
||||
r = auth(v)
|
||||
print((r.status_code,r.headers, r.text))
|
||||
|
||||
# [Sun Jul 28 21:05:31.178340 2013] [auth_digest:error] [pid 24224:tid 139895539455744] [client 127.0.0.1:56906] AH01793: invalid qop `auth' received: /digest/qop_none/
|
||||
|
||||
|
||||
v['nonce']=nonce[0:11] + 'ZZZ' + nonce[14:]
|
||||
|
||||
r = auth(v)
|
||||
print((r.status_code,r.headers, r.text))
|
||||
|
||||
#[Sun Jul 28 21:18:11.769228 2013] [auth_digest:error] [pid 24752:tid 139895505884928] [client 127.0.0.1:56964] AH01776: invalid nonce b9YAiJDiBAZZZ1b1abe02d20063ea3b16b544ea1b0d981c1bafe received - hash is not d42d824dee7aaf50c3ba0a7c6290bd453e3dd35b
|
||||
|
||||
|
||||
url='/digest_time/'
|
||||
v=preauth()
|
||||
|
||||
import time
|
||||
time.sleep(1)
|
||||
|
||||
r = auth(v)
|
||||
print((r.status_code,r.headers, r.text))
|
||||
|
||||
# Obtained by putting the following code in modules/aaa/mod_auth_digest.c
|
||||
# in the function initialize_secret
|
||||
# {
|
||||
# const char *hex = "0123456789abcdef";
|
||||
# char secbuff[SECRET_LEN * 4];
|
||||
# char *hash = secbuff;
|
||||
# int idx;
|
||||
|
||||
# for (idx=0; idx<sizeof(secret); idx++) {
|
||||
# *hash++ = hex[secret[idx] >> 4];
|
||||
# *hash++ = hex[secret[idx] & 0xF];
|
||||
# }
|
||||
# *hash = '\0';
|
||||
# /* remove comment makings in below for apache-2.4+ */
|
||||
# ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, /* APLOGNO(11759) */ "secret: %s", secbuff);
|
||||
# }
|
||||
|
||||
|
||||
import sha
|
||||
import binascii
|
||||
import base64
|
||||
import struct
|
||||
|
||||
apachesecret = binascii.unhexlify('497d8894adafa5ec7c8c981ddf9c8457da7a90ac')
|
||||
s = sha.sha(apachesecret)
|
||||
|
||||
v=preauth()
|
||||
|
||||
print((v['nonce']))
|
||||
realm = v['Digest realm'][1:-1]
|
||||
|
||||
(t,) = struct.unpack('l',base64.b64decode(v['nonce'][1:13]))
|
||||
|
||||
# whee, time travel
|
||||
t = t + 5540
|
||||
|
||||
timepac = base64.b64encode(struct.pack('l',t))
|
||||
|
||||
s.update(realm)
|
||||
s.update(timepac)
|
||||
|
||||
v['nonce'] = v['nonce'][0] + timepac + s.hexdigest() + v['nonce'][-1]
|
||||
|
||||
print(v)
|
||||
|
||||
r = auth(v)
|
||||
#[Mon Jul 29 02:12:55.539813 2013] [auth_digest:error] [pid 9647:tid 139895522670336] [client 127.0.0.1:58474] AH01777: invalid nonce 59QJppTiBAA=b08983fd166ade9840407df1b0f75b9e6e07d88d received - user attempted time travel
|
||||
print((r.status_code,r.headers, r.text))
|
||||
|
||||
url='/digest_onetime/'
|
||||
v=preauth()
|
||||
|
||||
# Need opaque header handling in auth
|
||||
r = auth(v)
|
||||
print((r.status_code,r.headers, r.text))
|
||||
r = auth(v)
|
||||
print((r.status_code,r.headers, r.text))
|
||||
@@ -0,0 +1,6 @@
|
||||
AuthType Digest
|
||||
AuthName "digest private area"
|
||||
AuthDigestDomain /digest/
|
||||
AuthBasicProvider file
|
||||
AuthUserFile /var/www/html/digest/.htpasswd
|
||||
Require valid-user
|
||||
@@ -0,0 +1 @@
|
||||
username:digest private area:fad48d3a7c63f61b5b3567a4105bbb04
|
||||
@@ -0,0 +1,9 @@
|
||||
AuthType Digest
|
||||
AuthName "digest anon"
|
||||
AuthDigestDomain /digest_anon/
|
||||
AuthBasicProvider file anon
|
||||
AuthUserFile /var/www/html/digest_anon/.htpasswd
|
||||
Anonymous_NoUserID off
|
||||
Anonymous anonymous
|
||||
Anonymous_LogEmail on
|
||||
Require valid-user
|
||||
@@ -0,0 +1,3 @@
|
||||
username:digest anon:25e4077a9344ceb1a88f2a62c9fb60d8
|
||||
05bbb04
|
||||
anonymous:digest anon:faa4e5870970cf935bb9674776e6b26a
|
||||
@@ -0,0 +1,7 @@
|
||||
AuthType Digest
|
||||
AuthName "digest private area"
|
||||
AuthDigestDomain /digest_time/
|
||||
AuthBasicProvider file
|
||||
AuthUserFile /var/www/html/digest_time/.htpasswd
|
||||
AuthDigestNonceLifetime 1
|
||||
Require valid-user
|
||||
@@ -0,0 +1 @@
|
||||
username:digest private area:fad48d3a7c63f61b5b3567a4105bbb04
|
||||
@@ -0,0 +1,6 @@
|
||||
AuthType Digest
|
||||
AuthName "digest private area"
|
||||
AuthDigestDomain /digest_wrongrelm/
|
||||
AuthBasicProvider file
|
||||
AuthUserFile /var/www/html/digest_wrongrelm/.htpasswd
|
||||
Require valid-user
|
||||
@@ -0,0 +1,2 @@
|
||||
username:wrongrelm:99cd340e1283c6d0ab34734bd47bdc30
|
||||
4105bbb04
|
||||
@@ -0,0 +1 @@
|
||||
Deny from all
|
||||
BIN
fail2ban-master/fail2ban/tests/files/database_v1.db
Normal file
BIN
fail2ban-master/fail2ban/tests/files/database_v1.db
Normal file
Binary file not shown.
BIN
fail2ban-master/fail2ban/tests/files/database_v2.db
Normal file
BIN
fail2ban-master/fail2ban/tests/files/database_v2.db
Normal file
Binary file not shown.
@@ -0,0 +1,11 @@
|
||||
[DEFAULT]
|
||||
|
||||
honeypot = fail2ban@localhost
|
||||
|
||||
[Definition]
|
||||
|
||||
failregex = to=<honeypot> fromip=<IP>
|
||||
|
||||
[Init]
|
||||
|
||||
honeypot = sweet@example.com
|
||||
@@ -0,0 +1,41 @@
|
||||
# Generic configuration items (to be used as interpolations) in other
|
||||
# filters or actions configurations
|
||||
#
|
||||
# Author: Yaroslav Halchenko
|
||||
#
|
||||
# $Revision$
|
||||
#
|
||||
|
||||
[DEFAULT]
|
||||
|
||||
# Daemon definition is to be specialized (if needed) in .conf file
|
||||
_daemon = \S*
|
||||
|
||||
#
|
||||
# Shortcuts for easier comprehension of the failregex
|
||||
#
|
||||
# PID.
|
||||
# EXAMPLES: [123]
|
||||
__pid_re = (?:\[\d+\])
|
||||
|
||||
# Daemon name (with optional source_file:line or whatever)
|
||||
# EXAMPLES: pam_rhosts_auth, [sshd], pop(pam_unix)
|
||||
__daemon_re = [\[\(]?%(_daemon)s(?:\(\S+\))?[\]\)]?:?
|
||||
|
||||
# Combinations of daemon name and PID
|
||||
# EXAMPLES: sshd[31607], pop(pam_unix)[4920]
|
||||
__daemon_combs_re = (?:%(__pid_re)s?:\s+%(__daemon_re)s|%(__daemon_re)s%(__pid_re)s?:)
|
||||
|
||||
# Some messages have a kernel prefix with a timestamp
|
||||
# EXAMPLES: kernel: [769570.846956]
|
||||
__kernel_prefix = kernel: \[\d+\.\d+\]
|
||||
|
||||
__hostname = \S+
|
||||
|
||||
#
|
||||
# Common line prefixes (beginnings) which could be used in filters
|
||||
#
|
||||
# [hostname] [vserver tag] daemon_id spaces
|
||||
# this can be optional (for instance if we match named native log files)
|
||||
__prefix_line = \s*(?:%(__hostname)s )?(?:%(__kernel_prefix)s )?(?:@vserver_\S+ )?%(__daemon_combs_re)s?\s*
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
# Fail2Ban configuration file
|
||||
#
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
# $Revision$
|
||||
#
|
||||
|
||||
[INCLUDES]
|
||||
|
||||
# Read common prefixes. If any customizations available -- read them from
|
||||
# common.local
|
||||
before = testcase-common.conf
|
||||
|
||||
|
||||
[Definition]
|
||||
|
||||
_daemon = sshd
|
||||
|
||||
# Option: failregex
|
||||
# Notes.: regex to match the password failures messages in the logfile. The
|
||||
# host must be matched by a group named "host". The tag "<HOST>" can
|
||||
# be used for standard IP/hostname matching and is only an alias for
|
||||
# (?:::f{4,6}:)?(?P<host>[\w\-.^_]+)
|
||||
# Values: TEXT
|
||||
#
|
||||
failregex = ^%(__prefix_line)s(?:error: PAM: )?Authentication failure for .* from <HOST>\s*$
|
||||
^%(__prefix_line)s(?:error: PAM: )?User not known to the underlying authentication module for .* from <HOST>\s*$
|
||||
^%(__prefix_line)s(?:error: PAM: )?User not known to the\nunderlying authentication.+$<SKIPLINES>^.+ module for .* from <HOST>\s*$
|
||||
|
||||
# Option: ignoreregex
|
||||
# Notes.: regex to ignore. If this regex matches, the line is ignored.
|
||||
# Values: TEXT
|
||||
#
|
||||
ignoreregex = ^.+ john from host 192.168.1.1\s*$
|
||||
|
||||
# "maxlines" is number of log lines to buffer for multi-line regex searches
|
||||
maxlines = 1
|
||||
|
||||
# "datepattern" allows setting of a custom data pattern as alternative
|
||||
# to the default date detectors. See manpage strptime(3) for date formats.
|
||||
# NOTE: that ALL '%' must be prefixed with '%' due to string substitution
|
||||
# e.g. %%Y-%%m-%%d %%H:%%M
|
||||
datepattern = %%Y %%m %%d %%H:%%M:%%S
|
||||
|
||||
# Option: journalmatch
|
||||
# Notes.: systemd journalctl style match filter for journal based backends
|
||||
# Values: TEXT
|
||||
#
|
||||
journalmatch = _COMM=sshd + _SYSTEMD_UNIT=sshd.service _UID=0
|
||||
"FIELD= with spaces " + AFIELD=" with + char and spaces"
|
||||
@@ -0,0 +1,12 @@
|
||||
[INCLUDES]
|
||||
|
||||
# Read common prefixes. If any customizations available -- read them from
|
||||
# common.local
|
||||
before = testcase-common.conf
|
||||
|
||||
[Definition]
|
||||
|
||||
_daemon = sshd
|
||||
__prefix_line = %(known/__prefix_line)s(?:\w{14,20}: )?
|
||||
|
||||
failregex = %(__prefix_line)s test
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user