instructions
This commit is contained in:
25
fail2ban-master/fail2ban/server/__init__.py
Normal file
25
fail2ban-master/fail2ban/server/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
1042
fail2ban-master/fail2ban/server/action.py
Normal file
1042
fail2ban-master/fail2ban/server/action.py
Normal file
File diff suppressed because it is too large
Load Diff
745
fail2ban-master/fail2ban/server/actions.py
Normal file
745
fail2ban-master/fail2ban/server/actions.py
Normal file
@@ -0,0 +1,745 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
try:
|
||||
from collections.abc import Mapping
|
||||
except ImportError:
|
||||
from collections import Mapping
|
||||
from collections import OrderedDict
|
||||
|
||||
from .banmanager import BanManager, BanTicket
|
||||
from .ipdns import IPAddr
|
||||
from .jailthread import JailThread
|
||||
from .action import ActionBase, CommandAction, CallingMap
|
||||
from .mytime import MyTime
|
||||
from .observer import Observers
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Actions(JailThread, Mapping):
|
||||
"""Handles jail actions.
|
||||
|
||||
This class handles the actions of the jail. Creation, deletion or to
|
||||
actions must be done through this class. This class is based on the
|
||||
Mapping type, and the `add` method must be used to add new actions.
|
||||
This class also starts and stops the actions, and fetches bans from
|
||||
the jail executing these bans via the actions.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
jail: Jail
|
||||
The jail of which the actions belongs to.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
daemon
|
||||
ident
|
||||
name
|
||||
status
|
||||
active : bool
|
||||
Control the state of the thread.
|
||||
idle : bool
|
||||
Control the idle state of the thread.
|
||||
sleeptime : int
|
||||
The time the thread sleeps for in the loop.
|
||||
"""
|
||||
|
||||
def __init__(self, jail):
    """Initialize the actions handler thread for the given jail.

    Parameters
    ----------
    jail : Jail
        The jail this actions handler belongs to.
    """
    JailThread.__init__(self, name="f2b/a."+jail.name)
    ## The jail which contains this action.
    self._jail = jail
    # registered actions by name, in insertion order
    self._actions = OrderedDict()
    ## The ban manager.
    self.banManager = BanManager()
    # epoch counter, bumped to force reban after action restart/reload
    self.banEpoch = 0
    # timestamp of last consistency check (rate limiting, see __checkBan)
    self.__lastConsistencyCheckTM = 0
    ## Precedence of ban (over unban), so max number of tickets banned (to call an unban check):
    self.banPrecedence = 10
    ## Max count of outdated tickets to unban per each __checkUnBan operation:
    self.unbanMaxCount = self.banPrecedence * 2
||||
|
||||
@staticmethod
def _load_python_module(pythonModule):
    """Load an external Python action module and validate its interface.

    Parameters
    ----------
    pythonModule : str
        Path to the Python file to load.

    Returns
    -------
    module
        The loaded module, guaranteed to expose an `Action` class
        derived from `ActionBase`.

    Raises
    ------
    RuntimeError
        If the module has no `Action` class or it does not derive
        from `ActionBase`.
    """
    mod = Utils.load_python_module(pythonModule)
    if not hasattr(mod, "Action"): # pragma: no cover
        raise RuntimeError(
            "%s module does not have 'Action' class" % pythonModule)
    elif not issubclass(mod.Action, ActionBase): # pragma: no cover
        raise RuntimeError(
            "%s module %s does not implement required methods" % (
                pythonModule, mod.Action.__name__))
    return mod
||||
|
||||
|
||||
def add(self, name, pythonModule=None, initOpts=None, reload=False):
    """Adds a new action.

    Add a new action if not already present, defaulting to standard
    `CommandAction`, or specified Python module.

    Parameters
    ----------
    name : str
        The name of the action.
    pythonModule : str, optional
        Path to Python file which must contain `Action` class.
        Default None, which means `CommandAction` is used.
    initOpts : dict, optional
        Options for Python Action, used as keyword arguments for
        initialisation. Default None.
    reload : bool, optional
        If True and the action already exists, schedule it for reload
        instead of raising. Default False.

    Raises
    ------
    ValueError
        If action name already exists.
    RuntimeError
        If external Python module does not have `Action` class
        or does not implement necessary methods as per `ActionBase`
        abstract class.
    """
    # Check is action name already exists
    if name in self._actions:
        if not reload:
            raise ValueError("Action %s already exists" % name)
        # don't create new action if reload supported:
        action = self._actions[name]
        if hasattr(action, 'reload'):
            # don't execute reload right now, reload after all parameters are actualized
            if hasattr(action, 'clearAllParams'):
                action.clearAllParams()
            self._reload_actions[name] = initOpts
            return
    ## Create new action:
    if pythonModule is None:
        action = CommandAction(self._jail, name)
    else:
        customActionModule = self._load_python_module(pythonModule)
        # initOpts is documented optional - tolerate None (would raise
        # TypeError if unpacked directly):
        action = customActionModule.Action(self._jail, name, **(initOpts or {}))
    self._actions[name] = action
||||
|
||||
def reload(self, begin=True):
    """ Begin or end of reloading resp. refreshing of all parameters

    Parameters
    ----------
    begin : bool
        True starts a reload transaction (collect pending reloads);
        False commits it: reload touched actions, then unban/stop/remove
        any actions that were not touched during the transaction.
    """
    if begin:
        self._reload_actions = dict()
    else:
        if hasattr(self, '_reload_actions'):
            # reload actions after all parameters set via stream:
            for name, initOpts in self._reload_actions.items():
                if name in self._actions:
                    self._actions[name].reload(**(initOpts if initOpts else {}))
            # remove obsolete actions (untouched by reload process):
            delacts = OrderedDict((name, action) for name, action in self._actions.items()
                if name not in self._reload_actions)
            if len(delacts):
                # unban all tickets using removed actions only:
                self.__flushBan(db=False, actions=delacts, stop=True)
                # stop and remove it:
                self.stopActions(actions=delacts)
            delattr(self, '_reload_actions')
||||
|
||||
def __getitem__(self, name):
    """Look up a registered action by its name (Mapping protocol)."""
    if name in self._actions:
        return self._actions[name]
    raise KeyError("Invalid Action name: %s" % name)
||||
|
||||
def __delitem__(self, name):
    """Remove a registered action by its name (Mapping protocol)."""
    if name not in self._actions:
        raise KeyError("Invalid Action name: %s" % name)
    del self._actions[name]
||||
|
||||
def __iter__(self):
    # iterate action names in insertion order (self._actions is an OrderedDict)
    return iter(self._actions)
||||
|
||||
def __len__(self):
    # number of registered actions
    return len(self._actions)
||||
|
||||
def __eq__(self, other): # Required for Threading
    # instances are never equal to anything - each actions thread is unique
    return False
||||
|
||||
def __hash__(self): # Required for Threading
    # identity hash, consistent with __eq__ (no two instances compare equal)
    return id(self)
||||
|
||||
##
# Set the ban time.
#
# @param value the time

def setBanTime(self, value):
    """Set the jail's ban time.

    Parameters
    ----------
    value : int or str
        Ban time in seconds, or an abbreviated time string converted
        via `MyTime.str2seconds`.
    """
    value = MyTime.str2seconds(value)
    self.banManager.setBanTime(value)
    # lazy %-args: message only formatted if INFO level is enabled
    logSys.info(" banTime: %s", value)
||||
|
||||
##
# Get the ban time.
#
# @return the time

def getBanTime(self):
    """Return the configured ban time (seconds) of the ban manager."""
    return self.banManager.getBanTime()
||||
|
||||
def getBanned(self, ids):
    """Check given IDs against the current ban list.

    Returns the whole ban list if `ids` is empty, a single 0/1 flag for
    one ID, or a list of 0/1 flags for multiple IDs.
    """
    banned = self.banManager.getBanList()
    if not ids:
        return banned
    flags = [int(i in banned) for i in ids]
    return flags[0] if len(ids) == 1 else flags
||||
|
||||
def getBanList(self, withTime=False):
    """Returns the list of banned IP addresses.

    Parameters
    ----------
    withTime : bool
        If True, entries include ban-time information
        (passed through to `BanManager.getBanList`).

    Returns
    -------
    list
        The list of banned IP addresses (ordered).
    """
    return self.banManager.getBanList(ordered=True, withTime=withTime)
||||
|
||||
def addBannedIP(self, ip):
    """Ban an IP or list of IPs."""
    now = MyTime.time()
    # normalize to a sequence, then wrap each entry into a ban ticket:
    addresses = ip if isinstance(ip, list) else [ip]
    tickets = [BanTicket(addr, now) for addr in addresses]
    return self.__checkBan(tickets)
||||
|
||||
def removeBannedIP(self, ip=None, db=True, ifexists=False):
    """Removes banned IP calling actions' unban method

    Remove a banned IP now, rather than waiting for it to expire,
    even if set to never expire.

    Parameters
    ----------
    ip : list, str, IPAddr or None
        The IP address (or multiple IPs as list) to unban or all IPs if None
    db : bool
        Also delete the ban from the jail's database (if one is attached).
    ifexists : bool
        If True, silently return 0 instead of raising when not banned.

    Returns
    -------
    int
        Count of tickets unbanned.

    Raises
    ------
    ValueError
        If `ip` is not banned
    """
    # Unban all?
    if ip is None:
        return self.__flushBan(db)
    # Multiple IPs:
    if isinstance(ip, (list, tuple)):
        missed = []
        cnt = 0
        for i in ip:
            try:
                # recurse for each single address:
                cnt += self.removeBannedIP(i, db, ifexists)
            except ValueError:
                if not ifexists:
                    missed.append(i)
        if missed:
            raise ValueError("not banned: %r" % missed)
        return cnt
    # Single IP:
    # Always delete ip from database (also if currently not banned)
    if db and self._jail.database is not None:
        self._jail.database.delBan(self._jail, ip)
    # Find the ticket with the IP.
    ticket = self.banManager.getTicketByID(ip)
    if ticket is not None:
        # Unban the IP.
        self.__unBan(ticket)
    else:
        # Multiple IPs by subnet or dns:
        if not isinstance(ip, IPAddr):
            ipa = IPAddr(ip)
            if not ipa.isSingle: # subnet (mask/cidr) or raw (may be dns/hostname):
                # unban every currently banned address contained in the subnet:
                ips = list(filter(ipa.contains, self.banManager.getBanList()))
                if ips:
                    return self.removeBannedIP(ips, db, ifexists)
        # not found:
        msg = "%s is not banned" % ip
        logSys.log(logging.MSG, msg)
        if ifexists:
            return 0
        raise ValueError(msg)
    return 1
||||
|
||||
|
||||
def stopActions(self, actions=None):
    """Stops the actions in reverse sequence (optionally filtered)

    Parameters
    ----------
    actions : dict, optional
        Subset of actions to stop; default all actions of the jail.
    """
    if actions is None:
        actions = self._actions
    # iterate a reversed *copy*, because entries are deleted from
    # self._actions inside the loop:
    for name, action in reversed(list(actions.items())):
        try:
            action.stop()
        except Exception as e:
            logSys.error("Failed to stop jail '%s' action '%s': %s",
                self._jail.name, name, e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
        # remove even if stop failed, so the action doesn't linger:
        del self._actions[name]
        logSys.debug("%s: action %s terminated", self._jail.name, name)
||||
|
||||
|
||||
def run(self):
    """Main loop for Threading.

    This function is the main loop of the thread. It checks the jail
    queue and executes commands when an IP address is banned.

    Returns
    -------
    bool
        True when the thread exits nicely.
    """
    cnt = 0
    # start all actions; a failing action does not prevent the others:
    for name, action in self._actions.items():
        try:
            action.start()
        except Exception as e:
            logSys.error("Failed to start jail '%s' action '%s': %s",
                self._jail.name, name, e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
    while self.active:
        try:
            if self.idle:
                logSys.debug("Actions: enter idle mode")
                Utils.wait_for(lambda: not self.active or not self.idle,
                    lambda: False, self.sleeptime)
                logSys.debug("Actions: leave idle mode")
                continue
            # wait for ban (stop if gets inactive, pending ban or unban):
            bancnt = 0
            # wake up early enough for the next scheduled unban:
            wt = min(self.sleeptime, self.banManager._nextUnbanTime - MyTime.time())
            logSys.log(5, "Actions: wait for pending tickets %s (default %s)", wt, self.sleeptime)
            if Utils.wait_for(lambda: not self.active or self._jail.hasFailTickets, wt):
                bancnt = self.__checkBan()
                cnt += bancnt
            # unban if nothing is banned not later than banned tickets >= banPrecedence
            if not bancnt or cnt >= self.banPrecedence:
                if self.active:
                    # let shrink the ban list faster
                    bancnt *= 2
                    logSys.log(5, "Actions: check-unban %s, bancnt %s, max: %s", bancnt if bancnt and bancnt < self.unbanMaxCount else self.unbanMaxCount, bancnt, self.unbanMaxCount)
                    self.__checkUnBan(bancnt if bancnt and bancnt < self.unbanMaxCount else self.unbanMaxCount)
                cnt = 0
        except Exception as e: # pragma: no cover
            logSys.error("[%s] unhandled error in actions thread: %s",
                self._jail.name, e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)

    # shutdown: repeal all current bans, then stop the actions:
    self.__flushBan(stop=True)
    self.stopActions()
    return True
||||
|
||||
class ActionInfo(CallingMap):
    """Lazy tag map passed to actions for a single ticket.

    Values are computed on demand (see `CallingMap`) from the ticket
    and the jail; computed values are memoized in `storage`.
    """

    # items shown in repr() of the map:
    CM_REPR_ITEMS = ("fid", "raw-ticket")

    # tag-name -> lambda producing the value for this ticket/jail:
    AI_DICT = {
        "ip": lambda self: self.__ticket.getIP(),
        "family": lambda self: self['ip'].familyStr,
        "ip-rev": lambda self: self['ip'].getPTR(''),
        "ip-host": lambda self: self['ip'].getHost(),
        "fid": lambda self: self.__ticket.getID(),
        "failures": lambda self: self.__ticket.getAttempt(),
        "time": lambda self: self.__ticket.getTime(),
        "bantime": lambda self: self._getBanTime(),
        "bancount": lambda self: self.__ticket.getBanCount(),
        "matches": lambda self: "\n".join(self.__ticket.getMatches()),
        # to bypass actions, that should not be executed for restored tickets
        "restored": lambda self: (1 if self.__ticket.restored else 0),
        # extra-interpolation - all match-tags (captured from the filter):
        "F-*": lambda self, tag=None: self.__ticket.getData(tag),
        # merged info:
        "ipmatches": lambda self: "\n".join(self._mi4ip(True).getMatches()),
        "ipjailmatches": lambda self: "\n".join(self._mi4ip().getMatches()),
        "ipfailures": lambda self: self._mi4ip(True).getAttempt(),
        "ipjailfailures": lambda self: self._mi4ip().getAttempt(),
        # raw ticket info:
        "raw-ticket": lambda self: repr(self.__ticket),
        # jail info:
        "jail.banned": lambda self: self.__jail.actions.banManager.size(),
        "jail.banned_total": lambda self: self.__jail.actions.banManager.getBanTotal(),
        "jail.found": lambda self: self.__jail.filter.failManager.size(),
        "jail.found_total": lambda self: self.__jail.filter.failManager.getFailTotal()
    }

    __slots__ = CallingMap.__slots__ + ('__ticket', '__jail', '__mi4ip')

    def __init__(self, ticket, jail=None, immutable=True, data=AI_DICT):
        self.__ticket = ticket
        self.__jail = jail
        self.storage = dict()
        self.immutable = immutable
        self.data = data

    def copy(self): # pragma: no cover
        return self.__class__(self.__ticket, self.__jail, self.immutable, self.data.copy())

    def _getBanTime(self):
        # ticket-specific ban time wins; fall back to jail default:
        btime = self.__ticket.getBanTime()
        if btime is None: btime = self.__jail.actions.getBanTime()
        return int(btime)

    def _mi4ip(self, overalljails=False):
        """Gets bans merged once, a helper for lambda(s), prevents stop of executing action by any exception inside.

        This function never returns None for ainfo lambdas - always a ticket (merged or single one)
        and prevents any errors through merging (to guarantee ban actions will be executed).
        [TODO] move merging to observer - here we could wait for merge and read already merged info from a database

        Parameters
        ----------
        overalljails : bool
            switch to get a merged bans :
            False - (default) bans merged for current jail only
            True - bans merged for all jails of current ip address

        Returns
        -------
        BanTicket
            merged or self ticket only
        """
        if not hasattr(self, '__mi4ip'):
            self.__mi4ip = {}
        mi = self.__mi4ip
        idx = 'all' if overalljails else 'jail'
        if idx in mi:
            # already merged (or merge failed -> fall back to own ticket):
            return mi[idx] if mi[idx] is not None else self.__ticket
        try:
            jail = self.__jail
            ip = self['ip']
            mi[idx] = None
            if not jail.database: # pragma: no cover
                return self.__ticket
            if overalljails:
                mi[idx] = jail.database.getBansMerged(ip=ip)
            else:
                mi[idx] = jail.database.getBansMerged(ip=ip, jail=jail)
        except Exception as e:
            logSys.error(
                "Failed to get %s bans merged, jail '%s': %s",
                idx, jail.name, e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
        return mi[idx] if mi[idx] is not None else self.__ticket
||||
|
||||
|
||||
def _getActionInfo(self, ticket):
    """Build an `ActionInfo` tag map for the ticket (empty ticket if falsy)."""
    if not ticket:
        ticket = BanTicket("", MyTime.time())
    return Actions.ActionInfo(ticket, self._jail)
||||
|
||||
def __getFailTickets(self, count=100):
    """Generator to get maximal count failure tickets from fail-manager."""
    for _ in range(count):
        ticket = self._jail.getFailTicket()
        if not ticket:
            # queue drained - stop early
            return
        yield ticket
||||
|
||||
def __checkBan(self, tickets=None):
    """Check for IP address to ban.

    If tickets are not specified look in the jail queue for FailTicket. If a ticket is available,
    it executes the "ban" command and adds a ticket to the BanManager.

    Returns
    -------
    int
        Count of tickets that actually got banned (0 if none).
    """
    cnt = 0
    if not tickets:
        tickets = self.__getFailTickets(self.banPrecedence)
    rebanacts = None
    for ticket in tickets:

        bTicket = BanTicket.wrap(ticket)
        btime = ticket.getBanTime(self.banManager.getBanTime())
        ip = bTicket.getID()
        aInfo = self._getActionInfo(bTicket)
        # reason is filled by addBanTicket on rejection ('expired'/'ticket'):
        reason = {}
        if self.banManager.addBanTicket(bTicket, reason=reason):
            cnt += 1
            # report ticket to observer, to check time should be increased and hereafter observer writes ban to database (asynchronous)
            if Observers.Main is not None and not bTicket.restored:
                Observers.Main.add('banFound', bTicket, self._jail, btime)
            logSys.notice("[%s] %sBan %s", self._jail.name, ('' if not bTicket.restored else 'Restore '), ip)
            # do actions :
            for name, action in self._actions.items():
                try:
                    if bTicket.restored and getattr(action, 'norestored', False):
                        continue
                    if not aInfo.immutable: aInfo.reset()
                    action.ban(aInfo)
                except Exception as e:
                    logSys.error(
                        "Failed to execute ban jail '%s' action '%s' "
                        "info '%r': %s",
                        self._jail.name, name, aInfo, e,
                        exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
            # after all actions are processed set banned flag:
            bTicket.banned = True
            if self.banEpoch: # be sure tickets always have the same ban epoch (default 0):
                bTicket.banEpoch = self.banEpoch
        else:
            if reason.get('expired', 0):
                logSys.info('[%s] Ignore %s, expired bantime', self._jail.name, ip)
                continue
            bTicket = reason.get('ticket', bTicket)
            # if already banned (otherwise still process some action)
            if bTicket.banned:
                # compare time of failure occurrence with time ticket was really banned:
                diftm = ticket.getTime() - bTicket.getTime()
                # log already banned with following level:
                #   DEBUG   - before 3 seconds - certain interval for it, because of possible latency by recognizing in backends, etc.
                #   NOTICE  - before 60 seconds - may still occur if action is slow, or very high load in backend,
                #   WARNING - after 60 seconds - very long time, something may be wrong
                ll = logging.DEBUG if diftm < 3 \
                    else logging.NOTICE if diftm < 60 \
                    else logging.WARNING
                logSys.log(ll, "[%s] %s already banned", self._jail.name, ip)
                # if long time after ban - do consistency check (something is wrong here):
                if bTicket.banEpoch == self.banEpoch and diftm > 3:
                    # avoid too often checks:
                    if not rebanacts and MyTime.time() > self.__lastConsistencyCheckTM + 3:
                        self.__lastConsistencyCheckTM = MyTime.time()
                        for action in self._actions.values():
                            if hasattr(action, 'consistencyCheck'):
                                action.consistencyCheck()
                # check epoch in order to reban it:
                if bTicket.banEpoch < self.banEpoch:
                    if not rebanacts: rebanacts = dict(
                        (name, action) for name, action in self._actions.items()
                            if action.banEpoch > bTicket.banEpoch)
                    cnt += self.__reBan(bTicket, actions=rebanacts)
            else: # pragma: no cover - unexpected: ticket is not banned for some reasons - reban using all actions:
                cnt += self.__reBan(bTicket)
    # add ban to database moved to observer (should previously check not already banned
    # and increase ticket time if "bantime.increment" set)
    if cnt:
        logSys.debug("Banned %s / %s, %s ticket(s) in %r", cnt,
            self.banManager.getBanTotal(), self.banManager.size(), self._jail.name)
    return cnt
||||
|
||||
def __reBan(self, ticket, actions=None, log=True):
    """Repeat bans for the ticket.

    Executes the actions in order to reban the host given in the
    ticket.

    Parameters
    ----------
    ticket : Ticket
        Ticket to reban
    actions : dict, optional
        Subset of actions to execute; default all actions of the jail.
    log : bool
        Whether to log the reban on NOTICE level.

    Returns
    -------
    int
        1 if all actions succeeded, 0 if any action failed
        (the ticket then keeps its previous ban state).
    """
    actions = actions or self._actions
    ip = ticket.getID()
    aInfo = self._getActionInfo(ticket)
    if log:
        logSys.notice("[%s] Reban %s%s", self._jail.name, ip, (', action %r' % list(actions.keys())[0] if len(actions) == 1 else ''))
    for name, action in actions.items():
        try:
            logSys.debug("[%s] action %r: reban %s", self._jail.name, name, ip)
            if not aInfo.immutable: aInfo.reset()
            action.reban(aInfo)
        except Exception as e:
            logSys.error(
                "Failed to execute reban jail '%s' action '%s' "
                "info '%r': %s",
                self._jail.name, name, aInfo, e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
            # abort on first failure - do not mark as banned:
            return 0
    # after all actions are processed set banned flag:
    ticket.banned = True
    if self.banEpoch: # be sure tickets always have the same ban epoch (default 0):
        ticket.banEpoch = self.banEpoch
    return 1
||||
|
||||
def _prolongBan(self, ticket):
    """Prolong the ban of a still-banned ticket via prolong-capable actions."""
    # prevent to prolong ticket that was removed in-between,
    # if it in ban list - ban time already prolonged (and it stays there):
    if not self.banManager._inBanList(ticket): return
    # do actions :
    aInfo = None
    for name, action in self._actions.items():
        try:
            if ticket.restored and getattr(action, 'norestored', False):
                continue
            if not action._prolongable:
                continue
            # build the tag map lazily, only if some action needs it:
            if aInfo is None:
                aInfo = self._getActionInfo(ticket)
            if not aInfo.immutable: aInfo.reset()
            action.prolong(aInfo)
        except Exception as e:
            logSys.error(
                "Failed to execute ban jail '%s' action '%s' "
                "info '%r': %s",
                self._jail.name, name, aInfo, e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
||||
|
||||
def __checkUnBan(self, maxCount=None):
    """Check for IP address to unban.

    Unban IP addresses which are outdated.
    """
    outdated = self.banManager.unBanList(MyTime.time(), maxCount)
    cnt = 0
    for ticket in outdated:
        self.__unBan(ticket)
        cnt += 1
    if cnt:
        logSys.debug("Unbanned %s, %s ticket(s) in %r",
            cnt, self.banManager.size(), self._jail.name)
    return cnt
||||
|
||||
def __flushBan(self, db=False, actions=None, stop=False):
    """Flush the ban list.

    Unban all IP address which are still in the banning list.

    If actions specified, don't flush list - just execute unban for
    given actions (reload, obsolete resp. removed actions).

    Parameters
    ----------
    db : bool
        Also delete all bans of this jail from the database.
    actions : dict, optional
        Execute unban only for these actions (ban list itself is kept).
    stop : bool
        True when called during shutdown (changes log wording and
        suppresses repair during consistency check).
    """
    log = "Unban" if not stop else "Repeal Ban"
    if actions is None:
        logSys.debug("  Flush ban list")
        lst = self.banManager.flushBanList()
    else:
        log = None # don't log "[jail] Unban ..." if removing actions only.
        lst = iter(self.banManager)
    cnt = 0
    # first we'll execute flush for actions supporting this operation:
    unbactions = {}
    for name, action in (actions if actions is not None else self._actions).items():
        try:
            if hasattr(action, 'flush') and (not isinstance(action, CommandAction) or action.actionflush):
                logSys.notice("[%s] Flush ticket(s) with %s", self._jail.name, name)
                if action.flush():
                    continue
        except Exception as e:
            logSys.error("Failed to flush bans in jail '%s' action '%s': %s",
                self._jail.name, name, e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
            logSys.info("No flush occurred, do consistency check")
            if hasattr(action, 'consistencyCheck'):
                def _beforeRepair():
                    if stop and not getattr(action, 'actionrepair_on_unban', None): # don't need repair on stop
                        logSys.error("Invariant check failed. Flush is impossible.")
                        return False
                    return True
                action.consistencyCheck(_beforeRepair)
                continue
        # fallback to single unbans:
        logSys.debug("  Unban tickets each individually")
        unbactions[name] = action
    actions = unbactions
    # flush the database also:
    if db and self._jail.database is not None:
        logSys.debug("  Flush jail in database")
        self._jail.database.delBan(self._jail)
    # unban each ticket with non-flusheable actions:
    for ticket in lst:
        # unban ip:
        self.__unBan(ticket, actions=actions, log=log)
        cnt += 1
    logSys.debug("  Unbanned %s, %s ticket(s) in %r",
        cnt, self.banManager.size(), self._jail.name)
    return cnt
||||
|
||||
def __unBan(self, ticket, actions=None, log="Unban"):
    """Unbans host corresponding to the ticket.

    Executes the actions in order to unban the host given in the
    ticket.

    Parameters
    ----------
    ticket : FailTicket
        Ticket of failures of which to unban
    actions : dict, optional
        Subset of actions to execute; default all actions of the jail.
    log : str or None
        Log prefix for the NOTICE message; None suppresses it.
    """
    unbactions = self._actions if actions is None else actions
    ip = ticket.getID()
    aInfo = self._getActionInfo(ticket)
    if log:
        logSys.notice("[%s] %s %s", self._jail.name, log, ip)
    for name, action in unbactions.items():
        try:
            logSys.debug("[%s] action %r: unban %s", self._jail.name, name, ip)
            if not aInfo.immutable: aInfo.reset()
            action.unban(aInfo)
        except Exception as e:
            logSys.error(
                "Failed to execute unban jail '%s' action '%s' "
                "info '%r': %s",
                self._jail.name, name, aInfo, e,
                exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
||||
|
||||
def status(self, flavor="basic"):
    """Status of current and total ban counts and current banned IP list.

    Parameters
    ----------
    flavor : str
        One of "short", "basic", "stats", "cymru"; unsupported flavors
        are logged as a warning and fall back to basic output.

    Returns
    -------
    list or tuple
        For "stats" a (size, total) tuple, otherwise a list of
        (label, value) pairs.
    """
    # TODO: Allow this list to be printed as 'status' output
    supported_flavors = ["short", "basic", "stats", "cymru"]
    if flavor is None or flavor not in supported_flavors:
        # lazy %-args: only formatted if WARNING level is enabled
        logSys.warning("Unsupported extended jail status flavor %r. Supported: %s", flavor, supported_flavors)
    if flavor == "stats":
        return (self.banManager.size(), self.banManager.getBanTotal())
    # Always print this information (basic)
    if flavor != "short":
        banned = self.banManager.getBanList()
        cnt = len(banned)
    else:
        cnt = self.banManager.size()
    ret = [("Currently banned", cnt),
           ("Total banned", self.banManager.getBanTotal())]
    if flavor != "short":
        ret += [("Banned IP list", banned)]
    if flavor == "cymru":
        cymru_info = self.banManager.getBanListExtendedCymruInfo()
        ret += \
            [("Banned ASN list", self.banManager.geBanListExtendedASN(cymru_info)),
             ("Banned Country list", self.banManager.geBanListExtendedCountry(cymru_info)),
             ("Banned RIR list", self.banManager.geBanListExtendedRIR(cymru_info))]
    return ret
||||
348
fail2ban-master/fail2ban/server/asyncserver.py
Normal file
348
fail2ban-master/fail2ban/server/asyncserver.py
Normal file
@@ -0,0 +1,348 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from pickle import dumps, loads, HIGHEST_PROTOCOL
|
||||
import errno
|
||||
import fcntl
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
from .utils import Utils
|
||||
from ..protocol import CSPROTO
|
||||
from ..helpers import logging, getLogger, formatExceptionInfo
|
||||
|
||||
# load asyncore and asynchat after helper to ensure we've a path to compat folder:
|
||||
import asynchat
|
||||
if asynchat.asyncore:
|
||||
asyncore = asynchat.asyncore
|
||||
else: # pragma: no cover - normally unreachable
|
||||
import asyncore
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
|
||||
# Request handler class.
|
||||
#
|
||||
# This class extends asynchat in order to provide a request handler for
|
||||
# incoming query.
|
||||
class RequestHandler(asynchat.async_chat):
	"""Request handler for one incoming client connection.

	Collects data until the protocol terminator (CSPROTO.END), unpickles the
	request, hands it to the transmitter and pushes back the pickled response.
	"""

	def __init__(self, conn, transmitter):
		# conn: accepted socket; transmitter: object with a proceed(message) method
		asynchat.async_chat.__init__(self, conn)
		self.__conn = conn
		self.__transmitter = transmitter
		self.__buffer = []
		# Sets the terminator (end-of-message marker of the wire protocol).
		self.set_terminator(CSPROTO.END)

	def __close(self):
		# Shut down and close the client socket exactly once (idempotent).
		if self.__conn:
			conn = self.__conn
			self.__conn = None
			try:
				conn.shutdown(socket.SHUT_RDWR)
				conn.close()
			except socket.error: # pragma: no cover - normally unreachable
				pass

	def handle_close(self):
		# Ensure our socket is released before the asynchat bookkeeping runs.
		self.__close()
		asynchat.async_chat.handle_close(self)

	def collect_incoming_data(self, data):
		# Accumulate raw chunks; they are joined in found_terminator().
		#logSys.debug("Received raw data: " + str(data))
		self.__buffer.append(data)

	# exception identifies deserialization errors (exception by load in pickle):
	class LoadError(Exception):
		pass

	##
	# Handles a new request.
	#
	# This method is called once we have a complete request.

	def found_terminator(self):
		try:
			# Pop whole buffer
			message = self.__buffer
			self.__buffer = []
			# Joins the buffer items.
			message = CSPROTO.EMPTY.join(message)
			# Closes the channel if close was received
			if message == CSPROTO.CLOSE:
				self.close_when_done()
				return
			# Deserialize
			try:
				message = loads(message)
			except Exception as e:
				# NOTE(review): here exc_info uses `< DEBUG` (traceback only below
				# DEBUG level) while the handler below uses `<= DEBUG` - confirm
				# this asymmetry is intended.
				logSys.error('PROTO-error: load message failed: %s', e,
					exc_info=logSys.getEffectiveLevel()<logging.DEBUG)
				raise RequestHandler.LoadError(e)
			# Gives the message to the transmitter.
			if self.__transmitter:
				message = self.__transmitter.proceed(message)
			else:
				# transmitter already detached - server is shutting down:
				message = ['SHUTDOWN']
			# Serializes the response.
			message = dumps(message, HIGHEST_PROTOCOL)
			# Sends the response to the client.
			self.push(message + CSPROTO.END)
		except Exception as e:
			# LoadError was already logged above - log only unexpected failures:
			if not isinstance(e, RequestHandler.LoadError): # pragma: no cover - normally unreachable
				logSys.error("Caught unhandled exception: %r", e,
					exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# Sends the response to the client.
			message = dumps("ERROR: %s" % e, HIGHEST_PROTOCOL)
			self.push(message + CSPROTO.END)

	##
	# Handles an communication errors in request.
	#
	def handle_error(self):
		try:
			e1, e2 = formatExceptionInfo()
			logSys.error("Unexpected communication error: %s" % str(e2))
			logSys.error(traceback.format_exc().splitlines())
			# Sends the response to the client.
			message = dumps("ERROR: %s" % e2, HIGHEST_PROTOCOL)
			self.push(message + CSPROTO.END)
		except Exception as e: # pragma: no cover - normally unreachable
			pass
		self.close_when_done()
|
||||
|
||||
def loop(active, timeout=None, use_poll=False, err_count=None):
	"""Custom event loop implementation

	Uses poll instead of loop to respect `active` flag,
	to avoid loop timeout mistake: different in poll and poll2 (sec vs ms),
	and to prevent sporadic errors like EBADF 'Bad file descriptor' etc. (see gh-161)

	Parameters
	----------
	active : callable
		Returns True as long as polling should continue.
	timeout : float, optional
		Poll timeout in seconds; defaults to Utils.DEFAULT_SLEEP_TIME.
	use_poll : bool or callable, optional
		True selects asyncore.poll2 (if available); a callable is used
		directly as the poll function.
	err_count : dict, optional
		Shared error-counter dict; its 'listen' entry is (re)set to 0 here
		and incremented/decremented while polling.
	"""
	if not err_count: err_count={}
	err_count['listen'] = 0
	if timeout is None:
		timeout = Utils.DEFAULT_SLEEP_TIME
	poll = asyncore.poll
	if callable(use_poll):
		poll = use_poll
	elif use_poll and asyncore.poll2 and hasattr(asyncore.select, 'poll'): # pragma: no cover
		logSys.debug('Server listener (select) uses poll')
		# poll2 expected a timeout in milliseconds (but poll and loop in seconds):
		# NOTE(review): stdlib asyncore.poll2 multiplies its timeout argument by
		# 1000 internally (i.e. appears to take seconds too) - confirm this
		# division is correct and not shrinking the timeout 1e6-fold.
		timeout = float(timeout) / 1000
		poll = asyncore.poll2
	# Poll as long as active:
	while active():
		try:
			poll(timeout)
			# successful poll decays the error counter:
			if err_count['listen']:
				err_count['listen'] -= 1
		except Exception as e:
			if not active():
				break
			err_count['listen'] += 1
			if err_count['listen'] < 20:
				# errno.ENOTCONN - 'Socket is not connected'
				# errno.EBADF - 'Bad file descriptor'
				if e.args[0] in (errno.ENOTCONN, errno.EBADF): # pragma: no cover (too sporadic)
					logSys.info('Server connection was closed: %s', str(e))
				else:
					logSys.error('Server connection was closed: %s', str(e))
			elif err_count['listen'] == 20:
				# log one full traceback, then throttle:
				logSys.exception(e)
				logSys.error('Too many errors - stop logging connection errors')
			elif err_count['listen'] > 100: # pragma: no cover - normally unreachable
				# give up entirely when errors keep piling up (e.g. fd exhaustion):
				if (
					e.args[0] == errno.EMFILE # [Errno 24] Too many open files
					or sum(err_count.values()) > 1000
				):
					logSys.critical("Too many errors - critical count reached %r", err_count)
					break
||||
|
||||
|
||||
##
|
||||
# Asynchronous server class.
|
||||
#
|
||||
# This class extends asyncore and dispatches connection requests to
|
||||
# RequestHandler.
|
||||
|
||||
class AsyncServer(asyncore.dispatcher):
	"""Asynchronous unix-domain-socket server.

	Extends asyncore.dispatcher; every accepted connection is handed to a
	RequestHandler bound to the given transmitter.
	"""

	def __init__(self, transmitter):
		asyncore.dispatcher.__init__(self)
		self.__transmitter = transmitter
		# default socket path; overwritten by start():
		self.__sock = "/var/run/fail2ban/fail2ban.sock"
		# True once start() created the socket (guards socket-file removal):
		self.__init = False
		# True while the event loop is running:
		self.__active = False
		# per-category error counters, shared with loop():
		self.__errCount = {'accept': 0, 'listen': 0}
		# optional callback invoked when the server is ready to accept:
		self.onstart = None

	##
	# Returns False as we only read the socket first.

	def writable(self):
		return False

	##
	# Accepts a new connection and delegates it to a RequestHandler.
	# Accept failures are counted; logging is throttled after 20 errors and
	# the server stops itself once the count becomes critical (e.g. EMFILE).

	def handle_accept(self):
		try:
			conn, addr = self.accept()
		except Exception as e: # pragma: no cover
			self.__errCount['accept'] += 1
			if self.__errCount['accept'] < 20:
				logSys.warning("Accept socket error: %s", e,
					exc_info=(self.__errCount['accept'] <= 1))
			elif self.__errCount['accept'] == 20:
				logSys.error("Too many acceptor errors - stop logging errors")
			elif self.__errCount['accept'] > 100:
				if (
					(isinstance(e, socket.error) and e.args[0] == errno.EMFILE) # [Errno 24] Too many open files
					or sum(self.__errCount.values()) > 1000
				):
					logSys.critical("Too many errors - critical count reached %r", self.__errCount)
					self.stop()
			return
		# successful accept decays the error counter:
		if self.__errCount['accept']:
			self.__errCount['accept'] -= 1;
		AsyncServer.__markCloseOnExec(conn)
		# Creates an instance of the handler class to handle the
		# request/response on the incoming connection.
		RequestHandler(conn, self.__transmitter)

	##
	# Starts the communication server.
	#
	# Binds the unix socket, listens, and runs the event loop until stopped.
	# @param sock: socket file.
	# @param force: remove the socket file if exists.
	# @param timeout: poll timeout in seconds, forwarded to loop().
	# @param use_poll: poll selector flag/callable, forwarded to loop().

	def start(self, sock, force, timeout=None, use_poll=False):
		# remember which thread runs the loop (close() compares against it):
		self.__worker = threading.current_thread()
		self.__sock = sock
		# Remove socket
		if os.path.exists(sock):
			logSys.error("Fail2ban seems to be already running")
			if force:
				logSys.warning("Forcing execution of the server")
				self._remove_sock()
			else:
				raise AsyncServerException("Server already running")
		# Creates the socket.
		self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
		self.set_reuse_addr()
		try:
			self.bind(sock)
		except Exception: # pragma: no cover
			raise AsyncServerException("Unable to bind socket %s" % self.__sock)
		AsyncServer.__markCloseOnExec(self.socket)
		self.listen(1)
		# Sets the init flag.
		self.__init = self.__loop = self.__active = True
		# Execute on start event (server ready):
		if self.onstart:
			self.onstart()
		# Event loop as long as active:
		loop(lambda: self.__loop, timeout=timeout, use_poll=use_poll, err_count=self.__errCount)
		self.__active = False
		# Cleanup all
		self.stop()

	##
	# Stops the event loop, closes the socket and removes the socket file.

	def close(self):
		stopflg = False
		if self.__active:
			# signal the event loop to terminate:
			self.__loop = False
			# shutdown socket here:
			if self.socket:
				try:
					self.socket.shutdown(socket.SHUT_RDWR)
				except socket.error: # pragma: no cover - normally unreachable
					pass
			# close connection:
			asyncore.dispatcher.close(self)
			# If not the loop thread (stops self in handler), wait (a little bit)
			# for the server leaves loop, before remove socket
			if threading.current_thread() != self.__worker:
				Utils.wait_for(lambda: not self.__active, 1)
			stopflg = True
		# Remove socket (file) only if it was created:
		if self.__init and os.path.exists(self.__sock):
			self._remove_sock()
			logSys.debug("Removed socket file " + self.__sock)
		if stopflg:
			logSys.debug("Socket shutdown")
		self.__active = False

	##
	# Stops the communication server.

	def stop_communication(self):
		# detach the transmitter; pending requests will answer 'SHUTDOWN':
		if self.__transmitter:
			logSys.debug("Stop communication, shutdown")
			self.__transmitter = None

	##
	# Stops the server.

	def stop(self):
		self.stop_communication()
		self.close()

	# better remains a method (not a property) since used as a callable for wait_for
	def isActive(self):
		return self.__active

	##
	# Safe remove (in multithreaded mode):

	def _remove_sock(self):
		try:
			os.remove(self.__sock)
		except OSError as e: # pragma: no cover
			# already removed elsewhere - only re-raise real failures:
			if e.errno != errno.ENOENT:
				raise

	##
	# Marks socket as close-on-exec to avoid leaking file descriptors when
	# running actions involving command execution.
	#
	# @param sock: socket file.

	@staticmethod
	def __markCloseOnExec(sock):
		fd = sock.fileno()
		flags = fcntl.fcntl(fd, fcntl.F_GETFD)
		fcntl.fcntl(fd, fcntl.F_SETFD, flags|fcntl.FD_CLOEXEC)
||||
|
||||
|
||||
##
|
||||
# AsyncServerException is used to wrap communication exceptions.
|
||||
|
||||
class AsyncServerException(Exception):
	"""Raised by AsyncServer.start() on start-up failures (socket file
	already exists without `force`, or the unix socket cannot be bound)."""
	pass
|
||||
386
fail2ban-master/fail2ban/server/banmanager.py
Normal file
386
fail2ban-master/fail2ban/server/banmanager.py
Normal file
@@ -0,0 +1,386 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from threading import Lock
|
||||
|
||||
from .ticket import BanTicket
|
||||
from .mytime import MyTime
|
||||
from ..helpers import getLogger, logging
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
|
||||
# Banning Manager.
|
||||
#
|
||||
# Manage the banned IP addresses. Convert FailTicket to BanTicket.
|
||||
# This class is mainly used by the Action class.
|
||||
|
||||
class BanManager:
|
||||
|
||||
##
|
||||
# Constructor.
|
||||
#
|
||||
# Initialize members with default values.
|
||||
|
||||
def __init__(self):
|
||||
## Mutex used to protect the ban list.
|
||||
self.__lock = Lock()
|
||||
## The ban list.
|
||||
self.__banList = dict()
|
||||
## The amount of time an IP address gets banned.
|
||||
self.__banTime = 600
|
||||
## Total number of banned IP address
|
||||
self.__banTotal = 0
|
||||
## The time for next unban process (for performance and load reasons):
|
||||
self._nextUnbanTime = BanTicket.MAX_TIME
|
||||
|
||||
##
|
||||
# Set the ban time.
|
||||
#
|
||||
# Set the amount of time an IP address get banned.
|
||||
# @param value the time
|
||||
|
||||
def setBanTime(self, value):
|
||||
self.__banTime = int(value)
|
||||
|
||||
##
|
||||
# Get the ban time.
|
||||
#
|
||||
# Get the amount of time an IP address get banned.
|
||||
# @return the time
|
||||
|
||||
def getBanTime(self):
|
||||
return self.__banTime
|
||||
|
||||
##
|
||||
# Set the total number of banned address.
|
||||
#
|
||||
# @param value total number
|
||||
|
||||
def setBanTotal(self, value):
|
||||
self.__banTotal = value
|
||||
|
||||
##
|
||||
# Get the total number of banned address.
|
||||
#
|
||||
# @return the total number
|
||||
|
||||
def getBanTotal(self):
|
||||
return self.__banTotal
|
||||
|
||||
##
|
||||
# Returns a copy of the IP list.
|
||||
#
|
||||
# @return IP list
|
||||
|
||||
def getBanList(self, ordered=False, withTime=False):
|
||||
if not ordered:
|
||||
return list(self.__banList.keys())
|
||||
with self.__lock:
|
||||
lst = []
|
||||
for ticket in self.__banList.values():
|
||||
eob = ticket.getEndOfBanTime(self.__banTime)
|
||||
lst.append((ticket,eob))
|
||||
lst.sort(key=lambda t: t[1])
|
||||
t2s = MyTime.time2str
|
||||
if withTime:
|
||||
return ['%s \t%s + %d = %s' % (
|
||||
t[0].getID(),
|
||||
t2s(t[0].getTime()), t[0].getBanTime(self.__banTime), t2s(t[1])
|
||||
) for t in lst]
|
||||
return [t[0].getID() for t in lst]
|
||||
|
||||
##
|
||||
# Returns a iterator to ban list (used in reload, so idle).
|
||||
#
|
||||
# @return ban list iterator
|
||||
|
||||
def __iter__(self):
|
||||
# ensure iterator is safe - traverse over the list in snapshot created within lock (GIL):
|
||||
return iter(list(self.__banList.values()))
|
||||
|
||||
##
|
||||
# Returns normalized value
|
||||
#
|
||||
# @return value or "unknown" if value is None or empty string
|
||||
|
||||
@staticmethod
|
||||
def handleBlankResult(value):
|
||||
if value is None or len(value) == 0:
|
||||
return "unknown"
|
||||
else:
|
||||
return value
|
||||
|
||||
##
|
||||
# Returns Cymru DNS query information
|
||||
#
|
||||
# @return {"asn": [], "country": [], "rir": []} dict for self.__banList IPs
|
||||
|
||||
def getBanListExtendedCymruInfo(self, timeout=10):
|
||||
return_dict = {"asn": [], "country": [], "rir": []}
|
||||
if not hasattr(self, 'dnsResolver'):
|
||||
global dns
|
||||
try:
|
||||
import dns.exception
|
||||
import dns.resolver
|
||||
resolver = dns.resolver.Resolver()
|
||||
resolver.lifetime = timeout
|
||||
resolver.timeout = timeout / 2
|
||||
self.dnsResolver = resolver
|
||||
except ImportError as e: # pragma: no cover
|
||||
logSys.error("dnspython package is required but could not be imported")
|
||||
return_dict["error"] = repr(e)
|
||||
return_dict["asn"].append("error")
|
||||
return_dict["country"].append("error")
|
||||
return_dict["rir"].append("error")
|
||||
return return_dict
|
||||
# get ips in lock:
|
||||
with self.__lock:
|
||||
banIPs = [banData.getIP() for banData in list(self.__banList.values())]
|
||||
# get cymru info:
|
||||
try:
|
||||
for ip in banIPs:
|
||||
# Reference: https://www.team-cymru.com/IP-ASN-mapping.html#dns
|
||||
question = ip.getPTR(
|
||||
"origin.asn.cymru.com" if ip.isIPv4
|
||||
else "origin6.asn.cymru.com"
|
||||
)
|
||||
try:
|
||||
resolver = self.dnsResolver
|
||||
answers = resolver.query(question, "TXT")
|
||||
if not answers:
|
||||
raise ValueError("No data retrieved")
|
||||
asns = set()
|
||||
countries = set()
|
||||
rirs = set()
|
||||
for rdata in answers:
|
||||
asn, net, country, rir, changed =\
|
||||
[answer.strip("'\" ") for answer in rdata.to_text().split("|")]
|
||||
asn = self.handleBlankResult(asn)
|
||||
country = self.handleBlankResult(country)
|
||||
rir = self.handleBlankResult(rir)
|
||||
asns.add(self.handleBlankResult(asn))
|
||||
countries.add(self.handleBlankResult(country))
|
||||
rirs.add(self.handleBlankResult(rir))
|
||||
return_dict["asn"].append(', '.join(sorted(asns)))
|
||||
return_dict["country"].append(', '.join(sorted(countries)))
|
||||
return_dict["rir"].append(', '.join(sorted(rirs)))
|
||||
except dns.resolver.NXDOMAIN:
|
||||
return_dict["asn"].append("nxdomain")
|
||||
return_dict["country"].append("nxdomain")
|
||||
return_dict["rir"].append("nxdomain")
|
||||
except (dns.exception.DNSException, dns.resolver.NoNameservers, dns.exception.Timeout) as dnse: # pragma: no cover
|
||||
logSys.error("DNSException %r querying Cymru for %s TXT", dnse, question)
|
||||
if logSys.level <= logging.DEBUG:
|
||||
logSys.exception(dnse)
|
||||
return_dict["error"] = repr(dnse)
|
||||
break
|
||||
except Exception as e: # pragma: no cover
|
||||
logSys.error("Unhandled Exception %r querying Cymru for %s TXT", e, question)
|
||||
if logSys.level <= logging.DEBUG:
|
||||
logSys.exception(e)
|
||||
return_dict["error"] = repr(e)
|
||||
break
|
||||
except Exception as e: # pragma: no cover
|
||||
logSys.error("Failure looking up extended Cymru info: %s", e)
|
||||
if logSys.level <= logging.DEBUG:
|
||||
logSys.exception(e)
|
||||
return_dict["error"] = repr(e)
|
||||
return return_dict
|
||||
|
||||
##
|
||||
# Returns list of Banned ASNs from Cymru info
|
||||
#
|
||||
# Use getBanListExtendedCymruInfo() to provide cymru_info
|
||||
#
|
||||
# @return list of Banned ASNs
|
||||
|
||||
def geBanListExtendedASN(self, cymru_info):
|
||||
try:
|
||||
return [asn for asn in cymru_info["asn"]]
|
||||
except Exception as e:
|
||||
logSys.error("Failed to lookup ASN")
|
||||
logSys.exception(e)
|
||||
return []
|
||||
|
||||
##
|
||||
# Returns list of Banned Countries from Cymru info
|
||||
#
|
||||
# Use getBanListExtendedCymruInfo() to provide cymru_info
|
||||
#
|
||||
# @return list of Banned Countries
|
||||
|
||||
def geBanListExtendedCountry(self, cymru_info):
|
||||
try:
|
||||
return [country for country in cymru_info["country"]]
|
||||
except Exception as e:
|
||||
logSys.error("Failed to lookup Country")
|
||||
logSys.exception(e)
|
||||
return []
|
||||
|
||||
##
|
||||
# Returns list of Banned RIRs from Cymru info
|
||||
#
|
||||
# Use getBanListExtendedCymruInfo() to provide cymru_info
|
||||
#
|
||||
# @return list of Banned RIRs
|
||||
|
||||
def geBanListExtendedRIR(self, cymru_info):
|
||||
try:
|
||||
return [rir for rir in cymru_info["rir"]]
|
||||
except Exception as e:
|
||||
logSys.error("Failed to lookup RIR")
|
||||
logSys.exception(e)
|
||||
return []
|
||||
|
||||
##
|
||||
# Add a ban ticket.
|
||||
#
|
||||
# Add a BanTicket instance into the ban list.
|
||||
# @param ticket the ticket
|
||||
# @return True if the IP address is not in the ban list
|
||||
|
||||
def addBanTicket(self, ticket, reason={}):
|
||||
eob = ticket.getEndOfBanTime(self.__banTime)
|
||||
if eob < MyTime.time():
|
||||
reason['expired'] = 1
|
||||
return False
|
||||
with self.__lock:
|
||||
# check already banned
|
||||
fid = ticket.getID()
|
||||
oldticket = self.__banList.get(fid)
|
||||
if oldticket:
|
||||
reason['ticket'] = oldticket
|
||||
# if new time for end of ban is larger than already banned end-time:
|
||||
if eob > oldticket.getEndOfBanTime(self.__banTime):
|
||||
# we have longest ban - set new (increment) ban time
|
||||
reason['prolong'] = 1
|
||||
btm = ticket.getBanTime(self.__banTime)
|
||||
# if not permanent:
|
||||
if btm != -1:
|
||||
diftm = ticket.getTime() - oldticket.getTime()
|
||||
if diftm > 0:
|
||||
btm += diftm
|
||||
oldticket.setBanTime(btm)
|
||||
return False
|
||||
# not yet banned - add new one:
|
||||
self.__banList[fid] = ticket
|
||||
self.__banTotal += 1
|
||||
ticket.incrBanCount()
|
||||
# correct next unban time:
|
||||
if self._nextUnbanTime > eob:
|
||||
self._nextUnbanTime = eob
|
||||
return True
|
||||
|
||||
##
|
||||
# Get the size of the ban list.
|
||||
#
|
||||
# @return the size
|
||||
|
||||
def size(self):
|
||||
return len(self.__banList)
|
||||
|
||||
##
|
||||
# Check if a ticket is in the list.
|
||||
#
|
||||
# Check if a BanTicket with a given IP address is already in the
|
||||
# ban list.
|
||||
# @param ticket the ticket
|
||||
# @return True if a ticket already exists
|
||||
|
||||
def _inBanList(self, ticket):
|
||||
return ticket.getID() in self.__banList
|
||||
|
||||
##
|
||||
# Get the list of IP address to unban.
|
||||
#
|
||||
# Return a list of BanTicket which need to be unbanned.
|
||||
# @param time the time
|
||||
# @return the list of ticket to unban
|
||||
|
||||
def unBanList(self, time, maxCount=0x7fffffff):
|
||||
with self.__lock:
|
||||
# Check next unban time:
|
||||
nextUnbanTime = self._nextUnbanTime
|
||||
if nextUnbanTime > time:
|
||||
return list()
|
||||
|
||||
# Gets the list of ticket to remove (thereby correct next unban time).
|
||||
unBanList = {}
|
||||
nextUnbanTime = BanTicket.MAX_TIME
|
||||
for fid,ticket in self.__banList.items():
|
||||
# current time greater as end of ban - timed out:
|
||||
eob = ticket.getEndOfBanTime(self.__banTime)
|
||||
if time > eob:
|
||||
unBanList[fid] = ticket
|
||||
if len(unBanList) >= maxCount: # stop search cycle, so reset back the next check time
|
||||
nextUnbanTime = self._nextUnbanTime
|
||||
break
|
||||
elif nextUnbanTime > eob:
|
||||
nextUnbanTime = eob
|
||||
|
||||
self._nextUnbanTime = nextUnbanTime
|
||||
# Removes tickets.
|
||||
if len(unBanList):
|
||||
if len(unBanList) / 2.0 <= len(self.__banList) / 3.0:
|
||||
# few as 2/3 should be removed - remove particular items:
|
||||
for fid in unBanList.keys():
|
||||
del self.__banList[fid]
|
||||
else:
|
||||
# create new dictionary without items to be deleted:
|
||||
self.__banList = dict((fid,ticket) for fid,ticket in self.__banList.items() \
|
||||
if fid not in unBanList)
|
||||
|
||||
# return list of tickets:
|
||||
return list(unBanList.values())
|
||||
|
||||
##
|
||||
# Flush the ban list.
|
||||
#
|
||||
# Get the ban list and initialize it with an empty one.
|
||||
# @return the complete ban list
|
||||
|
||||
def flushBanList(self):
|
||||
with self.__lock:
|
||||
uBList = list(self.__banList.values())
|
||||
self.__banList = dict()
|
||||
return uBList
|
||||
|
||||
##
|
||||
# Gets the ticket for the specified ID (most of the time it is IP-address).
|
||||
#
|
||||
# @return the ticket or False.
|
||||
def getTicketByID(self, fid):
|
||||
with self.__lock:
|
||||
try:
|
||||
# Return the ticket after removing (popping)
|
||||
# if from the ban list.
|
||||
return self.__banList.pop(fid)
|
||||
except KeyError:
|
||||
pass
|
||||
return None # if none found
|
||||
890
fail2ban-master/fail2ban/server/database.py
Normal file
890
fail2ban-master/fail2ban/server/database.py
Normal file
@@ -0,0 +1,890 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Steven Hiscocks"
|
||||
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
|
||||
__license__ = "GPL"
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sqlite3
|
||||
import sys
|
||||
import time
|
||||
from functools import wraps
|
||||
from threading import RLock
|
||||
|
||||
from .mytime import MyTime
|
||||
from .ticket import FailTicket
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger, uni_string, PREFER_ENC
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
def _json_default(x):
	"""Avoid errors on types unknown in json-adapters.

	Used as the `default=` hook of json.dumps: sets become lists (json has no
	native set representation), everything else is stringified.
	"""
	value = list(x) if isinstance(x, set) else x
	return uni_string(value)
||||
|
||||
def _json_dumps_safe(x):
	"""Serialize `x` to JSON bytes without ever raising.

	Registered as an sqlite3 adapter, which must be exception-safe; on any
	failure the error is logged and the fallback '{}' is returned.
	"""
	try:
		return json.dumps(x, ensure_ascii=False, default=_json_default).encode(
			PREFER_ENC, 'replace')
	except Exception as e:
		# adapter handler should be exception-safe
		logSys.error('json dumps failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
		return '{}'
||||
|
||||
def _json_loads_safe(x):
	"""Deserialize JSON bytes read from the database without ever raising.

	Registered as an sqlite3 converter, which must be exception-safe; on any
	failure the error is logged and an empty dict is returned.
	"""
	try:
		return json.loads(x.decode(PREFER_ENC, 'replace'))
	except Exception as e:
		# converter handler should be exception-safe
		logSys.error('json loads failed: %r', e, exc_info=logSys.getEffectiveLevel() <= 4)
		return {}
||||
|
||||
# Transparently store dict values as JSON text and decode columns declared
# as "JSON" back into dicts (requires connecting with
# detect_types=sqlite3.PARSE_DECLTYPES, as _connectDB below does).
sqlite3.register_adapter(dict, _json_dumps_safe)
sqlite3.register_converter("JSON", _json_loads_safe)
||||
|
||||
|
||||
def commitandrollback(f):
	"""Decorator for Fail2BanDb methods needing a cursor inside a transaction.

	Runs the wrapped method while holding the instance's threading lock, with
	the sqlite3 connection as a context manager (auto commit on success,
	rollback on exception). A fresh cursor is passed as the second positional
	argument and always closed afterwards.
	"""
	@wraps(f)
	def wrapper(self, *args, **kwargs):
		# serialize db access across threads, then let the connection's
		# context manager handle commit/rollback of the transaction:
		with self._lock, self._db:
			cur = self._db.cursor()
			try:
				return f(self, cur, *args, **kwargs)
			finally:
				cur.close()
	return wrapper
||||
|
||||
|
||||
class Fail2BanDb(object):
|
||||
"""Fail2Ban database for storing persistent data.
|
||||
|
||||
This allows after Fail2Ban is restarted to reinstated bans and
|
||||
to continue monitoring logs from the same point.
|
||||
|
||||
This will either create a new Fail2Ban database, connect to an
|
||||
existing, and if applicable upgrade the schema in the process.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
filename : str
|
||||
File name for SQLite3 database, which will be created if
|
||||
doesn't already exist.
|
||||
purgeAge : int
|
||||
Purge age in seconds, used to remove old bans from
|
||||
database during purge.
|
||||
|
||||
Raises
|
||||
------
|
||||
sqlite3.OperationalError
|
||||
Error connecting/creating a SQLite3 database.
|
||||
RuntimeError
|
||||
If existing database fails to update to new schema.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
filename
|
||||
purgeage
|
||||
"""
|
||||
__version__ = 4
|
||||
# Note all SCRIPTS strings must end in ';' for py26 compatibility
|
||||
_CREATE_SCRIPTS = (
|
||||
('fail2banDb', "CREATE TABLE IF NOT EXISTS fail2banDb(version INTEGER);")
|
||||
,('jails', "CREATE TABLE IF NOT EXISTS jails(" \
|
||||
"name TEXT NOT NULL UNIQUE, " \
|
||||
"enabled INTEGER NOT NULL DEFAULT 1" \
|
||||
");" \
|
||||
"CREATE INDEX IF NOT EXISTS jails_name ON jails(name);")
|
||||
,('logs', "CREATE TABLE IF NOT EXISTS logs(" \
|
||||
"jail TEXT NOT NULL, " \
|
||||
"path TEXT, " \
|
||||
"firstlinemd5 TEXT, " \
|
||||
"lastfilepos INTEGER DEFAULT 0, " \
|
||||
"FOREIGN KEY(jail) REFERENCES jails(name) ON DELETE CASCADE, " \
|
||||
"UNIQUE(jail, path)," \
|
||||
"UNIQUE(jail, path, firstlinemd5)" \
|
||||
");" \
|
||||
"CREATE INDEX IF NOT EXISTS logs_path ON logs(path);" \
|
||||
"CREATE INDEX IF NOT EXISTS logs_jail_path ON logs(jail, path);")
|
||||
#TODO: systemd journal features \
|
||||
#"journalmatch TEXT, " \
|
||||
#"journlcursor TEXT, " \
|
||||
#"lastfiletime INTEGER DEFAULT 0, " # is this easily available
|
||||
,('bans', "CREATE TABLE IF NOT EXISTS bans(" \
|
||||
"jail TEXT NOT NULL, " \
|
||||
"ip TEXT, " \
|
||||
"timeofban INTEGER NOT NULL, " \
|
||||
"bantime INTEGER NOT NULL, " \
|
||||
"bancount INTEGER NOT NULL default 1, " \
|
||||
"data JSON, " \
|
||||
"FOREIGN KEY(jail) REFERENCES jails(name) " \
|
||||
");" \
|
||||
"CREATE INDEX IF NOT EXISTS bans_jail_timeofban_ip ON bans(jail, timeofban);" \
|
||||
"CREATE INDEX IF NOT EXISTS bans_jail_ip ON bans(jail, ip);" \
|
||||
"CREATE INDEX IF NOT EXISTS bans_ip ON bans(ip);")
|
||||
,('bips', "CREATE TABLE IF NOT EXISTS bips(" \
|
||||
"ip TEXT NOT NULL, " \
|
||||
"jail TEXT NOT NULL, " \
|
||||
"timeofban INTEGER NOT NULL, " \
|
||||
"bantime INTEGER NOT NULL, " \
|
||||
"bancount INTEGER NOT NULL default 1, " \
|
||||
"data JSON, " \
|
||||
"PRIMARY KEY(ip, jail), " \
|
||||
"FOREIGN KEY(jail) REFERENCES jails(name) " \
|
||||
");" \
|
||||
"CREATE INDEX IF NOT EXISTS bips_timeofban ON bips(timeofban);" \
|
||||
"CREATE INDEX IF NOT EXISTS bips_ip ON bips(ip);")
|
||||
)
|
||||
_CREATE_TABS = dict(_CREATE_SCRIPTS)
|
||||
|
||||
|
||||
def __init__(self, filename, purgeAge=24*60*60, outDatedFactor=3):
|
||||
self.maxMatches = 10
|
||||
self._lock = RLock()
|
||||
self._dbFilename = filename
|
||||
self._purgeAge = purgeAge
|
||||
self._outDatedFactor = outDatedFactor;
|
||||
self._connectDB()
|
||||
|
||||
def _connectDB(self, checkIntegrity=False):
|
||||
filename = self._dbFilename
|
||||
try:
|
||||
self._db = sqlite3.connect(
|
||||
filename, check_same_thread=False,
|
||||
detect_types=sqlite3.PARSE_DECLTYPES)
|
||||
# # to allow use multi-byte utf-8
|
||||
# self._db.text_factory = str
|
||||
|
||||
self._bansMergedCache = {}
|
||||
|
||||
logSys.info(
|
||||
"Connected to fail2ban persistent database '%s'", filename)
|
||||
except sqlite3.OperationalError as e:
|
||||
logSys.error(
|
||||
"Error connecting to fail2ban persistent database '%s': %s",
|
||||
filename, e.args[0])
|
||||
raise
|
||||
|
||||
# differentiate pypy: switch journal mode later (save it during the upgrade),
|
||||
# to prevent errors like "database table is locked":
|
||||
try:
|
||||
import __pypy__
|
||||
pypy = True
|
||||
except ImportError:
|
||||
pypy = False
|
||||
|
||||
cur = self._db.cursor()
|
||||
try:
|
||||
cur.execute("PRAGMA foreign_keys = ON")
|
||||
# speedup: write data through OS without syncing (no wait):
|
||||
cur.execute("PRAGMA synchronous = OFF")
|
||||
# speedup: transaction log in memory, alternate using OFF (disable, rollback will be impossible):
|
||||
if not pypy:
|
||||
cur.execute("PRAGMA journal_mode = MEMORY")
|
||||
# speedup: temporary tables and indices are kept in memory:
|
||||
cur.execute("PRAGMA temp_store = MEMORY")
|
||||
|
||||
cur.execute("SELECT version FROM fail2banDb LIMIT 1")
|
||||
except sqlite3.OperationalError:
|
||||
logSys.warning("New database created. Version '%r'",
|
||||
self.createDb())
|
||||
except sqlite3.Error as e:
|
||||
logSys.error(
|
||||
"Error opening fail2ban persistent database '%s': %s",
|
||||
filename, e.args[0])
|
||||
# if not a file - raise an error:
|
||||
if not os.path.isfile(filename):
|
||||
raise
|
||||
# try to repair it:
|
||||
cur.close()
|
||||
cur = None
|
||||
self.repairDB()
|
||||
else:
|
||||
version = cur.fetchone()[0]
|
||||
if version != Fail2BanDb.__version__:
|
||||
newversion = self.updateDb(version)
|
||||
if newversion == Fail2BanDb.__version__:
|
||||
logSys.warning( "Database updated from '%r' to '%r'",
|
||||
version, newversion)
|
||||
else: # pragma: no cover
|
||||
logSys.error( "Database update failed to achieve version '%r'"
|
||||
": updated from '%r' to '%r'",
|
||||
Fail2BanDb.__version__, version, newversion)
|
||||
raise RuntimeError('Failed to fully update')
|
||||
finally:
|
||||
if checkIntegrity:
|
||||
logSys.debug(" Create missing tables/indices ...")
|
||||
self._createDb(cur, incremental=True)
|
||||
logSys.debug(" -> ok")
|
||||
logSys.debug(" Check integrity ...")
|
||||
cur.execute("PRAGMA integrity_check")
|
||||
for s in cur.fetchall():
|
||||
logSys.debug(" -> %s", ' '.join(s))
|
||||
self._db.commit()
|
||||
if cur:
|
||||
# pypy: set journal mode after possible upgrade db:
|
||||
if pypy:
|
||||
cur.execute("PRAGMA journal_mode = MEMORY")
|
||||
cur.close()
|
||||
|
||||
def close(self):
	"""Shut down the SQLite connection held by this instance."""
	logSys.debug("Close connection to database ...")
	db = self._db
	db.close()
	logSys.info("Connection to database closed.")
|
||||
|
||||
@property
def _dbBackupFilename(self):
	"""Timestamped backup file name, computed once and cached on the instance."""
	try:
		return self.__dbBackupFilename
	except AttributeError:
		# first access: derive "<dbfile>.YYYYmmdd-HHMMSS" and memoize it
		backup = self._dbFilename + '.' + time.strftime('%Y%m%d-%H%M%S', MyTime.gmtime())
		self.__dbBackupFilename = backup
		return backup
|
||||
|
||||
def repairDB(self):
	"""Attempt to restore a corrupted database file.

	The broken file is moved aside as a timestamped backup, then a
	dump/restore via the sqlite3 CLI is attempted; if the restored file is
	empty (or anything else fails) the database is recreated from scratch.
	"""
	class RepairException(Exception):
		pass
	# avoid endless recursion if reconnect failed again for some reasons:
	origRepair = self.repairDB
	self.repairDB = None
	try:
		logSys.info("Trying to repair database %s", self._dbFilename)
		# move the broken file aside as backup (only if no backup exists yet):
		if not os.path.isfile(self._dbBackupFilename):
			shutil.move(self._dbFilename, self._dbBackupFilename)
			logSys.info(" Database backup created: %s", self._dbBackupFilename)
		elif os.path.isfile(self._dbFilename):
			os.remove(self._dbFilename)
		# first try to repair using dump/restore in order
		Utils.executeCmd((r"""f2b_db=$0; f2b_dbbk=$1; sqlite3 "$f2b_dbbk" ".dump" | sqlite3 "$f2b_db" """,
			self._dbFilename, self._dbBackupFilename))
		dbFileSize = os.stat(self._dbFilename).st_size
		if not dbFileSize:
			logSys.info(" Repair seems to be failed, restored %d byte(s).", dbFileSize)
			raise RepairException('Recreate ...')
		logSys.info(" Repair seems to be successful, restored %d byte(s).", dbFileSize)
		# succeeded - try to reconnect:
		self._connectDB(checkIntegrity=True)
	except Exception as e:
		# if still failed, just recreate database as fallback:
		logSys.error(" Error repairing of fail2ban database '%s': %s",
			self._dbFilename, e.args[0],
			exc_info=(not isinstance(e, RepairException) and logSys.getEffectiveLevel() <= 10))
		os.remove(self._dbFilename)
		self._connectDB(checkIntegrity=True)
	finally:
		self.repairDB = origRepair
|
||||
|
||||
@property
def filename(self):
	"""str: Path of the SQLite3 database file backing this instance."""
	return self._dbFilename
|
||||
|
||||
@property
def purgeage(self):
	"""Age (in seconds) after which old bans are purged from the database."""
	return self._purgeAge

@purgeage.setter
def purgeage(self, value):
	# accepts either plain seconds or a human-readable duration string
	self._purgeAge = MyTime.str2seconds(value)
|
||||
|
||||
def _createDb(self, cur, incremental=False):
	"""Create all tables/indices (if missing) and record the schema version.

	Returns
	-------
	int
		The version number now stored in the fail2banDb table.
	"""
	# create all (if not exists):
	for _name, script in Fail2BanDb._CREATE_SCRIPTS:
		cur.executescript(script)
	# save current database version (if not already set):
	cur.execute("INSERT INTO fail2banDb(version)"
		" SELECT ? WHERE NOT EXISTS (SELECT 1 FROM fail2banDb LIMIT 1)",
		(Fail2BanDb.__version__, ))
	cur.execute("SELECT version FROM fail2banDb LIMIT 1")
	return cur.fetchone()[0]
|
||||
|
||||
@commitandrollback
def createDb(self, cur, incremental=False):
	"""Public wrapper for `_createDb`, executed within a commit/rollback transaction."""
	return self._createDb(cur, incremental)
|
||||
|
||||
def _tableExists(self, cur, table):
	"""Return a truthy value iff *table* exists in sqlite_master."""
	cur.execute("select 1 where exists ("
		"select 1 from sqlite_master WHERE type='table' AND name=?)", (table,))
	row = cur.fetchone()
	return row is not None and row[0]
|
||||
|
||||
@commitandrollback
def updateDb(self, cur, version):
	"""Update an existing database, called during initialisation.

	A timestamped backup is also created prior to attempting the update.
	Returns the version now stored in the database; on failure falls back
	to `repairDB`.
	"""
	if version > Fail2BanDb.__version__:
		raise NotImplementedError(
			"Attempt to travel to future version of database ...how did you get here??")
	try:
		logSys.info("Upgrade database: %s from version '%r'", self._dbBackupFilename, version)
		# keep a copy of the pre-upgrade file around (only once):
		if not os.path.isfile(self._dbBackupFilename):
			shutil.copyfile(self.filename, self._dbBackupFilename)
			logSys.info(" Database backup created: %s", self._dbBackupFilename)

		# v1 -> v2: recreate "logs" with the current schema, preserving rows:
		if version < 2 and self._tableExists(cur, "logs"):
			cur.executescript("BEGIN TRANSACTION;"
				"CREATE TEMPORARY TABLE logs_temp AS SELECT * FROM logs;"
				"DROP TABLE logs;"
				"%s;"
				"INSERT INTO logs SELECT * from logs_temp;"
				"DROP TABLE logs_temp;"
				"UPDATE fail2banDb SET version = 2;"
				"COMMIT;" % Fail2BanDb._CREATE_TABS['logs'])

		# v2 -> v3: add bantime/bancount columns to "bans":
		if version < 3 and self._tableExists(cur, "bans"):
			# set ban-time to -2 (note it means rather unknown, as persistent, will be fixed by restore):
			cur.executescript("BEGIN TRANSACTION;"
				"CREATE TEMPORARY TABLE bans_temp AS SELECT jail, ip, timeofban, -2 as bantime, 1 as bancount, data FROM bans;"
				"DROP TABLE bans;"
				"%s;\n"
				"INSERT INTO bans SELECT * from bans_temp;"
				"DROP TABLE bans_temp;"
				"COMMIT;" % Fail2BanDb._CREATE_TABS['bans'])

		# v3 -> v4: introduce the "bips" (bad-ips) table and seed it from "bans":
		if version < 4 and not self._tableExists(cur, "bips"):
			cur.executescript("BEGIN TRANSACTION;"
				"%s;\n"
				"UPDATE fail2banDb SET version = 4;"
				"COMMIT;" % Fail2BanDb._CREATE_TABS['bips'])
			if self._tableExists(cur, "bans"):
				cur.execute(
					"INSERT OR REPLACE INTO bips(ip, jail, timeofban, bantime, bancount, data)"
					" SELECT ip, jail, timeofban, bantime, bancount, data FROM bans order by timeofban")

		cur.execute("SELECT version FROM fail2banDb LIMIT 1")
		return cur.fetchone()[0]
	except Exception as e:
		# if still failed, just recreate database as fallback:
		logSys.error("Failed to upgrade database '%s': %s",
			self._dbFilename, e.args[0],
			exc_info=logSys.getEffectiveLevel() <= 10)
		self.repairDB()
|
||||
|
||||
@commitandrollback
def addJail(self, cur, jail):
	"""Register *jail* in the database (or re-enable it if already known).

	Parameters
	----------
	jail : Jail
		Jail to be added to the database.
	"""
	cur.execute(
		"INSERT OR IGNORE INTO jails(name, enabled) VALUES(?, 1)",
		(jail.name,))
	# row already existed: make sure it is flagged as enabled again
	if cur.rowcount <= 0:
		cur.execute(
			"UPDATE jails SET enabled = 1 WHERE name = ? AND enabled != 1",
			(jail.name,))
|
||||
|
||||
@commitandrollback
def delJail(self, cur, jail):
	"""Mark *jail* as disabled in the database.

	Parameters
	----------
	jail : Jail
		Jail to be removed from the database.
	"""
	# Will be deleted by purge as appropriate
	cur.execute(
		"UPDATE jails SET enabled=0 WHERE name=?", (jail.name, ))
|
||||
|
||||
@commitandrollback
def delAllJails(self, cur):
	"""Mark every jail as disabled; rows are removed later by purge."""
	cur.execute("UPDATE jails SET enabled=0")
|
||||
|
||||
@commitandrollback
def getJailNames(self, cur, enabled=None):
	"""Get name of jails in database.

	Currently only used for testing purposes.

	Parameters
	----------
	enabled : bool or None
		If given, restrict to jails whose `enabled` flag matches.

	Returns
	-------
	set
		Set of jail names.
	"""
	if enabled is None:
		cur.execute("SELECT name FROM jails")
	else:
		# bind the flag as a parameter instead of interpolating it into the SQL
		cur.execute("SELECT name FROM jails WHERE enabled=?", (int(enabled),))
	# fetchall(): fetchmany() returns at most cursor.arraysize (default 1) rows,
	# which silently truncated the result to a single jail name
	return set(row[0] for row in cur.fetchall())
|
||||
|
||||
@commitandrollback
def addLog(self, cur, jail, container):
	"""Adds a log to the database.

	Parameters
	----------
	jail : Jail
		Jail that log is being monitored by.
	container : FileContainer
		File container of the log file being added.

	Returns
	-------
	int
		If log was already present in database, value of last position
		in the log file; else `None`
	"""
	return self._addLog(cur, jail,
		container.getFileName(), container.getPos(), container.getHash())
|
||||
|
||||
def _addLog(self, cur, jail, name, pos=0, md5=None):
	"""Insert/look up a log entry; return the stored position if hashes match.

	Returns `None` when the log is new or its first-line hash changed
	(stored position is stale in that case).
	"""
	lastLinePos = None
	cur.execute(
		"SELECT firstlinemd5, lastfilepos FROM logs "
		"WHERE jail=? AND path=?",
		(jail.name, name))
	row = cur.fetchone()
	if row is not None:
		firstLineMD5, lastLinePos = row
	else:
		firstLineMD5 = None

	# unknown log (or empty hash) with usable state - store it:
	if firstLineMD5 is None and (pos or md5 is not None):
		cur.execute(
			"INSERT OR REPLACE INTO logs(jail, path, firstlinemd5, lastfilepos) "
			"VALUES(?, ?, ?, ?)", (jail.name, name, md5, pos))
	# file content changed (different first line) - position is stale:
	if md5 is not None and md5 != firstLineMD5:
		lastLinePos = None
	return lastLinePos
|
||||
|
||||
@commitandrollback
def getLogPaths(self, cur, jail=None):
	"""Gets all the log paths from the database.

	Currently only for testing purposes.

	Parameters
	----------
	jail : Jail
		If specified, will only return logs belonging to the jail.

	Returns
	-------
	set
		Set of log paths.
	"""
	query = "SELECT path FROM logs"
	queryArgs = []
	if jail is not None:
		query += " WHERE jail=?"
		queryArgs.append(jail.name)
	cur.execute(query, queryArgs)
	# fetchall(): fetchmany() returns at most cursor.arraysize (default 1) rows,
	# which silently truncated the result to a single path
	return set(row[0] for row in cur.fetchall())
|
||||
|
||||
@commitandrollback
def updateLog(self, cur, jail, container):
	"""Updates hash and last position in log file.

	Parameters
	----------
	jail : Jail
		Jail of which the log file belongs to.
	container : FileContainer
		File container of the log file being updated.
	"""
	self._updateLog(cur, jail,
		container.getFileName(), container.getPos(), container.getHash())
|
||||
|
||||
def _updateLog(self, cur, jail, name, pos, md5):
	"""Persist hash and position for a log; insert the row if it is missing."""
	cur.execute(
		"UPDATE logs SET firstlinemd5=?, lastfilepos=? "
		"WHERE jail=? AND path=?", (md5, pos, jail.name, name))
	# be sure it is set (if not available):
	if not cur.rowcount:
		cur.execute(
			"INSERT OR REPLACE INTO logs(jail, path, firstlinemd5, lastfilepos) "
			"VALUES(?, ?, ?, ?)", (jail.name, name, md5, pos))
|
||||
|
||||
@commitandrollback
def getJournalPos(self, cur, jail, name, time=0, iso=None):
	"""Get journal position from database.

	Parameters
	----------
	jail : Jail
		Jail of which the journal belongs to.
	name, time, iso :
		Journal name (typically systemd-journal) and last known time.

	Returns
	-------
	int (or float)
		Last position (as time) if it was already present in database; else `None`
	"""
	# reuses the log storage: no hash, just time as iso
	return self._addLog(cur, jail, name, time, iso)
|
||||
|
||||
@commitandrollback
def updateJournal(self, cur, jail, name, time, iso):
	"""Updates last position (as time) of journal.

	Parameters
	----------
	jail : Jail
		Jail of which the journal belongs to.
	name, time, iso :
		Journal name (typically systemd-journal) and last known time.
	"""
	# reuses the log storage: no hash, just time as iso
	self._updateLog(cur, jail, name, time, iso)
|
||||
|
||||
@commitandrollback
def addBan(self, cur, jail, ticket):
	"""Add a ban to the database.

	Parameters
	----------
	jail : Jail
		Jail in which the ban has occurred.
	ticket : BanTicket
		Ticket of the ban to be added.
	"""
	ip = str(ticket.getID())
	# invalidate cached merged-ban results touching this IP:
	for key in ((ip, jail), (ip, None)):
		self._bansMergedCache.pop(key, None)
	#TODO: Implement data parts once arbitrary match keys completed
	data = ticket.getData()
	matches = data.get('matches')
	if self.maxMatches:
		# keep only the newest maxMatches entries:
		if matches and len(matches) > self.maxMatches:
			data = data.copy()
			data['matches'] = matches[-self.maxMatches:]
	elif matches:
		# matches storage disabled - strip them entirely:
		data = data.copy()
		del data['matches']
	timeofban = int(round(ticket.getTime()))
	bantime = ticket.getBanTime(jail.actions.getBanTime())
	bancount = ticket.getBanCount()
	# history row per ban event:
	cur.execute(
		"INSERT INTO bans(jail, ip, timeofban, bantime, bancount, data) VALUES(?, ?, ?, ?, ?, ?)",
		(jail.name, ip, timeofban, bantime, bancount, data))
	# current per-IP state ("bad ips"), replaced on re-ban:
	cur.execute(
		"INSERT OR REPLACE INTO bips(ip, jail, timeofban, bantime, bancount, data) VALUES(?, ?, ?, ?, ?, ?)",
		(ip, jail.name, timeofban, bantime, bancount, data))
|
||||
|
||||
@commitandrollback
def delBan(self, cur, jail, *args):
	"""Delete a single or multiple tickets from the database.

	Parameters
	----------
	jail : Jail
		Jail in which the ticket(s) should be removed.
	args : list of IP
		IPs to be removed, if not given all tickets of jail will be removed.
	"""
	delBips = "DELETE FROM bips WHERE jail = ?"
	delBans = "DELETE FROM bans WHERE jail = ?"
	if not args:
		# no IPs given - drop everything belonging to the jail:
		cur.execute(delBips, (jail.name,))
		cur.execute(delBans, (jail.name,))
		return
	delBips += " AND ip = ?"
	delBans += " AND ip = ?"
	for ip in args:
		params = (jail.name, str(ip))
		cur.execute(delBips, params)
		cur.execute(delBans, params)
|
||||
|
||||
@commitandrollback
def _getBans(self, cur, jail=None, bantime=None, ip=None):
	"""Select (ip, timeofban, data) rows from "bans", ordered by ip / newest first."""
	query = ["SELECT ip, timeofban, data FROM bans WHERE 1"]
	queryArgs = []
	if jail is not None:
		query.append(" AND jail=?")
		queryArgs.append(jail.name)
	if bantime is not None and bantime >= 0:
		# restrict to bans still valid within the given window:
		query.append(" AND timeofban > ?")
		queryArgs.append(MyTime.time() - bantime)
	if ip is not None:
		query.append(" AND ip=?")
		queryArgs.append(str(ip))
	query.append(" ORDER BY ip, timeofban desc")
	# repack iterator as long as in lock:
	return list(cur.execute("".join(query), queryArgs))
|
||||
|
||||
def getBans(self, **kwargs):
	"""Get bans from the database.

	Parameters
	----------
	jail : Jail
		Jail that the ban belongs to. Default `None`; all jails.
	bantime : int
		Ban time in seconds, such that bans returned would still be
		valid now. Negative values are equivalent to `None`.
		Default `None`; no limit.
	ip : str
		IP Address to filter bans by. Default `None`; all IPs.

	Returns
	-------
	list
		List of `Ticket`s for bans stored in database.
	"""
	tickets = []
	for ip, timeofban, data in self._getBans(**kwargs):
		#TODO: Implement data parts once arbitrary match keys completed
		ticket = FailTicket(ip, timeofban)
		ticket.setData(data)
		tickets.append(ticket)
	return tickets
|
||||
|
||||
def getBansMerged(self, ip=None, jail=None, bantime=None):
	"""Get bans from the database, merged into single ticket.

	This is the same as `getBans`, but bans merged into single
	ticket.

	Parameters
	----------
	jail : Jail
		Jail that the ban belongs to. Default `None`; all jails.
	bantime : int
		Ban time in seconds, such that bans returned would still be
		valid now. Negative values are equivalent to `None`.
		Default `None`; no limit.
	ip : str
		IP Address to filter bans by. Default `None`; all IPs.

	Returns
	-------
	list or Ticket
		Single ticket representing bans stored in database per IP
		in a list. When `ip` argument passed, a single `Ticket` is
		returned.
	"""
	with self._lock:
		# results are only cacheable when no time window restricts them:
		cacheKey = (ip, jail) if bantime is None or bantime < 0 else None
		if cacheKey is not None and cacheKey in self._bansMergedCache:
			return self._bansMergedCache[cacheKey]

		tickets = []
		ticket = None

		results = list(self._getBans(ip=ip, jail=jail, bantime=bantime))
		if results:
			prev_banip = results[0][0]
			matches = []
			failures = 0
			tickdata = {}
			for banip, timeofban, data in results:
				#TODO: Implement data parts once arbitrary match keys completed
				if banip != prev_banip:
					# IP changed - flush the merged ticket for the previous IP:
					ticket = FailTicket(prev_banip, prev_timeofban, matches)
					ticket.setAttempt(failures)
					tickets.append(ticket)
					# Reset variables
					prev_banip = banip
					matches = []
					failures = 0
					tickdata = {}
				m = data.get('matches', [])
				# pre-insert "maxadd" entries (because tickets are ordered desc by time)
				maxadd = self.maxMatches - len(matches)
				if maxadd > 0:
					if len(m) <= maxadd:
						matches = m + matches
					else:
						matches = m[-maxadd:] + matches
				failures += data.get('failures', 1)
				data['failures'] = failures
				data['matches'] = matches
				tickdata.update(data)
				prev_timeofban = timeofban
			# flush the trailing (last) merged ticket:
			ticket = FailTicket(banip, prev_timeofban, data=tickdata)
			tickets.append(ticket)

		if cacheKey:
			self._bansMergedCache[cacheKey] = tickets if ip is None else ticket
		return tickets if ip is None else ticket
|
||||
|
||||
@commitandrollback
def getBan(self, cur, ip, jail=None, forbantime=None, overalljails=None, fromtime=None):
	"""Fetch (bancount, timeofban, bantime) rows for *ip* from "bips".

	With `overalljails` the counters are summed over all jails; otherwise
	the optional jail filter applies.
	"""
	ip = str(ip)
	query = ("SELECT sum(bancount), max(timeofban), sum(bantime) FROM bips"
		if overalljails else
		"SELECT bancount, timeofban, bantime FROM bips")
	query += " WHERE ip = ?"
	queryArgs = [ip]
	if not overalljails and jail is not None:
		query += " AND jail=?"
		queryArgs.append(jail.name)
	if forbantime is not None:
		query += " AND timeofban > ?"
		queryArgs.append(MyTime.time() - forbantime)
	if fromtime is not None:
		query += " AND timeofban > ?"
		queryArgs.append(fromtime)
	if overalljails or jail is None:
		query += " GROUP BY ip ORDER BY timeofban DESC LIMIT 1"
	# repack iterator as long as in lock:
	return list(cur.execute(query, queryArgs))
|
||||
|
||||
def _getCurrentBans(self, cur, jail=None, ip=None, forbantime=None, fromtime=None):
	"""Select still-active rows from "bips" (grouped per IP when no IP given)."""
	queryArgs = []
	if jail is not None:
		query = "SELECT ip, timeofban, bantime, bancount, data FROM bips WHERE jail=?"
		queryArgs.append(jail.name)
	else:
		query = "SELECT ip, max(timeofban), bantime, bancount, data FROM bips WHERE 1"
	if ip is not None:
		query += " AND ip=?"
		queryArgs.append(ip)
	# still banned at `fromtime`, or persistent (bantime <= -1):
	query += " AND (timeofban + bantime > ? OR bantime <= -1)"
	queryArgs.append(fromtime)
	if forbantime not in (None, -1): # not specified or persistent (all)
		query += " AND timeofban > ?"
		queryArgs.append(fromtime - forbantime)
	if ip is None:
		query += " GROUP BY ip ORDER BY ip, timeofban DESC"
	else:
		query += " ORDER BY timeofban DESC LIMIT 1"
	return cur.execute(query, queryArgs)
|
||||
|
||||
def getCurrentBans(self, jail=None, ip=None, forbantime=None, fromtime=None,
	correctBanTime=True, maxmatches=None
):
	"""Reads tickets (with merged info) currently affected from ban from the database.

	There are all the tickets corresponding parameters jail/ip, forbantime,
	fromtime (normally now).

	If correctBanTime specified (default True) it will fix the restored ban-time
	(and therefore endOfBan) of the ticket (normally it is ban-time of jail as maximum)
	for all tickets with ban-time greater (or persistent).
	"""
	cur = self._db.cursor()
	try:
		if fromtime is None:
			fromtime = MyTime.time()
		tickets = []
		ticket = None
		if correctBanTime is True:
			correctBanTime = jail.getMaxBanTime() if jail is not None else None
			# don't change if persistent allowed:
			if correctBanTime == -1: correctBanTime = None

		with self._lock:
			bans = self._getCurrentBans(cur, jail=jail, ip=ip,
				forbantime=forbantime, fromtime=fromtime
			)
			for ticket in bans:
				# can produce unpack error (database may return sporadical wrong-empty row):
				try:
					banip, timeofban, bantime, bancount, data = ticket
					# additionally check for empty values:
					if banip is None or banip == "": # pragma: no cover
						raise ValueError('unexpected value %r' % (banip,))
					# if bantime unknown (after upgrade-db from earlier version), just use min known ban-time:
					if bantime == -2: # todo: remove it in future version
						bantime = jail.actions.getBanTime() if jail is not None else (
							correctBanTime if correctBanTime else 600)
					elif correctBanTime and correctBanTime >= 0:
						# if persistent ban (or greater as max), use current max-bantime of the jail:
						if bantime == -1 or bantime > correctBanTime:
							bantime = correctBanTime
							# after correction check the end of ban again:
							if bantime != -1 and timeofban + bantime <= fromtime:
								# not persistent and too old - ignore it:
								logSys.debug("ignore ticket (with new max ban-time %r): too old %r <= %r, ticket: %r",
									bantime, timeofban + bantime, fromtime, ticket)
								continue
				except ValueError as e: # pragma: no cover
					logSys.debug("get current bans: ignore row %r - %s", ticket, e)
					continue
				# logSys.debug('restore ticket %r, %r, %r', banip, timeofban, data)
				ticket = FailTicket(banip, timeofban, data=data)
				# filter matches if expected (current count > as maxmatches specified):
				if maxmatches is None:
					maxmatches = self.maxMatches
				if maxmatches:
					matches = ticket.getMatches()
					if matches and len(matches) > maxmatches:
						ticket.setMatches(matches[-maxmatches:])
				else:
					ticket.setMatches(None)
				# logSys.debug('restored ticket: %r', ticket)
				ticket.setBanTime(bantime)
				ticket.setBanCount(bancount)
				if ip is not None: return ticket
				tickets.append(ticket)
	finally:
		cur.close()

	return tickets
|
||||
|
||||
def _cleanjails(self, cur):
	"""Delete disabled jails that no longer own any ban/bip rows."""
	cur.execute(
		"DELETE FROM jails WHERE enabled = 0 "
		"AND NOT EXISTS(SELECT * FROM bans WHERE jail = jails.name) "
		"AND NOT EXISTS(SELECT * FROM bips WHERE jail = jails.name)")
|
||||
|
||||
def _purge_bips(self, cur):
	"""Purge old bad ips (jails and log files from database).
	Currently it is timed out IP, whose time since last ban is several times out-dated (outDatedFactor is default 3).
	Permanent banned ips will be never removed.
	"""
	# single snapshot of the purge threshold (used for both comparisons):
	threshold = int(MyTime.time()) - self._purgeAge
	cur.execute(
		"DELETE FROM bips WHERE timeofban < ? and bantime != -1 and (timeofban + (bantime * ?)) < ?",
		(threshold, self._outDatedFactor, threshold))
|
||||
|
||||
@commitandrollback
def purge(self, cur):
	"""Purge old bans, jails and log files from database.
	"""
	# any cached merge result may now be stale:
	self._bansMergedCache = {}
	cur.execute(
		"DELETE FROM bans WHERE timeofban < ?",
		(MyTime.time() - self._purgeAge, ))
	self._purge_bips(cur)
	self._cleanjails(cur)
|
||||
|
||||
556
fail2ban-master/fail2ban/server/datedetector.py
Normal file
556
fail2ban-master/fail2ban/server/datedetector.py
Normal file
@@ -0,0 +1,556 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier and Fail2Ban Contributors"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import copy
|
||||
import time
|
||||
|
||||
from threading import Lock
|
||||
|
||||
from .datetemplate import re, DateTemplate, DatePatternRegex, DateTai64n, DateEpoch, \
|
||||
RE_EPOCH_PATTERN
|
||||
from .strptime import validateTimeZone
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
logLevel = 5
|
||||
|
||||
RE_DATE_PREMATCH = re.compile(r"(?<!\\)\{DATE\}", re.IGNORECASE)
|
||||
DD_patternCache = Utils.Cache(maxCount=1000, maxTime=60*60)
|
||||
|
||||
|
||||
def _getPatternTemplate(pattern, key=None):
	"""Return (and cache) a date-template instance for *pattern*.

	EPOCH/LEPOCH and TAI64N keys map to their dedicated template classes;
	everything else becomes a DatePatternRegex.
	"""
	if key is None:
		key = pattern
		# patterns without strptime directives are keyed case-insensitively:
		if '%' not in pattern:
			key = pattern.upper()
	template = DD_patternCache.get(key)

	if not template:
		if "EPOCH" in key:
			if RE_EPOCH_PATTERN.search(pattern):
				template = DateEpoch(pattern=pattern, longFrm="LEPOCH" in key)
			elif key in ("EPOCH", "{^LN-BEG}EPOCH", "^EPOCH"):
				template = DateEpoch(lineBeginOnly=(key != "EPOCH"))
			elif key in ("LEPOCH", "{^LN-BEG}LEPOCH", "^LEPOCH"):
				template = DateEpoch(lineBeginOnly=(key != "LEPOCH"), longFrm=True)
		if template is None:
			if key in ("TAI64N", "{^LN-BEG}TAI64N", "^TAI64N"):
				template = DateTai64n(wordBegin=('start' if key != "TAI64N" else False))
			else:
				template = DatePatternRegex(pattern)

		DD_patternCache.set(key, template)
	return template
|
||||
|
||||
def _getAnchoredTemplate(template, wrap=lambda s: '{^LN-BEG}' + s):
	"""Return a line-begin anchored variant of *template*, using the cache."""
	# wrap name:
	name = wrap(template.name)
	# try to find in cache (by name):
	template2 = DD_patternCache.get(name)
	if not template2:
		# wrap pattern (or regexp if not pattern template):
		regex = wrap(getattr(template, 'pattern', template.regex))
		if hasattr(template, 'pattern'):
			# try to find in cache (by pattern):
			template2 = DD_patternCache.get(regex)
		# make duplicate and set new anchored regex:
		if not template2:
			src = name if not hasattr(template, 'pattern') else regex
			template2 = _getPatternTemplate(src)
	return template2
|
||||
|
||||
|
||||
|
||||
class DateDetectorCache(object):
	"""Implements the caching of the default templates list.
	"""
	def __init__(self):
		self.__lock = Lock()
		self.__templates = list()

	@property
	def templates(self):
		"""List of template instances managed by the detector.
		"""
		# fast path without locking once the list is built:
		if self.__templates:
			return self.__templates
		with self.__lock:
			if self.__templates: # pragma: no cover - race-condition + multi-threaded environment only
				return self.__templates
			self._addDefaultTemplate()
			return self.__templates

	def _cacheTemplate(self, template):
		"""Cache Fail2Ban's default template.

		"""
		# if not already line-begin anchored, additional template, that prefers datetime
		# at start of a line (safety+performance feature):
		name = template.name
		if not name.startswith('{^LN-BEG}') and not name.startswith('^') and hasattr(template, 'regex'):
			template2 = _getAnchoredTemplate(template)
			# prevent to add duplicates:
			if template2.name != name:
				# increase weight of such templates, because they should be always
				# preferred in template sorting process (bubble up):
				template2.weight = 100.0
				self.__tmpcache[0].append(template2)
		# add template:
		self.__tmpcache[1].append(template)

	DEFAULT_TEMPLATES = [
		# ISO 8601, simple date, optional subsecond and timezone:
		# 2005-01-23T21:59:59.981746, 2005-01-23 21:59:59, 2005-01-23 8:59:59
		# simple date: 2005/01/23 21:59:59
		# custom for syslog-ng 2006.12.21 06:43:20
		r"%ExY(?P<_sep>[-/.])%m(?P=_sep)%d(?:T| ?)%H:%M:%S(?:[.,]%f)?(?:\s*%z)?",
		# asctime with optional day, subsecond and/or year:
		# Sun Jan 23 21:59:59.011 2005
		r"(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
		# asctime with optional day, subsecond and/or year coming after day
		# http://bugs.debian.org/798923
		# Sun Jan 23 2005 21:59:59.011
		r"(?:%a )?%b %d %ExY %k:%M:%S(?:\.%f)?",
		# simple date too (from x11vnc): 23/01/2005 21:59:59
		# and with optional year given by 2 digits: 23/01/05 21:59:59
		# (See http://bugs.debian.org/537610)
		# 17-07-2008 17:23:25
		r"%d(?P<_sep>[-/])%m(?P=_sep)(?:%ExY|%Exy) %k:%M:%S",
		# Apache format optional time zone:
		# [31/Oct/2006:09:22:55 -0000]
		# 26-Jul-2007 15:20:52
		# named 26-Jul-2007 15:20:52.252
		# roundcube 26-Jul-2007 15:20:52 +0200
		r"%d(?P<_sep>[-/])%b(?P=_sep)%ExY[ :]?%H:%M:%S(?:\.%f)?(?: %z)?",
		# CPanel 05/20/2008:01:57:39
		r"%m/%d/%ExY:%H:%M:%S",
		# 01-27-2012 16:22:44.252
		# subseconds explicit to avoid possible %m<->%d confusion
		# with previous ("%d-%m-%ExY %k:%M:%S" by "%d(?P<_sep>[-/])%m(?P=_sep)(?:%ExY|%Exy) %k:%M:%S")
		r"%m-%d-%ExY %k:%M:%S(?:\.%f)?",
		# Epoch
		r"EPOCH",
		# Only time information in the log
		r"{^LN-BEG}%H:%M:%S",
		# <09/16/08@05:03:30>
		r"^<%m/%d/%Exy@%H:%M:%S>",
		# MySQL: 130322 11:46:11
		r"%Exy%Exm%Exd ?%H:%M:%S",
		# Apache Tomcat
		r"%b %d, %ExY %I:%M:%S %p",
		# ASSP: Apr-27-13 02:33:06
		r"^%b-%d-%Exy %k:%M:%S",
		# 20050123T215959, 20050123 215959, 20050123 85959, 20050123-21:59:59
		r"%ExY%Exm%Exd(?:-|T| ?)%ExH:?%ExM:?%ExS(?:[.,]%f)?(?:\s*%z)?",
		# prefixed with optional named time zone (monit):
		# PDT Apr 16 21:05:29
		r"(?:%Z )?(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
		# +00:00 Jan 23 21:59:59.011 2005
		r"(?:%z )?(?:%a )?%b %d %k:%M:%S(?:\.%f)?(?: %ExY)?",
		# TAI64N
		r"TAI64N",
	]

	@property
	def defaultTemplates(self):
		# lazily compile the string patterns into template objects (in place):
		if isinstance(DateDetectorCache.DEFAULT_TEMPLATES[0], str):
			for i, dt in enumerate(DateDetectorCache.DEFAULT_TEMPLATES):
				DateDetectorCache.DEFAULT_TEMPLATES[i] = _getPatternTemplate(dt)
		return DateDetectorCache.DEFAULT_TEMPLATES

	def _addDefaultTemplate(self):
		"""Add resp. cache Fail2Ban's default set of date templates.
		"""
		self.__tmpcache = [], []
		# cache default templates:
		for dt in self.defaultTemplates:
			self._cacheTemplate(dt)
		# anchored variants first, then the plain templates:
		self.__templates = self.__tmpcache[0] + self.__tmpcache[1]
		del self.__tmpcache
|
||||
|
||||
|
||||
class DateDetectorTemplate(object):
	"""Used for "shallow copy" of the template object.

	Prevents collectively usage of hits/lastUsed in cached templates
	"""
	__slots__ = ('template', 'hits', 'lastUsed', 'distance')

	def __init__(self, template):
		self.template = template
		self.hits = 0
		self.lastUsed = 0
		# the last distance to date-match within the log file:
		self.distance = 0x7fffffff

	@property
	def weight(self):
		# more hits and a smaller distance to the match -> higher weight
		base = self.template.weight
		return self.hits * base / max(1, self.distance)

	def __getattr__(self, name):
		""" Returns attribute of template (called for parameters not in slots)
		"""
		return getattr(self.template, name)
|
||||
|
||||
|
||||
class DateDetector(object):
	"""Manages one or more date templates to find a date within a log line.

	Attributes
	----------
	templates
	"""
	# process-wide cache of pre-built default templates (shared by all detectors):
	_defCache = DateDetectorCache()

	def __init__(self):
		# templates are DateDetectorTemplate wrappers (per-detector statistics):
		self.__templates = list()
		self.__known_names = set()
		# time the template was long unused (currently 300 == 5m):
		self.__unusedTime = 300
		# last known distance (bypass one char collision) and end position:
		self.__lastPos = 1, None
		self.__lastEndPos = 0x7fffffff, None
		self.__lastTemplIdx = 0x7fffffff
		# first free place:
		self.__firstUnused = 0
		# pre-match pattern:
		self.__preMatch = None
		# default TZ (if set, treat log lines without explicit time zone to be in this time zone):
		self.__default_tz = None

	def _appendTemplate(self, template, ignoreDup=False):
		"""Register template under its unique name.

		With ignoreDup=True a duplicate name is silently skipped (used when
		defaults are appended after custom templates); otherwise raises
		ValueError.
		"""
		name = template.name
		if name in self.__known_names:
			if ignoreDup: return
			raise ValueError(
				"There is already a template with name %s" % name)
		self.__known_names.add(name)
		self.__templates.append(DateDetectorTemplate(template))
		logSys.debug(" date pattern regex for `%s`: `%s`",
			getattr(template, 'pattern', ''), template.regex)

	def appendTemplate(self, template):
		"""Add a date template to manage and use in search of dates.

		Parameters
		----------
		template : DateTemplate or str
			Can be either a `DateTemplate` instance, or a string which will
			be used as the pattern for the `DatePatternRegex` template. The
			template will then be added to the detector.

		Raises
		------
		ValueError
			If a template already exists with the same name.
		"""
		if isinstance(template, str):
			key = pattern = template
			# only case-sensitive if the pattern contains strptime '%' fields:
			if '%' not in pattern:
				key = pattern.upper()
			template = DD_patternCache.get(key)
			if not template:
				# special keys expand to (subsets of) the default template set:
				if key in ("{^LN-BEG}", "{DEFAULT}"):
					flt = \
						lambda template: template.flags & DateTemplate.LINE_BEGIN if key == "{^LN-BEG}" else None
					self.addDefaultTemplate(flt)
					return
				elif "{DATE}" in key:
					self.addDefaultTemplate(preMatch=pattern, allDefaults=False)
					return
				elif key == "{NONE}":
					# "match nothing" template (unbound anchor that never finds a date):
					template = _getPatternTemplate('{UNB}^', key)
				else:
					template = _getPatternTemplate(pattern, key)

				DD_patternCache.set(key, template)

		logSys.info(" date pattern `%s`: `%s`",
			getattr(template, 'pattern', ''), template.name)
		self._appendTemplate(template)

	def addDefaultTemplate(self, filterTemplate=None, preMatch=None, allDefaults=True):
		"""Add Fail2Ban's default set of date templates.

		filterTemplate : callable, optional — predicate selecting which
		default templates to add. preMatch : str, optional — surrounding
		pattern whose {DATE} placeholder is replaced by each template's
		unbound regex. allDefaults : bool — use the full cached set or
		only the raw defaults.
		"""
		# ignore duplicates if some custom templates were added before defaults:
		ignoreDup = len(self.__templates) > 0
		cnt = 0
		for template in (
			DateDetector._defCache.templates if allDefaults else DateDetector._defCache.defaultTemplates
		):
			# filter if specified:
			if filterTemplate is not None and not filterTemplate(template): continue
			# if exact pattern available - create copy of template, contains replaced {DATE} with default regex:
			if preMatch is not None:
				# get cached or create a copy with modified name/pattern, using preMatch replacement for {DATE}:
				template = _getAnchoredTemplate(template,
					wrap=lambda s: RE_DATE_PREMATCH.sub(lambda m: DateTemplate.unboundPattern(s), preMatch))
			# append date detector template (ignore duplicate if some was added before default):
			self._appendTemplate(template, ignoreDup=ignoreDup)
			cnt += 1
		if preMatch:
			logSys.info(" default date pattern for `%r`: %d template(s)", preMatch, cnt)
		else:
			logSys.info(" default %sdate pattern: %d template(s)", "filtered " if filterTemplate else "", cnt)

	@property
	def templates(self):
		"""List of template instances managed by the detector.
		"""
		return self.__templates

	def matchTime(self, line):
		"""Attempts to find date on a log line using templates.

		This uses the templates' `matchDate` method in an attempt to find
		a date. It also increments the match hit count for the winning
		template.

		Parameters
		----------
		line : str
			Line which is searched by the date templates.

		Returns
		-------
		re.MatchObject, DateTemplate
			The regex match returned from the first successfully matched
			template.
		"""
		# if no templates specified - default templates should be used:
		if not len(self.__templates):
			self.addDefaultTemplate()
		# bind a no-op when the log level would suppress the messages anyway:
		log = logSys.log if logSys.getEffectiveLevel() <= logLevel else lambda *args: None
		log(logLevel-1, "try to match time for line: %.120s", line)

		# first try to use last template with same start/end position:
		match = None
		# best alternative found so far: (match, distance, endpos, index):
		found = None, 0x7fffffff, 0x7fffffff, -1
		ignoreBySearch = 0x7fffffff
		i = self.__lastTemplIdx
		if i < len(self.__templates):
			ddtempl = self.__templates[i]
			template = ddtempl.template
			if template.flags & (DateTemplate.LINE_BEGIN|DateTemplate.LINE_END):
				log(logLevel-1, " try to match last anchored template #%02i ...", i)
				match = template.matchDate(line)
				ignoreBySearch = i
			else:
				distance, endpos = self.__lastPos[0], self.__lastEndPos[0]
				log(logLevel-1, " try to match last template #%02i (from %r to %r): ...%r==%r %s %r==%r...",
					i, distance, endpos,
					line[distance-1:distance], self.__lastPos[1],
					line[distance:endpos],
					line[endpos:endpos+1], self.__lastEndPos[2])
				# check same boundaries left/right, outside fully equal, inside only if not alnum (e. g. bound RE
				# with space or some special char), otherwise possible collision/pattern switch:
				if ((
					line[distance-1:distance] == self.__lastPos[1] or
					(line[distance:distance+1] == self.__lastPos[2] and not self.__lastPos[2].isalnum())
				) and (
					line[endpos:endpos+1] == self.__lastEndPos[2] or
					(line[endpos-1:endpos] == self.__lastEndPos[1] and not self.__lastEndPos[1].isalnum())
				)):
					# search in line part only:
					log(logLevel-1, " boundaries are correct, search in part %r", line[distance:endpos])
					match = template.matchDate(line, distance, endpos)
				else:
					log(logLevel-1, " boundaries show conflict, try whole search")
					match = template.matchDate(line)
					ignoreBySearch = i
			if match:
				distance = match.start()
				endpos = match.end()
				# if different position, possible collision/pattern switch:
				if (
					len(self.__templates) == 1 or # single template:
					template.flags & (DateTemplate.LINE_BEGIN|DateTemplate.LINE_END) or
					(distance == self.__lastPos[0] and endpos == self.__lastEndPos[0])
				):
					log(logLevel, " matched last time template #%02i", i)
				else:
					log(logLevel, " ** last pattern collision - pattern change, reserve & search ...")
					found = match, distance, endpos, i; # save current best alternative
					match = None
			else:
				log(logLevel, " ** last pattern not found - pattern change, search ...")
		# search template and better match:
		if not match:
			log(logLevel, " search template (%i) ...", len(self.__templates))
			i = 0
			for ddtempl in self.__templates:
				# skip template already tried above in the fast path:
				if i == ignoreBySearch:
					i += 1
					continue
				log(logLevel-1, " try template #%02i: %s", i, ddtempl.name)
				template = ddtempl.template
				match = template.matchDate(line)
				if match:
					distance = match.start()
					endpos = match.end()
					log(logLevel, " matched time template #%02i (at %r <= %r, %r) %s",
						i, distance, ddtempl.distance, self.__lastPos[0], template.name)
					## last (or single) template - fast stop:
					if i+1 >= len(self.__templates):
						break
					## if line-begin/end anchored - stop searching:
					if template.flags & (DateTemplate.LINE_BEGIN|DateTemplate.LINE_END):
						break
					## stop searching if next template still unused, but we had already hits:
					if (distance == 0 and ddtempl.hits) and not self.__templates[i+1].template.hits:
						break
					## [grave] if distance changed, possible date-match was found somewhere
					## in body of message, so save this template, and search further:
					if distance > ddtempl.distance or distance > self.__lastPos[0]:
						log(logLevel, " ** distance collision - pattern change, reserve")
						## shortest of both:
						if distance < found[1]:
							found = match, distance, endpos, i
						## search further:
						match = None
						i += 1
						continue
					## winner - stop search:
					break
				i += 1
			# check other template was found (use this one with shortest distance):
			if not match and found[0]:
				match, distance, endpos, i = found
				log(logLevel, " use best time template #%02i", i)
				ddtempl = self.__templates[i]
				template = ddtempl.template
		# we've winner, incr hits, set distance, usage, reorder, etc:
		if match:
			ddtempl.hits += 1
			ddtempl.lastUsed = time.time()
			ddtempl.distance = distance
			if self.__firstUnused == i:
				self.__firstUnused += 1
			# remember boundary characters around the match for the next fast-path check:
			self.__lastPos = distance, line[distance-1:distance], line[distance]
			self.__lastEndPos = endpos, line[endpos-1], line[endpos:endpos+1]
			# if not first - try to reorder current template (bubble up), they will be not sorted anymore:
			if i and i != self.__lastTemplIdx:
				i = self._reorderTemplate(i)
			self.__lastTemplIdx = i
			# return tuple with match and template reference used for parsing:
			return (match, template)

		# not found:
		log(logLevel, " no template.")
		return (None, None)

	@property
	def default_tz(self):
		# default time zone applied to lines without an explicit zone:
		return self.__default_tz

	@default_tz.setter
	def default_tz(self, value):
		self.__default_tz = validateTimeZone(value)

	def getTime(self, line, timeMatch=None):
		"""Attempts to return the date on a log line using templates.

		This uses the templates' `getDate` method in an attempt to find
		a date.
		For the faster usage, always specify a parameter timeMatch (the previous tuple result
		of the matchTime), then this will work without locking and without cycle over templates.

		Parameters
		----------
		line : str
			Line which is searched by the date templates.

		Returns
		-------
		float
			The Unix timestamp returned from the first successfully matched
			template or None if not found.
		"""
		# search match for all specified templates:
		if timeMatch is None:
			timeMatch = self.matchTime(line)
		# convert:
		template = timeMatch[1]
		if template is not None:
			try:
				date = template.getDate(line, timeMatch[0], default_tz=self.__default_tz)
				if date is not None:
					if logSys.getEffectiveLevel() <= logLevel: # pragma: no cover - heavy debug
						logSys.log(logLevel, " got time %f for %r using template %s",
							date[0], date[1].group(1), template.name)
					return date
			except ValueError:
				# unparsable date value - treat as "no date found":
				pass
		return None

	def _reorderTemplate(self, num):
		"""Reorder template (bubble up) in template list if hits grows enough.

		Parameters
		----------
		num : int
			Index of template should be moved.
		"""
		if num:
			templates = self.__templates
			ddtempl = templates[num]
			if logSys.getEffectiveLevel() <= logLevel:
				logSys.log(logLevel, " -> reorder template #%02i, hits: %r", num, ddtempl.hits)
			## current hits and time the template was long unused:
			untime = ddtempl.lastUsed - self.__unusedTime
			weight = ddtempl.weight
			## try to move faster (first if unused available, or half of part to current template position):
			pos = self.__firstUnused if self.__firstUnused < num else num // 2
			## don't move too often (multiline logs resp. log's with different date patterns),
			## if template not used too long, replace it also :
			def _moveable():
				pweight = templates[pos].weight
				if logSys.getEffectiveLevel() <= logLevel:
					logSys.log(logLevel, " -> compare template #%02i & #%02i, weight %.3f > %.3f, hits %r > %r",
						num, pos, weight, pweight, ddtempl.hits, templates[pos].hits)
				return weight > pweight or untime > templates[pos].lastUsed
			##
			## if not moveable (smaller weight or target position recently used):
			if not _moveable():
				## try to move slow (exact 1 position):
				if pos == num-1:
					return num
				pos = num-1
				## if still smaller and template at position used, don't move:
				if not _moveable():
					return num
			## move:
			del templates[num]
			# slice-assignment into an empty slice inserts at index pos
			# (equivalent to templates.insert(pos, ddtempl)):
			templates[pos:0] = [ddtempl]
			## correct first unused:
			while self.__firstUnused < len(templates) and templates[self.__firstUnused].hits:
				self.__firstUnused += 1
			if logSys.getEffectiveLevel() <= logLevel:
				logSys.log(logLevel, " -> moved template #%02i -> #%02i", num, pos)
			return pos
		return num
|
||||
396
fail2ban-master/fail2ban/server/datetemplate.py
Normal file
396
fail2ban-master/fail2ban/server/datetemplate.py
Normal file
@@ -0,0 +1,396 @@
|
||||
# emacs: -*- mode: python; coding: utf-8; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import re, time
from abc import abstractmethod

from .strptime import reGroupDictStrptime, timeRE, getTimePatternRE
from ..helpers import getLogger

logSys = getLogger(__name__)

# check already grouped contains "(", but ignores char "\(" and conditional "(?(id)...)":
RE_GROUPED = re.compile(r'(?<!(?:\(\?))(?<!\\)\((?!\?)')
# (search-RE, replacement) pair wrapping a whole pattern into one capture group,
# keeping leading inline flags / '^' and trailing '$' outside the group:
RE_GROUP = ( re.compile(r'^((?:\(\?\w+\))?\^?(?:\(\?\w+\))?)(.*?)(\$?)$'), r"\1(\2)\3" )
# global inline flags like (?iu) that must be cut out and re-prepended last:
RE_GLOBALFLAGS = re.compile(r'((?:^|(?<!\\))\(\?[a-z]+\))')

# special pattern prefixes controlling boundary handling:
RE_EXLINE_NO_BOUNDS = re.compile(r'^\{UNB\}')
RE_EXLINE_BOUND_BEG = re.compile(r'^\{\^LN-BEG\}')
# explicit word-begin / word-end boundary expressions (to strip in unboundPattern):
RE_EXSANC_BOUND_BEG = re.compile(r'^\((?:\?:)?\^\|\\b\|\\W\)')
RE_EXEANC_BOUND_BEG = re.compile(r'\(\?=\\b\|\\W\|\$\)$')
# detect patterns that already carry their own begin/end boundary (or opt out via '**'):
RE_NO_WRD_BOUND_BEG = re.compile(r'^\(*(?:\(\?\w+\))?(?:\^|\(*\*\*|\((?:\?:)?\^)')
RE_NO_WRD_BOUND_END = re.compile(r'(?<!\\)(?:\$\)?|\\b|\\s|\*\*\)*)$')
# strip the literal '**' boundary-suppression markers from front/end of a pattern:
RE_DEL_WRD_BOUNDS = ( re.compile(r'^\(*(?:\(\?\w+\))?\(*\*\*|(?<!\\)\*\*\)*$'),
	lambda m: m.group().replace('**', '') )

# detect line-begin ('^') resp. line-end ('$') anchors in a finished regex:
RE_LINE_BOUND_BEG = re.compile(r'^(?:\(\?\w+\))?(?:\^|\((?:\?:)?\^(?!\|))')
RE_LINE_BOUND_END = re.compile(r'(?<![\\\|])(?:\$\)?)$')

# strptime fields with alphabetic values (month/day names, AM/PM) -> need (?iu):
RE_ALPHA_PATTERN = re.compile(r'(?<!\%)\%[aAbBpc]')

# {EPOCH} / {LEPOCH} placeholder inside an epoch date pattern:
RE_EPOCH_PATTERN = re.compile(r"(?<!\\)\{L?EPOCH\}", re.IGNORECASE)
|
||||
|
||||
|
||||
class DateTemplate(object):
	"""A template which searches for and returns a date from a log line.

	This is an not functional abstract class which other templates should
	inherit from.

	Attributes
	----------
	name
	regex
	"""

	# bit flags describing where the regex is anchored (stored in self.flags):
	LINE_BEGIN = 8
	LINE_END = 4
	WORD_BEGIN = 2
	WORD_END = 1

	def __init__(self):
		self.name = ""
		self.weight = 1.0
		self.flags = 0
		self.hits = 0
		self.time = 0
		self._regex = ""
		# compiled regex, built lazily on first matchDate call:
		self._cRegex = None

	def getRegex(self):
		return self._regex

	def setRegex(self, regex, wordBegin=True, wordEnd=True):
		r"""Sets regex to use for searching for date in log line.

		Parameters
		----------
		regex : str
			The regex the template will use for searching for a date.
		wordBegin : bool
			Defines whether the regex should be modified to search at beginning of a
			word, by adding special boundary r'(?=^|\b|\W)' to start of regex.
			Can be disabled with specifying of ** at front of regex.
			Default True.
		wordEnd : bool
			Defines whether the regex should be modified to search at end of a word,
			by adding special boundary r'(?=\b|\W|$)' to end of regex.
			Can be disabled with specifying of ** at end of regex.
			Default True.

		Raises
		------
		re.error
			If regular expression fails to compile
		"""
		# Warning: don't use lookahead for line-begin boundary,
		# (e. g. r"^(?:\W{0,2})?" is much faster as r"(?:^|(?<=^\W)|(?<=^\W{2}))")
		# because it may be very slow in negative case (by long log-lines not matching pattern)

		regex = regex.strip()
		# cut global flags like (?iu) from RE in order to pre-set it after processing:
		gf = RE_GLOBALFLAGS.search(regex)
		if gf:
			regex = RE_GLOBALFLAGS.sub('', regex, count=1)
		# check word boundaries needed:
		boundBegin = wordBegin and not RE_NO_WRD_BOUND_BEG.search(regex)
		boundEnd = wordEnd and not RE_NO_WRD_BOUND_END.search(regex)
		# if no group add it now, should always have a group(1):
		if not RE_GROUPED.search(regex):
			regex = RE_GROUP[0].sub(RE_GROUP[1], regex)
		self.flags = 0
		# if word or line start boundary:
		if boundBegin:
			# wordBegin == 'start' means "anchored at line begin" instead of word begin:
			self.flags |= DateTemplate.WORD_BEGIN if wordBegin != 'start' else DateTemplate.LINE_BEGIN
			if wordBegin != 'start':
				regex = r'(?=^|\b|\W)' + regex
			else:
				regex = r"^(?:\W{0,2})?" + regex
				if not self.name.startswith('{^LN-BEG}'):
					self.name = '{^LN-BEG}' + self.name
		# if word end boundary:
		if boundEnd:
			self.flags |= DateTemplate.WORD_END
			regex += r'(?=\b|\W|$)'
		# recognize anchors the caller wrote into the regex itself:
		if not (self.flags & DateTemplate.LINE_BEGIN) and RE_LINE_BOUND_BEG.search(regex):
			self.flags |= DateTemplate.LINE_BEGIN
		if not (self.flags & DateTemplate.LINE_END) and RE_LINE_BOUND_END.search(regex):
			self.flags |= DateTemplate.LINE_END
		# remove possible special pattern "**" in front and end of regex:
		regex = RE_DEL_WRD_BOUNDS[0].sub(RE_DEL_WRD_BOUNDS[1], regex)
		if gf: # restore global flags:
			regex = gf.group(1) + regex
		self._regex = regex
		logSys.log(4, ' constructed regex %s', regex)
		# invalidate compiled form; recompiled lazily on next matchDate:
		self._cRegex = None

	regex = property(getRegex, setRegex, doc=
		"""Regex used to search for date.
		""")

	def _compileRegex(self):
		"""Compile regex by first usage.
		"""
		if not self._cRegex:
			try:
				# print('*'*10 + (' compile - %-30.30s -- %s' % (getattr(self, 'pattern', self.regex), self.name)))
				self._cRegex = re.compile(self.regex)
			except Exception as e:
				logSys.error('Compile %r failed, expression %r', self.name, self.regex)
				raise e

	def matchDate(self, line, *args):
		"""Check if regex for date matches on a log line.

		Optional *args are passed to re search as (pos[, endpos]) to
		restrict the searched region. Increments self.hits on success.
		"""
		if not self._cRegex:
			self._compileRegex()
		logSys.log(4, " search %s", self.regex)
		dateMatch = self._cRegex.search(line, *args); # pos, endpos
		if dateMatch:
			self.hits += 1
		# print('*'*10 + ('[%s] - %-30.30s -- %s' % ('*' if dateMatch else ' ', getattr(self, 'pattern', self.regex), self.name)))
		return dateMatch

	@abstractmethod
	def getDate(self, line, dateMatch=None, default_tz=None):
		"""Abstract method, which should return the date for a log line

		This should return the date for a log line, typically taking the
		date from the part of the line which matched the templates regex.
		This requires abstraction, therefore just raises exception.

		Parameters
		----------
		line : str
			Log line, of which the date should be extracted from.
		default_tz: if no explicit time zone is present in the line
			passing this will interpret it as in that time zone.

		Raises
		------
		NotImplementedError
			Abstract method, therefore always returns this.
		"""
		raise NotImplementedError("getDate() is abstract")

	@staticmethod
	def unboundPattern(pattern):
		"""Strip {UNB}/{^LN-BEG} markers and begin/end boundary expressions."""
		return RE_EXEANC_BOUND_BEG.sub('',
			RE_EXSANC_BOUND_BEG.sub('',
				RE_EXLINE_BOUND_BEG.sub('', RE_EXLINE_NO_BOUNDS.sub('', pattern))
			)
		)
|
||||
|
||||
|
||||
class DateEpoch(DateTemplate):
	"""A date template which searches for Unix timestamps.

	This includes Unix timestamps which appear at start of a line, optionally
	within square braces (nsd), or on SELinux audit log lines.

	Attributes
	----------
	name
	regex
	"""

	def __init__(self, lineBeginOnly=False, pattern=None, longFrm=False):
		DateTemplate.__init__(self)
		self.name = "Epoch" if not pattern else pattern
		self._longFrm = longFrm;
		# regex group index holding the timestamp value (2 if we wrap the whole pattern):
		self._grpIdx = 1
		epochRE = r"\d{10,11}\b(?:\.\d{3,6})?"
		if longFrm:
			# long form also accepts milli-/microsecond epochs (13-17 digits):
			self.name = "LongEpoch" if not pattern else pattern
			epochRE = r"\d{10,11}(?:\d{3}(?:\.\d{1,6}|\d{3})?)?"
		if pattern:
			# pattern should find the whole pattern, but cut out grouped match (or whole match if no groups specified):
			regex = RE_EPOCH_PATTERN.sub(lambda v: "(%s)" % epochRE, pattern)
			if not RE_GROUPED.search(pattern):
				regex = "(" + regex + ")"
				self._grpIdx = 2
			self.setRegex(regex)
		elif not lineBeginOnly:
			# conditional groups: accept '[epoch]' (nsd) and 'audit(epoch:id)' (SELinux) forms:
			regex = r"((?:^|(?P<square>(?<=^\[))|(?P<selinux>(?<=\baudit\()))%s)(?:(?(selinux)(?=:\d+\)))|(?(square)(?=\])))" % epochRE
			self.setRegex(regex, wordBegin=False) ;# already line begin resp. word begin anchored
		else:
			regex = r"((?P<square>(?<=^\[))?%s)(?(square)(?=\]))" % epochRE
			self.setRegex(regex, wordBegin='start', wordEnd=True)

	def getDate(self, line, dateMatch=None, default_tz=None):
		"""Method to return the date for a log line.

		Parameters
		----------
		line : str
			Log line, of which the date should be extracted from.
		default_tz: ignored, Unix timestamps are time zone independent

		Returns
		-------
		(float, str)
			Tuple containing a Unix timestamp, and the string of the date
			which was matched and in turned used to calculated the timestamp.
		"""
		if not dateMatch:
			dateMatch = self.matchDate(line)
		if dateMatch:
			v = dateMatch.group(self._grpIdx)
			# extract part of format which represents seconds since epoch
			if self._longFrm and len(v) >= 13:
				# 16+ digits without a dot -> microseconds; otherwise milliseconds:
				if len(v) >= 16 and '.' not in v:
					v = float(v) / 1000000
				else:
					v = float(v) / 1000
			return (float(v), dateMatch)
|
||||
|
||||
|
||||
class DatePatternRegex(DateTemplate):
	"""Date template, with regex/pattern

	Parameters
	----------
	pattern : str
		Sets the date templates pattern.

	Attributes
	----------
	name
	regex
	pattern
	"""

	# class-level: RE matching strptime '%' fields and the name-substitution map:
	_patternRE, _patternName = getTimePatternRE()
	_patternRE = re.compile(_patternRE)

	def __init__(self, pattern=None, **kwargs):
		super(DatePatternRegex, self).__init__()
		self._pattern = None
		if pattern is not None:
			self.setRegex(pattern, **kwargs)

	@property
	def pattern(self):
		"""The pattern used for regex with strptime "%" time fields.

		This should be a valid regular expression, of which matching string
		will be extracted from the log line. strptime style "%" fields will
		be replaced by appropriate regular expressions, or custom regex
		groups with names as per the strptime fields can also be used
		instead.
		"""
		return self._pattern

	@pattern.setter
	def pattern(self, pattern):
		self.setRegex(pattern)

	def setRegex(self, pattern, wordBegin=True, wordEnd=True):
		"""Translate a strptime-style pattern into the search regex.

		Raises TypeError if the pattern cannot be converted (invalid
		format or unescaped percent character).
		"""
		# original pattern:
		self._pattern = pattern
		# if unbound signalled - reset boundaries left and right:
		if RE_EXLINE_NO_BOUNDS.search(pattern):
			pattern = RE_EXLINE_NO_BOUNDS.sub('', pattern)
			wordBegin = wordEnd = False
		# if explicit given {^LN-BEG} - remove it from pattern and set 'start' in wordBegin:
		if wordBegin and RE_EXLINE_BOUND_BEG.search(pattern):
			pattern = RE_EXLINE_BOUND_BEG.sub('', pattern)
			wordBegin = 'start'
		try:
			# wrap to regex:
			fmt = self._patternRE.sub(r'%(\1)s', pattern)
			self.name = fmt % self._patternName
			regex = fmt % timeRE
			# if expected add (?iu) for "ignore case" and "unicode":
			if RE_ALPHA_PATTERN.search(pattern):
				regex = r'(?iu)' + regex
			super(DatePatternRegex, self).setRegex(regex, wordBegin, wordEnd)
		except Exception as e:
			raise TypeError("Failed to set datepattern '%s' (may be an invalid format or unescaped percent char): %s" % (pattern, e))

	def getDate(self, line, dateMatch=None, default_tz=None):
		"""Method to return the date for a log line.

		This uses a custom version of strptime, using the named groups
		from the instances `pattern` property.

		Parameters
		----------
		line : str
			Log line, of which the date should be extracted from.
		default_tz: optionally used to correct timezone

		Returns
		-------
		(float, str)
			Tuple containing a Unix timestamp, and the string of the date
			which was matched and in turned used to calculated the timestamp.
		"""
		if not dateMatch:
			dateMatch = self.matchDate(line)
		if dateMatch:
			return (reGroupDictStrptime(dateMatch.groupdict(), default_tz=default_tz),
				dateMatch)
|
||||
|
||||
|
||||
class DateTai64n(DateTemplate):
	"""A date template which matches TAI64N format timestamps.

	Attributes
	----------
	name
	regex
	"""

	def __init__(self, wordBegin=False):
		DateTemplate.__init__(self)
		self.name = "TAI64N"
		# We already know the format for TAI64N
		self.setRegex("@[0-9a-f]{24}", wordBegin=wordBegin)

	def getDate(self, line, dateMatch=None, default_tz=None):
		"""Method to return the date for a log line.

		Parameters
		----------
		line : str
			Log line, of which the date should be extracted from.
		default_tz: ignored, since TAI is time zone independent

		Returns
		-------
		(float, str)
			Tuple containing a Unix timestamp, and the string of the date
			which was matched and in turned used to calculated the timestamp.
		"""
		m = dateMatch if dateMatch else self.matchDate(line)
		if m:
			stamp = m.group(1)
			# hex seconds-since-epoch live in characters 2..16 of the stamp;
			# convert from HEX into local time stamp
			return (int(stamp[2:17], 16), m)
|
||||
169
fail2ban-master/fail2ban/server/failmanager.py
Normal file
169
fail2ban-master/fail2ban/server/failmanager.py
Normal file
@@ -0,0 +1,169 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from threading import Lock
import logging

from .ticket import FailTicket, BanTicket
from ..helpers import getLogger, BgService

# Gets the instance of the logger.
logSys = getLogger(__name__)
# level used for the verbose failure-bookkeeping messages in FailManager:
logLevel = logging.DEBUG
|
||||
|
||||
|
||||
class FailManager:
	"""Thread-safe accumulator of observed failures per failure-id (usually an IP).

	Failures are added per ticket id, aged out after ``maxTime`` seconds and
	handed over as ban candidates by :meth:`toBan` once ``maxRetry`` retries
	are accumulated.
	"""

	def __init__(self):
		# mutex protecting every access to the failure list below:
		self.__lock = Lock()
		# failure-id -> FailTicket with accumulated retry count/matches:
		self.__failList = dict()
		# ban threshold (retries needed before a ticket becomes banable):
		self.__maxRetry = 3
		# observation window in seconds (older entries are cleaned up):
		self.__maxTime = 600
		# monotonic total of all registered failures (for statistics):
		self.__failTotal = 0
		# max count of matched log-lines kept per ticket (0/None - keep none):
		self.maxMatches = 5
		# background maintenance service hook:
		self.__bgSvc = BgService()

	def setFailTotal(self, value):
		"""Set total failure counter (used e. g. on restore)."""
		self.__failTotal = value

	def getFailTotal(self):
		"""Return total number of failures registered so far."""
		return self.__failTotal

	def getFailCount(self):
		"""Return tuple (count of current failure-ids, sum of their retries)."""
		# may be slow on large list of failures, should be used for test purposes only...
		with self.__lock:
			return len(self.__failList), sum([f.getRetry() for f in list(self.__failList.values())])

	def setMaxRetry(self, value):
		"""Set the ban threshold."""
		self.__maxRetry = value

	def getMaxRetry(self):
		"""Return the ban threshold."""
		return self.__maxRetry

	def setMaxTime(self, value):
		"""Set the observation window (seconds)."""
		self.__maxTime = value

	def getMaxTime(self):
		"""Return the observation window (seconds)."""
		return self.__maxTime

	def addFailure(self, ticket, count=1, observed=False):
		"""Register a failure ticket and return its accumulated retry count.

		An existing entry with the same id gets its time adjusted and its
		retry counter / matches extended; otherwise the ticket is stored as
		a new entry.  If the id is unknown but the ticket is (or comes from
		the observer as) a ban, the failure is NOT re-added.
		"""
		attempts = 1
		with self.__lock:
			fid = ticket.getID()
			try:
				fData = self.__failList[fid]
				# if the same object - the same matches but +1 attempt:
				if fData is ticket:
					matches = None
					attempt = 1
				else:
					# will be incremented / extended (be sure we have at least +1 attempt):
					matches = ticket.getMatches() if self.maxMatches else None
					attempt = ticket.getAttempt()
					if attempt <= 0:
						attempt += 1
				unixTime = ticket.getTime()
				fData.adjustTime(unixTime, self.__maxTime)
				fData.inc(matches, attempt, count)
				# truncate to maxMatches:
				if self.maxMatches:
					matches = fData.getMatches()
					if len(matches) > self.maxMatches:
						fData.setMatches(matches[-self.maxMatches:])
				else:
					fData.setMatches(None)
			except KeyError:
				# not found - already banned - prevent to add failure if comes from observer:
				if observed or isinstance(ticket, BanTicket):
					return ticket.getRetry()
				# if already FailTicket - add it direct, otherwise create (using copy all ticket data):
				if isinstance(ticket, FailTicket):
					fData = ticket;
				else:
					fData = FailTicket.wrap(ticket)
				if count > ticket.getAttempt():
					fData.setRetry(count)
				self.__failList[fid] = fData

			attempts = fData.getRetry()
			self.__failTotal += 1

			if logSys.getEffectiveLevel() <= logLevel:
				# yoh: Since composing this list might be somewhat time consuming
				# in case of having many active failures, it should be ran only
				# if debug level is "low" enough
				failures_summary = ', '.join(['%s:%d' % (k, v.getRetry())
					for k,v in self.__failList.items()])
				logSys.log(logLevel, "Total # of detected failures: %d. Current failures from %d IPs (IP:count): %s"
					% (self.__failTotal, len(self.__failList), failures_summary))

		self.__bgSvc.service()
		return attempts

	def size(self):
		"""Return number of currently tracked failure-ids (not locked)."""
		return len(self.__failList)

	def cleanup(self, time):
		"""Drop all entries older than ``time - maxTime``."""
		time -= self.__maxTime
		with self.__lock:
			todelete = [fid for fid,item in self.__failList.items() \
				if item.getTime() <= time]
			if len(todelete) == len(self.__failList):
				# remove all:
				self.__failList = dict()
			elif not len(todelete):
				# nothing:
				return
			if len(todelete) / 2.0 <= len(self.__failList) / 3.0:
				# few as 2/3 should be removed - remove particular items:
				for fid in todelete:
					del self.__failList[fid]
			else:
				# create new dictionary without items to be deleted:
				self.__failList = dict((fid,item) for fid,item in self.__failList.items() \
					if item.getTime() > time)
		self.__bgSvc.service()

	def delFailure(self, fid):
		"""Discard the entry for the given failure-id (no-op if absent)."""
		with self.__lock:
			try:
				del self.__failList[fid]
			except KeyError:
				pass

	def toBan(self, fid=None):
		"""Return (and remove) a ticket that reached the ban threshold.

		If ``fid`` is given only this id is considered, otherwise the whole
		list is scanned.  Raises FailManagerEmpty if no entry is banable.
		"""
		with self.__lock:
			for fid in ([fid] if fid is not None and fid in self.__failList else self.__failList):
				data = self.__failList[fid]
				if data.getRetry() >= self.__maxRetry:
					del self.__failList[fid]
					return data
		self.__bgSvc.service()
		raise FailManagerEmpty
|
||||
|
||||
|
||||
class FailManagerEmpty(Exception):
	"""Raised by FailManager.toBan when no ticket reached the ban threshold."""
	pass
|
||||
466
fail2ban-master/fail2ban/server/failregex.py
Normal file
466
fail2ban-master/fail2ban/server/failregex.py
Normal file
@@ -0,0 +1,466 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
from .ipdns import IPAddr
|
||||
|
||||
|
||||
# Matches any fail2ban template tag, e. g. <HOST>, </F-USER> or <F-ID/>:
FTAG_CRE = re.compile(r'</?[\w\-]+/?>')

# Matches a custom failure tag name "F-NAME" resp. its closing form "/F-NAME":
FCUSTNAME_CRE = re.compile(r'^(/?)F-([A-Z0-9_\-]+)$'); # currently uppercase only

# Building blocks for address/host tag replacements, indexed by RI_* constants below:
R_HOST = [
	# separated ipv4:
	r"""(?:::f{4,6}:)?(?P<ip4>%s)""" % (IPAddr.IP_4_RE,),
	# separated ipv6:
	r"""(?P<ip6>%s)""" % (IPAddr.IP_6_RE,),
	# separated dns:
	r"""(?P<dns>[\w\-.^_]*\w)""",
	# place-holder for ADDR tag-replacement (joined):
	"",
	# place-holder for HOST tag replacement (joined):
	"",
	# CIDR in simplest integer form:
	r"(?P<cidr>\d+)",
	# place-holder for SUBNET tag-replacement
	"",
]
# Indexes into R_HOST:
RI_IPV4 = 0
RI_IPV6 = 1
RI_DNS = 2
RI_ADDR = 3
RI_HOST = 4
RI_CIDR = 5
RI_SUBNET = 6

# Fill the joined place-holders: ADDR = ip4|ip6, HOST = ADDR|dns, SUBNET = ADDR[/CIDR]:
R_HOST[RI_ADDR] = r"\[?(?:%s|%s)\]?" % (R_HOST[RI_IPV4], R_HOST[RI_IPV6],)
R_HOST[RI_HOST] = r"(?:%s|%s)" % (R_HOST[RI_ADDR], R_HOST[RI_DNS],)
R_HOST[RI_SUBNET] = r"\[?(?:%s|%s)(?:/%s)?\]?" % (R_HOST[RI_IPV4], R_HOST[RI_IPV6], R_HOST[RI_CIDR],)

# Static replacements for host-related tags (both <TAG> and custom <F-TAG/> forms):
RH4TAG = {
	# separated ipv4 (self closed, closed):
	"IP4": R_HOST[RI_IPV4],
	"F-IP4/": R_HOST[RI_IPV4],
	# separated ipv6 (self closed, closed):
	"IP6": R_HOST[RI_IPV6],
	"F-IP6/": R_HOST[RI_IPV6],
	# 2 address groups instead of <ADDR> - in opposition to `<HOST>`,
	# for separate usage of 2 address groups only (regardless of `usedns`), `ip4` and `ip6` together
	"ADDR": R_HOST[RI_ADDR],
	"F-ADDR/": R_HOST[RI_ADDR],
	# subnet tags for usage as `<ADDR>/<CIDR>` or `<SUBNET>`:
	"CIDR": R_HOST[RI_CIDR],
	"F-CIDR/": R_HOST[RI_CIDR],
	"SUBNET": R_HOST[RI_SUBNET],
	"F-SUBNET/":R_HOST[RI_SUBNET],
	# separated dns (self closed, closed):
	"DNS": R_HOST[RI_DNS],
	"F-DNS/": R_HOST[RI_DNS],
	# default failure-id as no space tag:
	"F-ID/": r"""(?P<fid>\S+)""",
	# default failure port, like 80 or http :
	"F-PORT/": r"""(?P<fport>\w+)""",
}
|
||||
|
||||
# default failure groups map for customizable expressions (with different group-id):
R_MAP = {
	"id": "fid",
	"port": "fport",
}

# map global flags like ((?i)xxx) or (?:(?i)xxx) to local flags (?i:xxx) if supported by RE-engine in this python version:
try:
	re.search("^re(?i:val)$", "reVAL")
	R_GLOB2LOCFLAGS = ( re.compile(r"(?<!\\)\((?:\?:)?(\(\?[a-z]+)\)"), r"\1:" )
except re.error:
	# scoped (local) inline flags are unsupported by this RE-engine;
	# bug fix: was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit -
	# the only expected failure here is a pattern error from the RE-engine.
	R_GLOB2LOCFLAGS = ()

def mapTag2Opt(tag):
	"""Return the failure option name for the given tag.

	The tag is lower-cased and remapped via R_MAP, e. g. "ID" -> "fid",
	"Port" -> "fport"; any unmapped tag yields its lower-case form.
	"""
	tag = tag.lower()
	return R_MAP.get(tag, tag)
|
||||
|
||||
|
||||
# complex names:
# ALT_ - alternate names to be merged, e. g. alt_user_1 -> user ...
ALTNAME_PRE = 'alt_'
# TUPLE_ - names of parts to be combined to single value as tuple
TUPNAME_PRE = 'tuple_'

# Recognized prefixes of complex capture-group names:
COMPLNAME_PRE = (ALTNAME_PRE, TUPNAME_PRE)
# Splits a complex group name into (prefix, base-name), ignoring a trailing "_<N>" suffix:
COMPLNAME_CRE = re.compile(r'^(' + '|'.join(COMPLNAME_PRE) + r')(.*?)(?:_\d+)?$')
|
||||
|
||||
|
||||
##
|
||||
# Regular expression class.
|
||||
#
|
||||
# This class represents a regular expression with its compiled version.
|
||||
|
||||
class Regex:
	"""Wrapper around a fail2ban regular expression and its compiled form.

	Resolves fail2ban template tags (<HOST>, <ADDR>, <F-...>, <SKIPLINES>, ...),
	compiles the result, and keeps the last match together with matched /
	unmatched line bookkeeping for multi-line buffers.
	"""

	##
	# Constructor.
	#
	# Creates a new object. This method can throw RegexException in order to
	# avoid construction of invalid object.
	# @param value the regular expression

	def __init__(self, regex, multiline=False, **kwargs):
		self._matchCache = None
		# Perform shortcuts expansions.
		# Replace standard f2b-tags (like "<HOST>", etc) using default regular expressions:
		regex = Regex._resolveHostTag(regex, **kwargs)
		#
		if regex.lstrip() == '':
			raise RegexException("Cannot add empty regex")
		# special handling wrapping global flags to local flags:
		if R_GLOB2LOCFLAGS:
			regex = R_GLOB2LOCFLAGS[0].sub(R_GLOB2LOCFLAGS[1], regex)
		try:
			self._regexObj = re.compile(regex, re.MULTILINE if multiline else 0)
			self._regex = regex
			# collect complex group names: "alt_*" to be merged, "tuple_*" to be combined:
			self._altValues = []
			self._tupleValues = []
			for k in [k for k in self._regexObj.groupindex if len(k) > len(COMPLNAME_PRE[0])]:
				n = COMPLNAME_CRE.match(k)
				if n:
					g, n = n.group(1), mapTag2Opt(n.group(2))
					if g == ALTNAME_PRE:
						self._altValues.append((k,n))
					else:
						self._tupleValues.append((k,n))
			self._altValues.sort()
			self._tupleValues.sort()
			self._altValues = self._altValues if len(self._altValues) else None
			self._tupleValues = self._tupleValues if len(self._tupleValues) else None
		except re.error as e:
			raise RegexException("Unable to compile regular expression '%s':\n%s" %
				(regex, e))
		# set fetch handler depending on presence of alternate (or tuple) tags:
		self.getGroups = self._getGroupsWithAlt if (self._altValues or self._tupleValues) else self._getGroups

	def __str__(self):
		return "%s(%r)" % (self.__class__.__name__, self._regex)

	##
	# Replaces "<HOST>", "<IP4>", "<IP6>", "<FID>" with default regular expression for host
	#
	# (see gh-1374 for the discussion about other candidates)
	# @return the replaced regular expression as string

	@staticmethod
	def _resolveHostTag(regex, useDns="yes"):

		# tracks custom <F-...> tags already opened (to match their closing form):
		openTags = dict()
		props = {
			'nl': 0, # new lines counter by <SKIPLINES> tag;
		}
		# tag interpolation callable:
		def substTag(m):
			tag = m.group()
			tn = tag[1:-1]
			# 3 groups instead of <HOST> - separated ipv4, ipv6 and host (dns)
			if tn == "HOST":
				return R_HOST[RI_HOST if useDns not in ("no",) else RI_ADDR]
			# replace "<SKIPLINES>" with regular expression for multiple lines (by buffering with maxlines)
			if tn == "SKIPLINES":
				nl = props['nl']
				props['nl'] = nl + 1
				return r"\n(?P<skiplines%i>(?:(?:.*\n)*?))" % (nl,)
			# static replacement from RH4TAG:
			try:
				return RH4TAG[tn]
			except KeyError:
				pass

			# (begin / end tag) for customizable expressions, additionally used as
			# user custom tags (match will be stored in ticket data, can be used in actions):
			m = FCUSTNAME_CRE.match(tn)
			if m: # match F-...
				m = m.groups()
				tn = m[1]
				# close tag:
				if m[0]:
					# check it was already open:
					if openTags.get(tn):
						return ")"
					return tag; # tag not opened, use original
				# open tag:
				openTags[tn] = 1
				# if should be mapped:
				tn = mapTag2Opt(tn)
				return "(?P<%s>" % (tn,)

			# original, no replacement:
			return tag

		# substitute tags:
		return FTAG_CRE.sub(substTag, regex)

	##
	# Gets the regular expression.
	#
	# The effective regular expression used is returned.
	# @return the regular expression

	def getRegex(self):
		return self._regex

	##
	# Returns string buffer using join of the tupleLines.
	#
	@staticmethod
	def _tupleLinesBuf(tupleLines):
		return "\n".join(["".join(v[::2]) for v in tupleLines]) + "\n"

	##
	# Searches the regular expression.
	#
	# Sets an internal cache (match object) in order to avoid searching for
	# the pattern again. This method must be called before calling any other
	# method of this object.
	# @param a list of tuples. The tuples are ( prematch, datematch, postdatematch )

	def search(self, tupleLines, orgLines=None):
		buf = tupleLines
		if not isinstance(tupleLines, str):
			buf = Regex._tupleLinesBuf(tupleLines)
		self._matchCache = self._regexObj.search(buf)
		if self._matchCache:
			if orgLines is None: orgLines = tupleLines
			# if single-line:
			if len(orgLines) <= 1:
				self._matchedTupleLines = orgLines
				self._unmatchedTupleLines = []
			else:
				# Find start of the first line where the match was found
				try:
					matchLineStart = self._matchCache.string.rindex(
						"\n", 0, self._matchCache.start() +1 ) + 1
				except ValueError:
					matchLineStart = 0
				# Find end of the last line where the match was found
				try:
					matchLineEnd = self._matchCache.string.index(
						"\n", self._matchCache.end() - 1) + 1
				except ValueError:
					matchLineEnd = len(self._matchCache.string)

				lineCount1 = self._matchCache.string.count(
					"\n", 0, matchLineStart)
				lineCount2 = self._matchCache.string.count(
					"\n", 0, matchLineEnd)
				self._matchedTupleLines = orgLines[lineCount1:lineCount2]
				self._unmatchedTupleLines = orgLines[:lineCount1]
				n = 0
				# move lines captured by <SKIPLINES> from matched to unmatched:
				for skippedLine in self.getSkippedLines():
					for m, matchedTupleLine in enumerate(
						self._matchedTupleLines[n:]):
						if "".join(matchedTupleLine[::2]) == skippedLine:
							self._unmatchedTupleLines.append(
								self._matchedTupleLines.pop(n+m))
							n += m
							break
				self._unmatchedTupleLines.extend(orgLines[lineCount2:])

	# Checks if the previous call to search() matched.
	#
	# @return True if a match was found, False otherwise

	def hasMatched(self):
		if self._matchCache:
			return True
		else:
			return False

	##
	# Returns all matched groups.
	#

	def _getGroups(self):
		return self._matchCache.groupdict()

	def _getGroupsWithAlt(self):
		fail = self._matchCache.groupdict()
		#fail = fail.copy()
		# merge alternate values (e. g. 'alt_user_1' -> 'user' or 'alt_host' -> 'host'):
		if self._altValues:
			for k,n in self._altValues:
				v = fail.get(k)
				if v and not fail.get(n):
					fail[n] = v
		# combine tuple values (e. g. 'id', 'tuple_id' ... 'tuple_id_N' -> 'id'):
		if self._tupleValues:
			for k,n in self._tupleValues:
				v = fail.get(k)
				t = fail.get(n)
				if isinstance(t, tuple):
					t += (v,)
				else:
					t = (t,v,)
				fail[n] = t
		return fail

	def getGroups(self): # pragma: no cover - abstract function (replaced in __init__)
		pass

	##
	# Returns skipped lines.
	#
	# This returns skipped lines captured by the <SKIPLINES> tag.
	# @return list of skipped lines

	def getSkippedLines(self):
		if not self._matchCache:
			return []
		skippedLines = ""
		n = 0
		while True:
			try:
				if self._matchCache.group("skiplines%i" % n) is not None:
					skippedLines += self._matchCache.group("skiplines%i" % n)
				n += 1
			except IndexError:
				break
			# KeyError is because of PyPy issue1665 affecting pypy <= 2.2.1
			except KeyError:
				if 'PyPy' not in sys.version: # pragma: no cover - not sure this is even reachable
					raise
				break
		return skippedLines.splitlines(False)

	##
	# Returns unmatched lines.
	#
	# This returns unmatched lines including captured by the <SKIPLINES> tag.
	# @return list of unmatched lines

	def getUnmatchedTupleLines(self):
		if not self.hasMatched():
			return []
		else:
			return self._unmatchedTupleLines

	def getUnmatchedLines(self):
		if not self.hasMatched():
			return []
		else:
			return ["".join(line) for line in self._unmatchedTupleLines]

	##
	# Returns matched lines.
	#
	# This returns matched lines by excluding those captured
	# by the <SKIPLINES> tag.
	# @return list of matched lines

	def getMatchedTupleLines(self):
		if not self.hasMatched():
			return []
		else:
			return self._matchedTupleLines

	def getMatchedLines(self):
		if not self.hasMatched():
			return []
		else:
			return ["".join(line) for line in self._matchedTupleLines]
|
||||
|
||||
|
||||
##
|
||||
# Exception dedicated to the class Regex.
|
||||
|
||||
class RegexException(Exception):
	"""Raised for invalid, empty or failure-id-less regular expressions."""
	pass
|
||||
|
||||
|
||||
##
# Groups used as failure identifier.
#
# The order of this tuple is important while searching for failure-id
#
# NOTE: the name "GROPS" (sic) is part of the module's public interface - keep as is.
FAILURE_ID_GROPS = ("fid", "ip4", "ip6", "dns")

# Additionally allows multi-line failure-id (used for wrapping e. g. conn-id to host)
#
FAILURE_ID_PRESENTS = FAILURE_ID_GROPS + ("mlfid",)
|
||||
|
||||
##
|
||||
# Regular expression class.
|
||||
#
|
||||
# This class represents a regular expression with its compiled version.
|
||||
|
||||
class FailRegex(Regex):
	"""Failure regular expression - a Regex that must capture a failure-id.

	At least one group of FAILURE_ID_PRESENTS has to be captured, either by
	this expression itself or by the supplied prefix expression; otherwise
	construction fails with RegexException.
	"""

	def __init__(self, regex, prefRegex=None, **kwargs):
		# compile via the base class first (resolves tags, may raise):
		Regex.__init__(self, regex, **kwargs)
		# ensure some failure-id group is present here or in the prefix regex:
		hasFID = any(grp in self._regexObj.groupindex for grp in FAILURE_ID_PRESENTS)
		if not hasFID and prefRegex is not None:
			hasFID = any(grp in prefRegex._regexObj.groupindex for grp in FAILURE_ID_PRESENTS)
		if not hasFID:
			raise RegexException("No failure-id group in '%s'" % self._regex)

	def getFailID(self, groups=FAILURE_ID_GROPS):
		"""Return the matched failure-id as string.

		The first group of ``groups`` that matched (non-None) wins; raises
		RegexException if none of them is present in the last match.
		"""
		mc = self._matchCache
		fid = None
		for grp in groups:
			try:
				fid = mc.group(grp)
			except (IndexError, KeyError):
				continue
			if fid is not None:
				break
		if fid is None:
			# report both the searched string and the pattern for diagnostics:
			raise RegexException("No group found in '%s' using '%s'" % (mc.string, mc.re))
		return str(fid)

	def getHost(self):
		"""Return the matched host (ip4, ip6 or dns group)."""
		return self.getFailID(("ip4", "ip6", "dns"))

	def getIP(self):
		"""Return the matched address as IPAddr, honoring an optional cidr group."""
		groups = self.getGroups()
		cidr = groups.get("cidr") or IPAddr.CIDR_UNSPEC
		return IPAddr(self.getFailID(("ip4", "ip6")), int(cidr))
|
||||
1574
fail2ban-master/fail2ban/server/filter.py
Normal file
1574
fail2ban-master/fail2ban/server/filter.py
Normal file
File diff suppressed because it is too large
Load Diff
176
fail2ban-master/fail2ban/server/filterpoll.py
Normal file
176
fail2ban-master/fail2ban/server/filterpoll.py
Normal file
@@ -0,0 +1,176 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier, Yaroslav Halchenko
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier; 2012 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from .filter import FileFilter
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger, logging
|
||||
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
|
||||
# Log reader class.
|
||||
#
|
||||
# This class reads a log file and detects login failures or anything else
|
||||
# that matches a given regular expression. This class is instantiated by
|
||||
# a Jail object.
|
||||
|
||||
class FilterPoll(FileFilter):
	"""Polling-based log reader.

	Periodically stats each monitored log file and feeds modified files to
	the failure detection of the base FileFilter. Instantiated by a Jail.
	"""

	##
	# Constructor.
	#
	# Initialize the filter object with default values.
	# @param jail the jail object

	def __init__(self, jail):
		FileFilter.__init__(self, jail)
		## The time of the last modification of the file.
		# path -> (mtime, inode, size) of the last observed stat:
		self.__prevStats = dict()
		# path -> consecutive stat-error counter (file absent etc):
		self.__file404Cnt = dict()
		logSys.debug("Created FilterPoll")

	##
	# Add a log file path
	#
	# @param path log file path

	def _addLogPath(self, path):
		self.__prevStats[path] = (0, None, None) # mtime, ino, size
		self.__file404Cnt[path] = 0

	##
	# Delete a log path
	#
	# @param path the log file to delete

	def _delLogPath(self, path):
		del self.__prevStats[path]
		del self.__file404Cnt[path]

	##
	# Get a modified log path at once
	#
	def getModified(self, modlst):
		"""Append all modified monitored files to ``modlst`` and return it."""
		for filename in self.getLogPaths():
			if self.isModified(filename):
				modlst.append(filename)
		return modlst

	##
	# Main loop.
	#
	# This function is the main loop of the thread. It checks if the
	# file has been modified and looks for failures.
	# @return True when the thread exits nicely

	def run(self):
		while self.active:
			try:
				if logSys.getEffectiveLevel() <= 4:
					logSys.log(4, "Woke up idle=%s with %d files monitored",
						self.idle, self.getLogCount())
				if self.idle:
					if not Utils.wait_for(lambda: not self.active or not self.idle,
						self.sleeptime * 10, self.sleeptime
					):
						self.ticks += 1
						continue
				# Get file modification
				modlst = []
				Utils.wait_for(lambda: not self.active or self.getModified(modlst),
					self.sleeptime)
				if not self.active: # pragma: no cover - timing
					break
				for filename in modlst:
					self.getFailures(filename)

				self.ticks += 1
				if self.ticks % 10 == 0:
					self.performSvc()
			except Exception as e: # pragma: no cover
				if not self.active: # if not active - error by stop...
					break
				logSys.error("Caught unhandled exception in main cycle: %r", e,
					exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
				# incr common error counter:
				self.commonError("unhandled", e)
		logSys.debug("[%s] filter terminated", self.jailName)
		return True

	##
	# Checks if the log file has been modified.
	#
	# Checks if the log file has been modified using os.stat().
	# @return True if log file has been modified

	def isModified(self, filename):
		try:
			logStats = os.stat(filename)
			stats = logStats.st_mtime, logStats.st_ino, logStats.st_size
			pstats = self.__prevStats.get(filename, (0,))
			if logSys.getEffectiveLevel() <= 4:
				# we do not want to waste time on strftime etc if not necessary
				dt = logStats.st_mtime - pstats[0]
				logSys.log(4, "Checking %s for being modified. Previous/current stats: %s / %s. dt: %s",
					filename, pstats, stats, dt)
				# os.system("stat %s | grep Modify" % filename)
			self.__file404Cnt[filename] = 0
			if pstats == stats:
				return False
			logSys.debug("%s has been modified", filename)
			self.__prevStats[filename] = stats
			return True
		except Exception as e:
			# still alive (may be deleted because multi-threaded):
			if not self.getLog(filename) or self.__prevStats.get(filename) is None:
				logSys.warning("Log %r seems to be down: %s", filename, e)
				return False
			# log error:
			if self.__file404Cnt[filename] < 2:
				# bug fix: this handler catches any Exception (deliberately, see above),
				# not only OSError - access `errno` defensively so a non-OSError does
				# not raise AttributeError here and mask the original problem:
				if getattr(e, 'errno', None) == 2: # ENOENT
					logSys.debug("Log absence detected (possibly rotation) for %s, reason: %s",
						filename, e)
				else: # pragma: no cover
					logSys.error("Unable to get stat on %s because of: %s",
						filename, e,
						exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# increase file and common error counters:
			self.__file404Cnt[filename] += 1
			self.commonError()
			if self.__file404Cnt[filename] > 50:
				logSys.warning("Too many errors. Remove file %r from monitoring process", filename)
				self.__file404Cnt[filename] = 0
				self.delLogPath(filename)
			return False

	def getPendingPaths(self):
		"""Return list of paths currently tracked by the error counter."""
		return list(self.__file404Cnt.keys())
|
||||
401
fail2ban-master/fail2ban/server/filterpyinotify.py
Normal file
401
fail2ban-master/fail2ban/server/filterpyinotify.py
Normal file
@@ -0,0 +1,401 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Original author: Cyril Jaquier
|
||||
|
||||
__author__ = "Cyril Jaquier, Lee Clemens, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Lee Clemens, 2012 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import logging
|
||||
import os
|
||||
from os.path import dirname, sep as pathsep
|
||||
|
||||
from .failmanager import FailManagerEmpty
|
||||
from .filter import FileFilter
|
||||
from .mytime import MyTime, time
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger
|
||||
|
||||
# pyinotify may have dependency to asyncore, so import it after helper to ensure
|
||||
# we've a path to compat folder:
|
||||
import pyinotify
|
||||
|
||||
# Verify that pyinotify is functional on this system
# Even though imports -- might be dysfunctional, e.g. as on kfreebsd
try:
	manager = pyinotify.WatchManager()
	del manager
except Exception as e: # pragma: no cover
	raise ImportError("Pyinotify is probably not functional on this system: %s"
		% str(e))

# Gets the instance of the logger.
logSys = getLogger(__name__)

# Override pyinotify default logger/init-handler:
def _pyinotify_logger_init(): # pragma: no cover
	"""Return fail2ban's logger so pyinotify logs into our logging hierarchy."""
	return logSys
pyinotify._logger_init = _pyinotify_logger_init
pyinotify.log = logSys
|
||||
|
||||
##
|
||||
# Log reader class.
|
||||
#
|
||||
# This class reads a log file and detects login failures or anything else
|
||||
# that matches a given regular expression. This class is instantiated by
|
||||
# a Jail object.
|
||||
|
||||
class FilterPyinotify(FileFilter):
|
||||
	##
	# Constructor.
	#
	# Initialize the filter object with default values.
	# @param jail the jail object

	def __init__(self, jail):
		FileFilter.__init__(self, jail)
		# Pyinotify watch manager
		self.__monitor = pyinotify.WatchManager()
		# notifier is created elsewhere (None until started) - see start/stop logic
		self.__notifier = None
		# monitored log files, keyed by path:
		self.__watchFiles = dict()
		# monitored parent directories, keyed by path:
		self.__watchDirs = dict()
		# vanished paths awaiting re-check: path -> [retard-interval, isDir]
		self.__pending = dict()
		# time of the last pending re-check and current minimal retard interval:
		self.__pendingChkTime = 0
		self.__pendingMinTime = 60
		logSys.debug("Created FilterPyinotify")
|
||||
|
||||
	def callback(self, event, origin=''):
		"""Handle a single inotify event for a watched file or directory.

		Creation/move-in of a monitored file refreshes its watcher; removal or
		self-move queues the path for later re-check via _addPending; any other
		event on a monitored file triggers failure processing.
		"""
		logSys.log(4, "[%s] %sCallback for Event: %s", self.jailName, origin, event)
		path = event.pathname
		# check watching of this path:
		isWF = False
		isWD = path in self.__watchDirs
		if not isWD and path in self.__watchFiles:
			isWF = True
		assumeNoDir = False
		if event.mask & ( pyinotify.IN_CREATE | pyinotify.IN_MOVED_TO ):
			# skip directories altogether
			if event.mask & pyinotify.IN_ISDIR:
				logSys.debug("Ignoring creation of directory %s", path)
				return
			# check if that is a file we care about
			if not isWF:
				logSys.debug("Ignoring creation of %s we do not monitor", path)
				return
			self._refreshWatcher(path)
		elif event.mask & (pyinotify.IN_IGNORED | pyinotify.IN_MOVE_SELF | pyinotify.IN_DELETE_SELF):
			assumeNoDir = event.mask & (pyinotify.IN_MOVE_SELF | pyinotify.IN_DELETE_SELF)
			# fix pyinotify behavior with '-unknown-path' (if target not watched also):
			if (assumeNoDir and
					path.endswith('-unknown-path') and not isWF and not isWD
			):
				path = path[:-len('-unknown-path')]
				isWD = path in self.__watchDirs
			# watch was removed for some reasons (log-rotate?):
			if isWD and (assumeNoDir or not os.path.isdir(path)):
				self._addPending(path, event, isDir=True)
			elif not isWF: # pragma: no cover (assume too sporadic)
				for logpath in self.__watchDirs:
					if logpath.startswith(path + pathsep) and (assumeNoDir or not os.path.isdir(logpath)):
						self._addPending(logpath, event, isDir=True)
			if isWF and not os.path.isfile(path):
				self._addPending(path, event)
				return
		# do nothing if idle:
		if self.idle: # pragma: no cover (too sporadic to get idle in callback)
			return
		# be sure we process a file:
		if not isWF:
			logSys.debug("Ignoring event (%s) of %s we do not monitor", event.maskname, path)
			return
		self._process_file(path)
|
||||
|
||||
def _process_file(self, path):
|
||||
"""Process a given file
|
||||
|
||||
TODO -- RF:
|
||||
this is a common logic and must be shared/provided by FileFilter
|
||||
"""
|
||||
if not self.idle:
|
||||
self.getFailures(path)
|
||||
|
||||
	def _addPending(self, path, reason, isDir=False):
		"""Queue a vanished path for periodic re-check in _checkPending.

		``reason`` is a pyinotify.Event or a (maskname, pathname) pair and is
		used for the log message only; already-pending paths are left as-is.
		"""
		if path not in self.__pending:
			# start with the shortest retard interval; reset min-time so the
			# next service tick re-checks immediately:
			self.__pending[path] = [Utils.DEFAULT_SLEEP_INTERVAL, isDir];
			self.__pendingMinTime = 0
			if isinstance(reason, pyinotify.Event):
				reason = [reason.maskname, reason.pathname]
			logSys.log(logging.MSG, "Log absence detected (possibly rotation) for %s, reason: %s of %s",
				path, *reason)
|
||||
|
||||
def _delPending(self, path):
|
||||
try:
|
||||
del self.__pending[path]
|
||||
except KeyError: pass
|
||||
|
||||
def getPendingPaths(self):
	"""Return a snapshot list of all paths currently pending re-appearance."""
	return list(self.__pending)
|
||||
|
||||
def _checkPending(self):
	"""Poll pending paths (queued by _addPending) and re-process those that
	re-appeared on disk (e.g. after log-rotation finished).

	Uses per-path exponential backoff (capped at 60s) between checks.
	"""
	if not self.idle and not self.__pending:
		return
	ntm = time.time()
	# throttle: not more often than the smallest retard interval of all pending paths:
	if ntm < self.__pendingChkTime + self.__pendingMinTime:
		return
	found = {}
	minTime = 60
	for path, (retardTM, isDir) in list(self.__pending.items()):
		# skip path if its individual retard interval is not yet expired:
		if ntm - self.__pendingChkTime < retardTM:
			if minTime > retardTM: minTime = retardTM
			continue
		chkpath = os.path.isdir if isDir else os.path.isfile
		if not chkpath(path): # not found - prolong for next time
			# exponential backoff, capped at 60 seconds:
			if retardTM < 60: retardTM *= 2
			if minTime > retardTM: minTime = retardTM
			try:
				self.__pending[path][0] = retardTM
			except KeyError: pass
			continue
		logSys.log(logging.MSG, "Log presence detected for %s %s",
			"directory" if isDir else "file", path)
		found[path] = isDir
	self.__pendingChkTime = time.time()
	self.__pendingMinTime = minTime
	# process now because we've missed it in monitoring:
	for path, isDir in found.items():
		self._delPending(path)
		# refresh monitoring of this:
		# (isDir=None is the special "process only" marker set by _addLogPath)
		if isDir is not None:
			self._refreshWatcher(path, isDir=isDir)
		if isDir:
			# check all files belong to this dir:
			for logpath in list(self.__watchFiles):
				if logpath.startswith(path + pathsep):
					# if still no file - add to pending, otherwise refresh and process:
					if not os.path.isfile(logpath):
						self._addPending(logpath, ('FROM_PARDIR', path))
					else:
						self._refreshWatcher(logpath)
						self._process_file(logpath)
		else:
			# process (possibly no old events for it from watcher):
			self._process_file(path)
|
||||
|
||||
def _refreshWatcher(self, oldPath, newPath=None, isDir=False):
|
||||
if not newPath: newPath = oldPath
|
||||
# we need to substitute the watcher with a new one, so first
|
||||
# remove old one and then place a new one
|
||||
if not isDir:
|
||||
self._delFileWatcher(oldPath)
|
||||
self._addFileWatcher(newPath)
|
||||
else:
|
||||
self._delDirWatcher(oldPath)
|
||||
self._addDirWatcher(newPath)
|
||||
|
||||
def _addFileWatcher(self, path):
	# Install an IN_MODIFY watch for the given log file (and ensure its
	# parent directory is watched too, to notice re-creation after rotation).
	# we need to watch also the directory for IN_CREATE
	self._addDirWatcher(dirname(path))
	# add file watcher:
	# add_watch returns a {path: wd} dict, merged into our path->wd registry
	wd = self.__monitor.add_watch(path, pyinotify.IN_MODIFY)
	self.__watchFiles.update(wd)
	logSys.debug("Added file watcher for %s", path)
|
||||
|
||||
def _delWatch(self, wdInt):
|
||||
m = self.__monitor
|
||||
try:
|
||||
if m.get_path(wdInt) is not None:
|
||||
wd = m.rm_watch(wdInt, quiet=False)
|
||||
return True
|
||||
except pyinotify.WatchManagerError as e:
|
||||
if m.get_path(wdInt) is not None and not str(e).endswith("(EINVAL)"): # prama: no cover
|
||||
logSys.debug("Remove watch causes: %s", e)
|
||||
raise e
|
||||
return False
|
||||
|
||||
def _delFileWatcher(self, path):
	# Remove the file watch for `path` from the registry and the monitor.
	# Returns True if a watcher was registered for the path, False otherwise.
	try:
		wdInt = self.__watchFiles.pop(path)
		if not self._delWatch(wdInt):
			# registry knew the path but kernel watch was already gone:
			logSys.debug("Non-existing file watcher %r for file %s", wdInt, path)
		logSys.debug("Removed file watcher for %s", path)
		return True
	except KeyError: # pragma: no cover
		pass
	return False
|
||||
|
||||
def _addDirWatcher(self, path_dir):
	# Add watch for the directory (idempotent): create/move/delete events
	# of the directory itself and of entries inside it.
	if path_dir not in self.__watchDirs:
		self.__watchDirs.update(
			self.__monitor.add_watch(path_dir, pyinotify.IN_CREATE |
				pyinotify.IN_MOVED_TO | pyinotify.IN_MOVE_SELF |
				pyinotify.IN_DELETE_SELF | pyinotify.IN_ISDIR))
		logSys.debug("Added monitor for the parent directory %s", path_dir)
|
||||
|
||||
def _delDirWatcher(self, path_dir):
	# Remove watches for the directory (no-op if not registered):
	try:
		wdInt = self.__watchDirs.pop(path_dir)
		if not self._delWatch(wdInt): # pragma: no cover
			logSys.debug("Non-existing file watcher %r for directory %s", wdInt, path_dir)
		logSys.debug("Removed monitor for the parent directory %s", path_dir)
	except KeyError: # pragma: no cover
		pass
|
||||
|
||||
##
# Add a log file path
#
# @param path log file path

def _addLogPath(self, path):
	self._addFileWatcher(path)
	# notify (wake up if in waiting):
	if self.active:
		self.__pendingMinTime = 0
		# retard until filter gets started, isDir=None signals special case: process file only (don't need to refresh monitor):
		self._addPending(path, ('INITIAL', path), isDir=None)
|
||||
|
||||
##
# Delete a log path
#
# @param path the log file to delete

def _delLogPath(self, path):
	self._delPending(path)
	if not self._delFileWatcher(path): # pragma: no cover
		logSys.error("Failed to remove watch on path: %s", path)

	# drop the parent-directory watch too, unless some other monitored
	# file still lives under that directory:
	path_dir = dirname(path)
	for k in list(self.__watchFiles):
		if k.startswith(path_dir + pathsep):
			path_dir = None
			break
	if path_dir:
		# Remove watches for the directory
		# since there is no other monitored file under this directory
		self._delPending(path_dir)
		self._delDirWatcher(path_dir)
|
||||
|
||||
# pyinotify.ProcessEvent default handler:
# delegates every event to self.callback; errors are counted, never propagated
# into pyinotify's dispatch loop.
def __process_default(self, event):
	try:
		self.callback(event, origin='Default ')
	except Exception as e: # pragma: no cover
		logSys.error("Error in FilterPyinotify callback: %s",
			e, exc_info=logSys.getEffectiveLevel() <= logging.DEBUG)
		# incr common error counter:
		self.commonError()
	self.ticks += 1
|
||||
|
||||
@property
def __notify_maxtout(self):
	# timeout for pyinotify must be set in milliseconds (fail2ban time values are
	# floats contain seconds), max 0.5 sec (additionally regards pending check time)
	return min(self.sleeptime, 0.5, self.__pendingMinTime) * 1000
|
||||
|
||||
##
# Main loop.
#
# Since all detection is offloaded to pyinotifier -- no manual
# loop is necessary

def run(self):
	# wire our default handler into a Notifier bound to the watch manager:
	prcevent = pyinotify.ProcessEvent()
	prcevent.process_default = self.__process_default
	self.__notifier = pyinotify.Notifier(self.__monitor,
		prcevent, timeout=self.__notify_maxtout)
	logSys.debug("[%s] filter started (pyinotifier)", self.jailName)
	while self.active:
		try:

			# slow check events while idle:
			if self.idle:
				if Utils.wait_for(lambda: not self.active or not self.idle,
					min(self.sleeptime * 10, self.__pendingMinTime),
					min(self.sleeptime, self.__pendingMinTime)
				):
					if not self.active: break

			# default pyinotify handling using Notifier:
			self.__notifier.process_events()

			# wait for events / timeout:
			# (truthy when stopping, when inotify has events, or when pending
			# paths need a re-check)
			def __check_events():
				return (
					not self.active
					or bool(self.__notifier.check_events(timeout=self.__notify_maxtout))
					or (self.__pendingMinTime and self.__pending)
				)
			wres = Utils.wait_for(__check_events, min(self.sleeptime, self.__pendingMinTime))
			if wres:
				if not self.active: break
				# a dict result means wait_for timed out internally - then
				# there is nothing to read from the notifier:
				if not isinstance(wres, dict):
					self.__notifier.read_events()

			self.ticks += 1

			# check pending files/dirs (logrotate ready):
			if self.idle:
				continue
			self._checkPending()
			# periodic service tasks (every 10th tick):
			if self.ticks % 10 == 0:
				self.performSvc()

		except Exception as e: # pragma: no cover
			if not self.active: # if not active - error by stop...
				break
			logSys.error("Caught unhandled exception in main cycle: %r", e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# incr common error counter:
			self.commonError("unhandled", e)

	logSys.debug("[%s] filter exited (pyinotifier)", self.jailName)
	self.done()

	return True
|
||||
|
||||
##
# Clean-up: then stop the 'Notifier'

def afterStop(self):
	try:
		if self.__notifier: # stop the notifier
			self.__notifier.stop()
			self.__notifier = None
	except AttributeError: # pragma: no cover
		# a partially initialized notifier may lack attributes during stop;
		# only re-raise if the notifier is actually still set:
		if self.__notifier: raise
|
||||
|
||||
##
# Wait for exit with cleanup.

def join(self):
	# replace this bound method on the instance so a repeated join() call
	# becomes a cheap no-op (cleanup must run only once):
	self.join = lambda *args: 0
	self.__cleanup()
	super(FilterPyinotify, self).join()
	logSys.debug("[%s] filter terminated (pyinotifier)", self.jailName)
|
||||
|
||||
##
# Deallocates the resources used by pyinotify.

def __cleanup(self):
	if self.__notifier:
		# wait a bit for the run() loop to release the notifier itself;
		# if it did not, drop our reference anyway:
		if Utils.wait_for(lambda: not self.__notifier, self.sleeptime * 10):
			self.__notifier = None
	self.__monitor = None
|
||||
565
fail2ban-master/fail2ban/server/filtersystemd.py
Normal file
565
fail2ban-master/fail2ban/server/filtersystemd.py
Normal file
@@ -0,0 +1,565 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
|
||||
__author__ = "Steven Hiscocks"
|
||||
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
|
||||
__license__ = "GPL"
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from glob import glob
|
||||
from systemd import journal
|
||||
|
||||
from .failmanager import FailManagerEmpty
|
||||
from .filter import JournalFilter, Filter
|
||||
from .mytime import MyTime
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger, logging, splitwords, uni_decode, _as_bool
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
# cache of resolved systemd paths (key: path name, value: resolved path):
_systemdPathCache = Utils.Cache()
def _getSystemdPath(path):
	"""Get systemd path using systemd-path command (cached)"""
	p = _systemdPathCache.get(path)
	if p: return p
	# ask the systemd-path utility for the given path name:
	p = Utils.executeCmd('systemd-path %s' % path, timeout=10, shell=True, output=True)
	if p and p[0]:
		# first line of stdout is the resolved path:
		p = str(p[1].decode('utf-8')).split('\n')[0]
		_systemdPathCache.set(path, p)
		return p
	# fallback to well-known defaults if systemd-path is unavailable:
	p = '/var/log' if path == 'system-state-logs' else ('/run/log' if path == 'system-runtime-logs' else None)
	_systemdPathCache.set(path, p)
	return p
|
||||
|
||||
def _globJournalFiles(flags=None, path=None):
|
||||
"""Get journal files without rotated files."""
|
||||
filesSet = set()
|
||||
_join = os.path.join
|
||||
def _addJF(filesSet, p, flags):
|
||||
"""add journal files to set corresponding path and flags (without rotated *@*.journal)"""
|
||||
# system journal:
|
||||
if (flags is None) or (flags & journal.SYSTEM_ONLY):
|
||||
filesSet |= set(glob(_join(p,'system.journal'))) - set(glob(_join(p,'system*@*.journal')))
|
||||
# current user-journal:
|
||||
if (flags is not None) and (flags & journal.CURRENT_USER):
|
||||
uid = os.geteuid()
|
||||
filesSet |= set(glob(_join(p,('user-%s.journal' % uid)))) - set(glob(_join(p,('user-%s@*.journal' % uid))))
|
||||
# all local journals:
|
||||
if (flags is None) or not (flags & (journal.SYSTEM_ONLY|journal.CURRENT_USER)):
|
||||
filesSet |= set(glob(_join(p,'*.journal'))) - set(glob(_join(p,'*@*.journal')))
|
||||
if path:
|
||||
# journals relative given path only:
|
||||
_addJF(filesSet, path, flags)
|
||||
else:
|
||||
# persistent journals corresponding flags:
|
||||
if (flags is None) or not (flags & journal.RUNTIME_ONLY):
|
||||
_addJF(filesSet, _join(_getSystemdPath('system-state-logs'), 'journal/*'), flags)
|
||||
# runtime journals corresponding flags:
|
||||
_addJF(filesSet, _join(_getSystemdPath('system-runtime-logs'), 'journal/*'), flags)
|
||||
# if not root, filter readable only:
|
||||
if os.geteuid() != 0:
|
||||
filesSet = [f for f in filesSet if os.access(f, os.R_OK)]
|
||||
return filesSet if filesSet else None
|
||||
|
||||
|
||||
##
|
||||
# Journal reader class.
|
||||
#
|
||||
# This class reads from systemd journal and detects login failures or anything
|
||||
# else that matches a given regular expression. This class is instantiated by
|
||||
# a Jail object.
|
||||
|
||||
class FilterSystemd(JournalFilter): # pragma: systemd no cover
|
||||
##
# Constructor.
#
# Initialize the filter object with default values.
# @param jail the jail object

def __init__(self, jail, **kwargs):
	# extract/normalize journal-specific kwargs before the base class
	# consumes the rest:
	self.__jrnlargs = FilterSystemd._getJournalArgs(kwargs)
	JournalFilter.__init__(self, jail, **kwargs)
	# count of entries processed in the current burst:
	self.__modified = 0
	# Initialise systemd-journal connection
	self.__journal = journal.Reader(**self.__jrnlargs)
	# journal match filters (list of lists of match elements):
	self.__matches = []
	# time until which "Invalidate signaled" messages are suppressed:
	self.__bypassInvalidateMsg = 0
	self.setDatePattern(None)
	logSys.debug("Created FilterSystemd")
|
||||
|
||||
@staticmethod
def _getJournalArgs(kwargs):
	# Build the argument dict for journal.Reader from jail options
	# (journalpath, journalfiles, journalflags, rotated, namespace),
	# popping the consumed keys from `kwargs`.
	# __CURSOR converter keeps the raw cursor string (no decoding):
	args = {'converters':{'__CURSOR': lambda x: x}}
	try:
		args['path'] = kwargs.pop('journalpath')
	except KeyError:
		pass

	try:
		args['files'] = kwargs.pop('journalfiles')
	except KeyError:
		pass
	else:
		p = args['files']
		if not isinstance(p, (list, set, tuple)):
			p = splitwords(p)
		# expand glob patterns, de-duplicate:
		files = []
		for p in p:
			files.extend(glob(p))
		args['files'] = list(set(files))

	rotated = _as_bool(kwargs.pop('rotated', 0))
	# Default flags is SYSTEM_ONLY(4) or LOCAL_ONLY(1), depending on rotated parameter.
	# This could lead to ignore user session files, so together with ignoring rotated
	# files would prevent "Too many open files" errors on a lot of user sessions (see gh-2392):
	try:
		args['flags'] = int(kwargs.pop('journalflags'))
	except KeyError:
		# be sure all journal types will be opened if files/path specified (don't set flags):
		if (not args.get('files') and not args.get('path')):
			args['flags'] = os.getenv("F2B_SYSTEMD_DEFAULT_FLAGS", None)
			if args['flags'] is not None:
				args['flags'] = int(args['flags'])
			elif rotated:
				args['flags'] = journal.SYSTEM_ONLY

	try:
		args['namespace'] = kwargs.pop('namespace')
	except KeyError:
		pass

	# To avoid monitoring rotated logs, as prevention against "Too many open files",
	# set the files to system.journal and user-*.journal (without rotated *@*.journal):
	if not rotated and not args.get('files') and not args.get('namespace'):
		args['files'] = _globJournalFiles(
			args.get('flags', journal.LOCAL_ONLY), args.get('path'))
		if args['files']:
			args['files'] = list(args['files'])
			# flags and path cannot be specified simultaneously with files:
			args['flags'] = None;
			args['path'] = None;
		else:
			args['files'] = None

	return args
|
||||
|
||||
@property
def _journalAlive(self):
	"""Checks journal is online.

	Returns True if the reader is open and still has a usable cursor,
	False otherwise (e.g. descriptor lost after rotation/vacuuming).
	"""
	try:
		# open?
		if self.__journal.closed: # pragma: no cover
			return False
		# has cursor? if it is broken (e. g. no descriptor) - it'd raise this:
		# OSError: [Errno 99] Cannot assign requested address
		if self.__journal._get_cursor():
			return True
	except OSError: # pragma: no cover
		pass
	return False
|
||||
|
||||
def _reopenJournal(self): # pragma: no cover
	"""Reopen journal (if it becomes offline after rotation)

	Tries an in-place re-initialization first; falls back to recreating
	the Reader. Journal matches of the jail are restored afterwards.
	"""
	if self.__journal.closed:
		# recreate reader:
		self.__journal = journal.Reader(**self.__jrnlargs)
	else:
		try:
			# workaround for gh-3929 (no journal descriptor after rotation),
			# to reopen journal we'd simply invoke inherited init again:
			self.__journal.close()
			ja = self.__jrnlargs
			super(journal.Reader, self.__journal).__init__(
				ja.get('flags', 0), ja.get('path'), ja.get('files'), ja.get('namespace'))
		except:
			# cannot reopen in that way, so simply recreate reader:
			self.closeJournal()
			self.__journal = journal.Reader(**self.__jrnlargs)
	# restore journalmatch specified for the jail:
	self.resetJournalMatches()
	# just to avoid "Invalidate signaled" happening again after reopen:
	self.__bypassInvalidateMsg = MyTime.time() + 1
|
||||
|
||||
##
|
||||
# Add a journal match filters from list structure
|
||||
#
|
||||
# @param matches list structure with journal matches
|
||||
|
||||
def _addJournalMatches(self, matches):
|
||||
if self.__matches:
|
||||
self.__journal.add_disjunction() # Add OR
|
||||
newMatches = []
|
||||
for match in matches:
|
||||
newMatches.append([])
|
||||
for match_element in match:
|
||||
self.__journal.add_match(match_element)
|
||||
newMatches[-1].append(match_element)
|
||||
self.__journal.add_disjunction()
|
||||
self.__matches.extend(newMatches)
|
||||
|
||||
##
# Add a journal match filter
#
# @param match journalctl syntax matches in list structure

def addJournalMatch(self, match):
	# split the flat match list on "+" separators into OR-groups:
	newMatches = [[]]
	for match_element in match:
		if match_element == "+":
			newMatches.append([])
		else:
			newMatches[-1].append(match_element)
	try:
		self._addJournalMatches(newMatches)
	except ValueError:
		logSys.error(
			"Error adding journal match for: %r", " ".join(match))
		# restore a consistent journal-match state before re-raising:
		self.resetJournalMatches()
		raise
	else:
		logSys.info("[%s] Added journal match for: %r", self.jailName,
			" ".join(match))
|
||||
##
# Reset a journal match filter called on removal or failure
#
# @return None

def resetJournalMatches(self):
	# flush everything from the reader, then re-apply our known match list:
	self.__journal.flush_matches()
	logSys.debug("[%s] Flushed all journal matches", self.jailName)
	match_copy = self.__matches[:]
	self.__matches = []
	try:
		self._addJournalMatches(match_copy)
	except ValueError:
		logSys.error("Error restoring journal matches")
		raise
	else:
		logSys.debug("Journal matches restored")
|
||||
|
||||
##
# Delete a journal match filter
#
# @param match journalctl syntax matches

def delJournalMatch(self, match=None):
	# clear all:
	if match is None:
		if not self.__matches:
			return
		del self.__matches[:]
	# delete the given match (looked up by value):
	elif match in self.__matches:
		del self.__matches[self.__matches.index(match)]
	else:
		raise ValueError("Match %r not found" % match)
	self.resetJournalMatches()
	logSys.info("[%s] Removed journal match for: %r", self.jailName,
		match if match else '*')
|
||||
|
||||
##
# Get current journal match filter
#
# @return journalctl syntax matches

def getJournalMatch(self):
	"""Return the currently applied journal matches (list of lists)."""
	return self.__matches
|
||||
|
||||
##
# Get journal reader
#
# @return journal reader

def getJournalReader(self):
	"""Return the underlying systemd journal.Reader instance."""
	return self.__journal
|
||||
|
||||
def getJrnEntTime(self, logentry):
	""" Returns time of entry as tuple (ISO-str, Posix)."""
	# prefer the source timestamp; fall back to journal receive time:
	stamp = logentry.get('_SOURCE_REALTIME_TIMESTAMP')
	if stamp is None:
		stamp = logentry.get('__REALTIME_TIMESTAMP')
	posix = time.mktime(stamp.timetuple()) + stamp.microsecond / 1.0E6
	return (stamp.isoformat(), posix)
|
||||
|
||||
##
# Format journal log entry into syslog style
#
# @param entry systemd journal entry dict
# @return format log line

def formatJournalEntry(self, logentry):
	# Be sure, all argument of line tuple should have the same type:
	enc = self.getLogEncoding()
	logelements = []
	# hostname:
	v = logentry.get('_HOSTNAME')
	if v:
		logelements.append(uni_decode(v, enc))
	# program identifier (syslog tag or command name):
	v = logentry.get('SYSLOG_IDENTIFIER')
	if not v:
		v = logentry.get('_COMM')
	if v:
		logelements.append(uni_decode(v, enc))
		# optional PID, appended to the identifier as "[pid]":
		v = logentry.get('SYSLOG_PID')
		if not v:
			v = logentry.get('_PID')
		if v:
			try: # [integer] (if already numeric):
				v = "[%i]" % v
			except TypeError:
				try: # as [integer] (try to convert to int):
					v = "[%i]" % int(v, 0)
				except (TypeError, ValueError): # fallback - [string] as it is
					v = "[%s]" % v
			logelements[-1] += v
		logelements[-1] += ":"
		# kernel messages get their monotonic timestamp like klogd would print:
		if logelements[-1] == "kernel:":
			monotonic = logentry.get('_SOURCE_MONOTONIC_TIMESTAMP')
			if monotonic is None:
				monotonic = logentry.get('__MONOTONIC_TIMESTAMP')[0]
			logelements.append("[%12.6f]" % monotonic.total_seconds())
	msg = logentry.get('MESSAGE','')
	if isinstance(msg, list):
		logelements.append(" ".join(uni_decode(v, enc) for v in msg))
	else:
		logelements.append(uni_decode(msg, enc))

	logline = " ".join(logelements)

	date = self.getJrnEntTime(logentry)
	logSys.log(5, "[%s] Read systemd journal entry: %s %s", self.jailName,
		date[0], logline)
	## use the same type for 1st argument:
	return ((logline[:0], date[0] + ' ', logline.replace('\n', '\\n')), date[1])
|
||||
|
||||
def seekToTime(self, date):
	"""Position the journal reader at the given wall-clock time
	(int seconds are coerced to float for seek_realtime)."""
	when = float(date) if isinstance(date, int) else date
	self.__journal.seek_realtime(when)
|
||||
|
||||
def inOperationMode(self):
	# Switch the filter from catch-up (reading old entries) into normal
	# operation (processing new journal entries as they arrive).
	self.inOperation = True
	logSys.info("[%s] Jail is in operation now (process new journal entries)", self.jailName)
	# just to avoid "Invalidate signaled" happening often at start:
	self.__bypassInvalidateMsg = MyTime.time() + 1
|
||||
|
||||
##
# Main loop.
#
# Periodically check for new journal entries matching the filter and
# handover to FailManager

def run(self):

	if not self.getJournalMatch():
		logSys.notice(
			"[%s] Jail started without 'journalmatch' set. "
			"Jail regexs will be checked against all journal entries, "
			"which is not advised for performance reasons.", self.jailName)

	# Save current cursor position (to recognize in operation mode):
	logentry = None
	try:
		self.__journal.seek_tail()
		logentry = self.__journal.get_previous()
		if logentry:
			self.__journal.get_next()
	except OSError:
		logentry = None # Reading failure, so safe to ignore
	if logentry:
		# Try to obtain the last known time (position of journal)
		startTime = 0
		if self.jail.database is not None:
			startTime = self.jail.database.getJournalPos(self.jail, 'systemd-journal') or 0
		# Seek to max(last_known_time, now - findtime) in journal
		startTime = max( startTime, MyTime.time() - int(self.getFindTime()) )
		self.seekToTime(startTime)
		# Not in operation while we'll read old messages ...
		self.inOperation = False
		# Save current time in order to check time to switch "in operation" mode
		# startTime tuple: (have-cursor-flag, time, [cursor])
		startTime = (1, MyTime.time(), logentry.get('__CURSOR'))
	else:
		# empty journal or no entries for current filter:
		self.inOperationMode()
		# seek_tail() seems to have a bug by no entries (could bypass some entries hereafter), so seek to now instead:
		startTime = MyTime.time()
		self.seekToTime(startTime)
		# for possible future switches of in-operation mode:
		startTime = (0, startTime)

	# Move back one entry to ensure do not end up in dead space
	# if start time beyond end of journal
	try:
		self.__journal.get_previous()
	except OSError:
		pass # Reading failure, so safe to ignore

	wcode = journal.NOP
	line = None
	while self.active:
		# wait for records (or for timeout in sleeptime seconds):
		try:
			if self.idle:
				# because journal.wait will returns immediately if we have records in journal,
				# just wait a little bit here for not idle, to prevent hi-load:
				if not Utils.wait_for(lambda: not self.active or not self.idle,
					self.sleeptime * 10, self.sleeptime
				):
					self.ticks += 1
					continue
			## wait for entries using journal.wait:
			if wcode == journal.NOP and self.inOperation:
				## todo: find better method as wait_for to break (e.g. notify) journal.wait(self.sleeptime),
				## don't use `journal.close()` for it, because in some python/systemd implementation it may
				## cause abnormal program termination (e. g. segfault)
				##
				## wait for entries without sleep in intervals, because "sleeping" in journal.wait,
				## journal.NOP is 0, so we can wait for non zero (APPEND or INVALIDATE):
				wcode = Utils.wait_for(lambda: not self.active and journal.APPEND or \
					self.__journal.wait(Utils.DEFAULT_SLEEP_INTERVAL),
					self.sleeptime, 0.00001)
				## if invalidate (due to rotation, vacuuming or journal files added/removed etc):
				if self.active and wcode == journal.INVALIDATE:
					if self.ticks:
						if not self.__bypassInvalidateMsg or MyTime.time() > self.__bypassInvalidateMsg:
							logSys.log(logging.MSG, "[%s] Invalidate signaled, take a little break (rotation ends)", self.jailName)
						time.sleep(self.sleeptime * 0.25)
						self.__bypassInvalidateMsg = 0
					# wait until the invalidate burst settles:
					Utils.wait_for(lambda: not self.active or \
						self.__journal.wait(Utils.DEFAULT_SLEEP_INTERVAL) != journal.INVALIDATE,
						self.sleeptime * 3, 0.00001)
					if self.ticks:
						# move back and forth to ensure do not end up in dead space by rotation or vacuuming,
						# if position beyond end of journal (gh-3396)
						try:
							if self.__journal.get_previous(): self.__journal.get_next()
						except OSError:
							pass
					# if it is not alive - reopen:
					if not self._journalAlive:
						logSys.log(logging.MSG, "[%s] Journal reader seems to be offline, reopen journal", self.jailName)
						self._reopenJournal()
					wcode = journal.NOP
			self.__modified = 0
			# inner loop: drain available entries (at most 100 per burst):
			while self.active:
				logentry = None
				try:
					logentry = self.__journal.get_next()
				except OSError as e:
					logSys.error("Error reading line from systemd journal: %s",
						e, exc_info=logSys.getEffectiveLevel() <= logging.DEBUG)
				self.ticks += 1
				if logentry:
					line, tm = self.formatJournalEntry(logentry)
					# switch "in operation" mode if we'll find start entry (+ some delta):
					if not self.inOperation:
						if tm >= MyTime.time() - 1: # reached now (approximated):
							self.inOperationMode()
						elif startTime[0] == 1:
							# if it reached start entry (or get read time larger than start time)
							if logentry.get('__CURSOR') == startTime[2] or tm > startTime[1]:
								# give the filter same time it needed to reach the start entry:
								startTime = (0, MyTime.time()*2 - startTime[1])
						elif tm > startTime[1]: # reached start time (approximated):
							self.inOperationMode()
					# process line
					self.processLineAndAdd(line, tm)
					self.__modified += 1
					if self.__modified >= 100: # todo: should be configurable
						wcode = journal.APPEND; # don't need wait - there are still unprocessed entries
						break
				else:
					# "in operation" mode since we don't have messages anymore (reached end of journal):
					if not self.inOperation:
						self.inOperationMode()
					wcode = journal.NOP; # enter wait - no more entries to process
					break
			self.__modified = 0
			if self.ticks % 10 == 0:
				self.performSvc()
			# update position in log (time and iso string):
			if self.jail.database:
				if line:
					self._pendDBUpdates['systemd-journal'] = (tm, line[1])
					line = None
				# flush pending DB updates periodically (or on stop):
				if self._pendDBUpdates and (
					self.ticks % 100 == 0
					or MyTime.time() >= self._nextUpdateTM
					or not self.active
				):
					self._updateDBPending()
					self._nextUpdateTM = MyTime.time() + Utils.DEFAULT_SLEEP_TIME * 5
		except Exception as e: # pragma: no cover
			if not self.active: # if not active - error by stop...
				break
			wcode = journal.NOP
			logSys.error("Caught unhandled exception in main cycle: %r", e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			# incr common error counter:
			self.commonError("unhandled", e)

	logSys.debug("[%s] filter terminated", self.jailName)

	# call afterStop once (close journal, etc):
	self.done()

	logSys.debug("[%s] filter exited (systemd)", self.jailName)
	return True
|
||||
|
||||
def closeJournal(self):
	"""Detach and close the journal reader; errors are logged, not raised."""
	try:
		# detach first, so concurrent users see None immediately:
		jnl = self.__journal
		self.__journal = None
		if jnl:
			jnl.close()
	except Exception as e: # pragma: no cover
		logSys.error("Close journal failed: %r", e,
			exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
|
||||
def status(self, flavor="basic"):
	"""Return base filter status, extended with the journal matches
	(except for the "stats" flavor, which stays unchanged)."""
	ret = super(FilterSystemd, self).status(flavor=flavor)
	if flavor == "stats":
		return ret
	ret.append(("Journal matches",
		[" + ".join(" ".join(match) for match in self.__matches)]))
	return ret
|
||||
|
||||
def _updateDBPending(self):
|
||||
"""Apply pending updates (journal position) to database.
|
||||
"""
|
||||
db = self.jail.database
|
||||
while True:
|
||||
try:
|
||||
log, args = self._pendDBUpdates.popitem()
|
||||
except KeyError:
|
||||
break
|
||||
db.updateJournal(self.jail, log, *args)
|
||||
|
||||
def afterStop(self):
	"""Cleanup"""
	# close journal:
	self.closeJournal()
	# ensure positions of pending logs are up-to-date:
	if self._pendDBUpdates and self.jail.database:
		self._updateDBPending()
|
||||
919
fail2ban-master/fail2ban/server/ipdns.py
Normal file
919
fail2ban-master/fail2ban/server/ipdns.py
Normal file
@@ -0,0 +1,919 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Fail2Ban Developers, Alexander Koeppe, Serg G. Brester, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004-2016 Fail2ban Developers"
|
||||
__license__ = "GPL"
|
||||
|
||||
import socket
|
||||
import struct
|
||||
import os
|
||||
import re
|
||||
|
||||
from .utils import Utils
|
||||
from ..helpers import getLogger, MyTime, splitwords
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
##
|
||||
# Helper functions
|
||||
#
|
||||
#
|
||||
def asip(ip):
	"""Coerce *ip* to an IPAddr instance (returns it unchanged if it already is one)."""
	return ip if isinstance(ip, IPAddr) else IPAddr(ip)
|
||||
|
||||
def getfqdn(name=''):
	"""Get fully-qualified hostname of given host, thereby resolve of an external
	IPs and name will be preferred before the local domain (or a loopback), see gh-2438.

	Falls back to ``socket.getfqdn`` if resolution fails or yields nothing.
	"""
	try:
		name = name or socket.gethostname()
		# prefer a canonical name starting with the host name itself
		# (like www.domain.tld for www), otherwise remember the first one;
		# note: the former `if names:` on a generator was always true and
		# an empty result could leak `None` to the caller - fixed here:
		pref = name + '.'
		first = None
		for ai in socket.getaddrinfo(
			name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME
		):
			cn = ai[3]
			if not cn:	# entry without canonical name
				continue
			if cn.startswith(pref):
				return cn
			if not first:
				first = cn
		# not found with host-name prefix - use first known fqdn (if any):
		if first:
			return first
	except socket.error:
		pass
	# fallback to python's own getfqdn routine:
	return socket.getfqdn(name)
|
||||
|
||||
|
||||
##
|
||||
# Utils class for DNS handling.
|
||||
#
|
||||
# This class contains only static methods used to handle DNS
|
||||
#
|
||||
class DNSUtils:
	"""Static helpers for DNS resolution and discovery of the own host's
	names and IP addresses.

	All results are memoized in bounded, expiring caches (`Utils.Cache`)
	to avoid repeated, potentially slow resolver queries.
	"""

	# todo: make configurable the expired time and max count of cache entries:
	CACHE_nameToIp = Utils.Cache(maxCount=1000, maxTime=5*60)
	CACHE_ipToName = Utils.Cache(maxCount=1000, maxTime=5*60)
	# static cache used to hold sets read from files:
	CACHE_fileToIp = Utils.Cache(maxCount=100, maxTime=5*60)

	@staticmethod
	def dnsToIp(dns):
		""" Convert a DNS into an IP address using the Python socket module.
		Thanks to Kevin Drapel.

		Returns a (possibly empty) set of valid IPAddr objects; the result
		(even an empty one) is cached.
		"""
		# cache, also prevent long wait during retrieving of ip for wrong dns or lazy dns-system:
		ips = DNSUtils.CACHE_nameToIp.get(dns)
		if ips is not None:
			return ips
		# retrieve ips
		ips = set()
		saveerr = None
		# query IPv6 family only if IPv6 is allowed on this system:
		for fam in ((socket.AF_INET,socket.AF_INET6) if DNSUtils.IPv6IsAllowed() else (socket.AF_INET,)):
			try:
				for result in socket.getaddrinfo(dns, None, fam, 0, socket.IPPROTO_TCP):
					# if getaddrinfo returns something unexpected:
					if len(result) < 4 or not len(result[4]): continue
					# get ip from `(2, 1, 6, '', ('127.0.0.1', 0))`,be sure we've an ip-string
					# (some python-versions resp. host configurations causes returning of integer there):
					ip = IPAddr(str(result[4][0]), IPAddr._AF2FAM(fam))
					if ip.isValid:
						ips.add(ip)
			except Exception as e:
				# keep last error; only warn if nothing at all could be resolved:
				saveerr = e
		if not ips and saveerr:
			logSys.warning("Unable to find a corresponding IP address for %s: %s", dns, saveerr)

		DNSUtils.CACHE_nameToIp.set(dns, ips)
		return ips

	@staticmethod
	def ipToName(ip):
		"""Reverse-resolve *ip* to a host name; returns None on failure.

		Uses () as cache "miss" marker so a cached None (failed lookup)
		is also served from cache.
		"""
		# cache, also prevent long wait during retrieving of name for wrong addresses, lazy dns:
		v = DNSUtils.CACHE_ipToName.get(ip, ())
		if v != ():
			return v
		# retrieve name
		try:
			v = socket.gethostbyaddr(ip)[0]
		except socket.error as e:
			logSys.debug("Unable to find a name for the IP %s: %s", ip, e)
			v = None
		DNSUtils.CACHE_ipToName.set(ip, v)
		return v

	@staticmethod
	def textToIp(text, useDns):
		""" Return the IP of DNS found in a given text.

		*useDns* in ("yes", "warn") enables DNS resolution when no plain
		IP is found; "warn" additionally logs the resolution.
		"""
		ipList = set()
		# Search for plain IP
		plainIP = IPAddr.searchIP(text)
		if plainIP is not None:
			ip = IPAddr(plainIP)
			if ip.isValid:
				ipList.add(ip)

		# If we are allowed to resolve -- give it a try if nothing was found
		if useDns in ("yes", "warn") and not ipList:
			# Try to get IP from possible DNS
			ip = DNSUtils.dnsToIp(text)
			ipList.update(ip)
			if ip and useDns == "warn":
				logSys.warning("Determined IP using DNS Lookup: %s = %s",
					text, ipList)

		return ipList

	@staticmethod
	def getHostname(fqdn=True):
		"""Get short hostname or fully-qualified hostname of host self"""
		# try find cached own hostnames (this tuple-key cannot be used elsewhere):
		key = ('self','hostname', fqdn)
		name = DNSUtils.CACHE_ipToName.get(key)
		if name is not None:
			return name
		# get it using different ways (hostname, fully-qualified or vice versa):
		name = ''
		for hostname in (
			(getfqdn, socket.gethostname) if fqdn else (socket.gethostname, getfqdn)
		):
			try:
				name = hostname()
				break
			except Exception as e: # pragma: no cover
				logSys.warning("Retrieving own hostnames failed: %s", e)
		# cache and return :
		DNSUtils.CACHE_ipToName.set(key, name)
		return name

	# key find cached own hostnames (this tuple-key cannot be used elsewhere):
	_getSelfNames_key = ('self','dns')

	@staticmethod
	def getSelfNames():
		"""Get own host names of self"""
		# try find cached own hostnames:
		names = DNSUtils.CACHE_ipToName.get(DNSUtils._getSelfNames_key)
		if names is not None:
			return names
		# get it using different ways (a set with names of localhost, hostname, fully qualified):
		names = set([
			'localhost', DNSUtils.getHostname(False), DNSUtils.getHostname(True)
		]) - set(['']) # getHostname can return ''
		# cache and return :
		DNSUtils.CACHE_ipToName.set(DNSUtils._getSelfNames_key, names)
		return names

	# key to find cached network interfaces IPs (this tuple-key cannot be used elsewhere):
	_getNetIntrfIPs_key = ('netintrf','ips')

	@staticmethod
	def getNetIntrfIPs():
		"""Get own IP addresses of self (from network interfaces only).

		Returns an empty IPAddrSet when interface enumeration is not
		implemented/available for this platform.
		"""
		# to find cached own IPs:
		ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getNetIntrfIPs_key)
		if ips is not None:
			return ips
		# try to obtain from network interfaces if possible (implemented for this platform):
		try:
			ips = IPAddrSet([a for ni, a in DNSUtils._NetworkInterfacesAddrs()])
		except:
			# deliberately best-effort: any failure yields an empty set
			ips = IPAddrSet()
		# cache and return :
		DNSUtils.CACHE_nameToIp.set(DNSUtils._getNetIntrfIPs_key, ips)
		return ips

	# key to find cached own IPs (this tuple-key cannot be used elsewhere):
	_getSelfIPs_key = ('self','ips')

	@staticmethod
	def getSelfIPs():
		"""Get own IP addresses of self (interfaces plus resolved own names)."""
		# to find cached own IPs:
		ips = DNSUtils.CACHE_nameToIp.get(DNSUtils._getSelfIPs_key)
		if ips is not None:
			return ips
		# firstly try to obtain from network interfaces if possible (implemented for this platform):
		ips = IPAddrSet(DNSUtils.getNetIntrfIPs())
		# extend it using different ways (a set with IPs of localhost, hostname, fully qualified):
		for hostname in DNSUtils.getSelfNames():
			try:
				ips |= IPAddrSet(DNSUtils.dnsToIp(hostname))
			except Exception as e: # pragma: no cover
				logSys.warning("Retrieving own IPs of %s failed: %s", hostname, e)
		# cache and return :
		DNSUtils.CACHE_nameToIp.set(DNSUtils._getSelfIPs_key, ips)
		return ips

	@staticmethod
	def getIPsFromFile(fileName, noError=True):
		"""Get set of IP addresses or subnets from file.

		NOTE(review): *noError* is currently unused here; the returned
		FileIPAddrSet loads lazily with its own error handling - confirm
		whether noError should be forwarded to its load().
		"""
		# to find cached IPs:
		ips = DNSUtils.CACHE_fileToIp.get(fileName)
		if ips is not None:
			return ips
		# try to obtain set from file:
		ips = FileIPAddrSet(fileName)
		#ips.load() - load on demand
		# cache and return :
		DNSUtils.CACHE_fileToIp.set(fileName, ips)
		return ips

	# explicit override of IPv6 allowance (None - auto-detect):
	_IPv6IsAllowed = None

	@staticmethod
	def _IPv6IsSupportedBySystem():
		"""Best-effort probe whether the system supports IPv6.

		Returns True/False if determinable, None if unknown.
		"""
		if not socket.has_ipv6:
			return False
		# try to check sysctl net.ipv6.conf.all.disable_ipv6:
		try:
			with open('/proc/sys/net/ipv6/conf/all/disable_ipv6', 'rb') as f:
				# if 1 - disabled, 0 - enabled
				return not int(f.read())
		except:
			pass
		s = None
		try:
			# try to create INET6 socket:
			s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
			# bind it to free port for any interface supporting IPv6:
			s.bind(("", 0));
			return True
		except Exception as e: # pragma: no cover
			if hasattr(e, 'errno'):
				import errno
				# negative (-9 'Address family not supported', etc) or not available/supported:
				if e.errno < 0 or e.errno in (errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT):
					return False
				# in use:
				if e.errno in (errno.EADDRINUSE, errno.EACCES): # normally unreachable (free port and root)
					return True
		finally:
			if s: s.close()
		# unable to detect:
		return None

	@staticmethod
	def setIPv6IsAllowed(value):
		"""Force IPv6 allowance to *value* (True/False), or None for auto-detect."""
		DNSUtils._IPv6IsAllowed = value
		logSys.debug("IPv6 is %s", ('on' if value else 'off') if value is not None else 'auto')
		return value

	# key to find cached value of IPv6 allowance (this tuple-key cannot be used elsewhere):
	_IPv6IsAllowed_key = ('self','ipv6-allowed')

	@staticmethod
	def IPv6IsAllowed():
		"""Whether IPv6 lookups should be performed (override, cache, or detection)."""
		if DNSUtils._IPv6IsAllowed is not None:
			return DNSUtils._IPv6IsAllowed
		v = DNSUtils.CACHE_nameToIp.get(DNSUtils._IPv6IsAllowed_key)
		if v is not None:
			return v
		v = DNSUtils._IPv6IsSupportedBySystem()
		if v is None:
			# detect by IPs of host:
			ips = DNSUtils.getNetIntrfIPs()
			if not ips:
				DNSUtils._IPv6IsAllowed = True; # avoid self recursion from getSelfIPs -> dnsToIp -> IPv6IsAllowed
				try:
					ips = DNSUtils.getSelfIPs()
				finally:
					# restore auto-detect state after the guarded call:
					DNSUtils._IPv6IsAllowed = None
			# any own address containing ':' is an IPv6 address:
			v = any((':' in ip.ntoa) for ip in ips)
		DNSUtils.CACHE_nameToIp.set(DNSUtils._IPv6IsAllowed_key, v)
		return v
|
||||
|
||||
|
||||
##
|
||||
# Class for IP address handling.
|
||||
#
|
||||
# This class contains methods for handling IPv4 and IPv6 addresses.
|
||||
#
|
||||
class IPAddr(object):
	"""Encapsulate functionality for IPv4 and IPv6 addresses.

	Instances are immutable (``__slots__``) and interned via a bounded
	cache in ``__new__``; invalid/raw values keep their original string
	in ``_raw`` with family CIDR_RAW resp. AF_UNSPEC.
	"""

	IP_4_RE = r"""(?:\d{1,3}\.){3}\d{1,3}"""
	IP_6_RE = r"""(?:[0-9a-fA-F]{1,4}::?|:){1,7}(?:[0-9a-fA-F]{1,4}|(?<=:):)"""
	IP_4_6_CRE = re.compile(
		r"""^(?:(?P<IPv4>%s)|\[?(?P<IPv6>%s)\]?)$""" % (IP_4_RE, IP_6_RE))
	IP_W_CIDR_CRE = re.compile(
		r"""^(%s|%s)/(?:(\d+)|(%s|%s))$""" % (IP_4_RE, IP_6_RE, IP_4_RE, IP_6_RE))
	# An IPv4 compatible IPv6 to be reused (see below)
	IP6_4COMPAT = None

	# object attributes
	__slots__ = '_family','_addr','_plen','_maskplen','_raw'

	# todo: make configurable the expired time and max count of cache entries:
	CACHE_OBJ = Utils.Cache(maxCount=10000, maxTime=5*60)

	# pseudo-CIDR markers: raw (unparsed) and unspecified prefix length:
	CIDR_RAW = -2
	CIDR_UNSPEC = -1
	# family encoded into the cidr argument (see _AF2FAM):
	FAM_IPv4 = CIDR_RAW - socket.AF_INET
	FAM_IPv6 = CIDR_RAW - socket.AF_INET6
	@staticmethod
	def _AF2FAM(v):
		"""Encode an address family constant into the cidr parameter space."""
		return IPAddr.CIDR_RAW - v

	def __new__(cls, ipstr, cidr=CIDR_UNSPEC):
		"""Create or return a cached IPAddr for *ipstr* (raw values are never cached)."""
		if cidr == IPAddr.CIDR_UNSPEC and isinstance(ipstr, (tuple, list)):
			cidr = IPAddr.CIDR_RAW
		if cidr == IPAddr.CIDR_RAW: # don't cache raw
			ip = super(IPAddr, cls).__new__(cls)
			ip.__init(ipstr, cidr)
			return ip
		# check already cached as IPAddr
		args = (ipstr, cidr)
		ip = IPAddr.CACHE_OBJ.get(args)
		if ip is not None:
			return ip
		# wrap mask to cidr (correct plen):
		if cidr == IPAddr.CIDR_UNSPEC:
			ipstr, cidr = IPAddr.__wrap_ipstr(ipstr)
			args = (ipstr, cidr)
			# check cache again:
			if cidr != IPAddr.CIDR_UNSPEC:
				ip = IPAddr.CACHE_OBJ.get(args)
				if ip is not None:
					return ip
		ip = super(IPAddr, cls).__new__(cls)
		ip.__init(ipstr, cidr)
		if ip._family != IPAddr.CIDR_RAW:
			IPAddr.CACHE_OBJ.set(args, ip)
		return ip

	@staticmethod
	def __wrap_ipstr(ipstr):
		"""Strip IPv6 brackets and split an optional /mask into (ipstr, plen)."""
		# because of standard spelling of IPv6 (with port) enclosed in brackets ([ipv6]:port),
		# remove they now (be sure the <HOST> inside failregex uses this for IPv6 (has \[?...\]?)
		if len(ipstr) > 2 and ipstr[0] == '[' and ipstr[-1] == ']':
			ipstr = ipstr[1:-1]
		# test mask:
		if "/" not in ipstr:
			return ipstr, IPAddr.CIDR_UNSPEC
		s = IPAddr.IP_W_CIDR_CRE.match(ipstr)
		if s is None:
			return ipstr, IPAddr.CIDR_UNSPEC
		s = list(s.groups())
		if s[2]: # 255.255.255.0 resp. ffff:: style mask
			s[1] = IPAddr.masktoplen(s[2])
		del s[2]
		try:
			s[1] = int(s[1])
		except ValueError:
			return ipstr, IPAddr.CIDR_UNSPEC
		return s

	def __init(self, ipstr, cidr=CIDR_UNSPEC):
		""" initialize IP object by converting IP address string
			to binary to integer
		"""
		self._family = socket.AF_UNSPEC
		self._addr = 0
		self._plen = 0
		self._maskplen = None
		# always save raw value (normally used if really raw or not valid only):
		self._raw = ipstr
		# if not raw - recognize family, set addr, etc.:
		if cidr != IPAddr.CIDR_RAW:
			if cidr is not None and cidr < IPAddr.CIDR_RAW:
				# family was encoded into cidr (see _AF2FAM):
				family = [IPAddr.CIDR_RAW - cidr]
			else:
				family = [socket.AF_INET, socket.AF_INET6]
			for family in family:
				try:
					binary = socket.inet_pton(family, ipstr)
					self._family = family
					break
				except socket.error:
					continue

			if self._family == socket.AF_INET:
				# convert host to network byte order
				self._addr, = struct.unpack("!L", binary)
				self._plen = 32

				# mask out host portion if prefix length is supplied
				if cidr is not None and cidr >= 0:
					mask = ~(0xFFFFFFFF >> cidr)
					self._addr &= mask
					self._plen = cidr

			elif self._family == socket.AF_INET6:
				# convert host to network byte order
				hi, lo = struct.unpack("!QQ", binary)
				self._addr = (hi << 64) | lo
				self._plen = 128

				# mask out host portion if prefix length is supplied
				if cidr is not None and cidr >= 0:
					mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF >> cidr)
					self._addr &= mask
					self._plen = cidr

				# if IPv6 address is a IPv4-compatible, make instance a IPv4
				elif self.isInNet(IPAddr.IP6_4COMPAT):
					self._addr = lo & 0xFFFFFFFF
					self._family = socket.AF_INET
					self._plen = 32
		else:
			self._family = IPAddr.CIDR_RAW

	def __repr__(self):
		return repr(self.ntoa)

	def __str__(self):
		return self.ntoa if isinstance(self.ntoa, str) else str(self.ntoa)

	def __reduce__(self):
		"""IPAddr pickle-handler, that simply wraps IPAddr to the str

		Returns a string as instance to be pickled, because fail2ban-client can't
		unserialize IPAddr objects
		"""
		return (str, (self.ntoa,))

	@property
	def addr(self):
		# integer value of the address (network order):
		return self._addr

	@property
	def family(self):
		# socket.AF_INET / AF_INET6 / AF_UNSPEC / CIDR_RAW:
		return self._family

	FAM2STR = {socket.AF_INET: 'inet4', socket.AF_INET6: 'inet6'}
	@property
	def familyStr(self):
		return IPAddr.FAM2STR.get(self._family)

	@property
	def instanceType(self):
		return "ip" if self.isValid else "dns"

	@property
	def plen(self):
		# prefix length (32/128 for single addresses):
		return self._plen

	@property
	def raw(self):
		"""The raw address

		Should only be set to a non-empty string if prior address
		conversion wasn't possible
		"""
		return self._raw

	@property
	def isValid(self):
		"""Either the object corresponds to a valid IP address
		"""
		return self._family != socket.AF_UNSPEC

	@property
	def isSingle(self):
		"""Returns whether the object is a single IP address (not DNS and subnet)
		"""
		return self._plen == {socket.AF_INET: 32, socket.AF_INET6: 128}.get(self._family, -1000)

	def __eq__(self, other):
		# raw values compare against plain strings directly:
		if self._family == IPAddr.CIDR_RAW and not isinstance(other, IPAddr):
			return self._raw == other
		if not isinstance(other, IPAddr):
			if other is None: return False
			other = IPAddr(other)
		if self._family != other._family: return False
		if self._family == socket.AF_UNSPEC:
			return self._raw == other._raw
		return (
			(self._addr == other._addr) and
			(self._plen == other._plen)
		)

	def __ne__(self, other):
		return not (self == other)

	def __lt__(self, other):
		# NOTE(review): ordering compares family first, then address value;
		# not a strict total order across mixed families - used for sorting only.
		if self._family == IPAddr.CIDR_RAW and not isinstance(other, IPAddr):
			return self._raw < other
		if not isinstance(other, IPAddr):
			if other is None: return False
			other = IPAddr(other)
		return self._family < other._family or self._addr < other._addr

	def __add__(self, other):
		if not isinstance(other, IPAddr):
			other = IPAddr(other)
		return "%s%s" % (self, other)

	def __radd__(self, other):
		if not isinstance(other, IPAddr):
			other = IPAddr(other)
		return "%s%s" % (other, self)

	def __hash__(self):
		# should be the same as by string (because of possible compare with string):
		return hash(self.ntoa)
		#return hash(self._addr)^hash((self._plen<<16)|self._family)

	@property
	def hexdump(self):
		"""Hex representation of the IP address (for debug purposes)
		"""
		if self._family == socket.AF_INET:
			return "%08x" % self._addr
		elif self._family == socket.AF_INET6:
			return "%032x" % self._addr
		else:
			return ""

	# TODO: could be lazily evaluated
	@property
	def ntoa(self):
		""" represent IP object as text like the deprecated
			C pendant inet.ntoa but address family independent
		"""
		add = ''
		if self.isIPv4:
			# convert network to host byte order
			binary = struct.pack("!L", self._addr)
			if self._plen and self._plen < 32:
				add = "/%d" % self._plen
		elif self.isIPv6:
			# convert network to host byte order
			hi = self._addr >> 64
			lo = self._addr & 0xFFFFFFFFFFFFFFFF
			binary = struct.pack("!QQ", hi, lo)
			if self._plen and self._plen < 128:
				add = "/%d" % self._plen
		else:
			# raw/invalid - return original string:
			return self._raw

		return socket.inet_ntop(self._family, binary) + add

	def getPTR(self, suffix=None):
		""" return the DNS PTR string of the provided IP address object

		If "suffix" is provided it will be appended as the second and top
		level reverse domain.
		If omitted it is implicitly set to the second and top level reverse
		domain of the according IP address family
		"""
		if self.isIPv4:
			exploded_ip = self.ntoa.split(".")
			if suffix is None:
				suffix = "in-addr.arpa."
		elif self.isIPv6:
			exploded_ip = self.hexdump
			if suffix is None:
				suffix = "ip6.arpa."
		else:
			return ""

		return "%s.%s" % (".".join(reversed(exploded_ip)), suffix)

	def getHost(self):
		"""Return the host name (DNS) of the provided IP address object
		"""
		return DNSUtils.ipToName(self.ntoa)

	@property
	def isIPv4(self):
		"""Either the IP object is of address family AF_INET
		"""
		return self.family == socket.AF_INET

	@property
	def isIPv6(self):
		"""Either the IP object is of address family AF_INET6
		"""
		return self.family == socket.AF_INET6

	def isInNet(self, net):
		"""Return either the IP object is in the provided network
		"""
		# if addr-set:
		if isinstance(net, IPAddrSet):
			return self in net
		# if it isn't a valid IP address, try DNS resolution
		if not net.isValid and net.raw != "":
			# Check if IP in DNS
			return self in DNSUtils.dnsToIp(net.raw)

		if self.family != net.family:
			return False
		if self.isIPv4:
			mask = ~(0xFFFFFFFF >> net.plen)
		elif self.isIPv6:
			mask = ~(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF >> net.plen)
		else:
			return False

		return (self.addr & mask) == net.addr

	def contains(self, ip):
		"""Return whether the object (as network) contains given IP
		"""
		return isinstance(ip, IPAddr) and (ip == self or ip.isInNet(self))

	def __contains__(self, ip):
		return self.contains(ip)

	# Pre-calculated map: addr to maskplen
	# (plain helper, executed once at class-creation time below):
	def __getMaskMap():
		m6 = (1 << 128)-1
		m4 = (1 << 32)-1
		mmap = {m6: 128, m4: 32, 0: 0}
		m = 0
		for i in range(0, 128):
			m |= 1 << i
			if i < 32:
				mmap[m ^ m4] = 32-1-i
			mmap[m ^ m6] = 128-1-i
		return mmap

	MAP_ADDR2MASKPLEN = __getMaskMap()

	@property
	def maskplen(self):
		"""Prefix length of a netmask-valued address; raises ValueError for non-contiguous masks."""
		mplen = 0
		if self._maskplen is not None:
			return self._maskplen
		mplen = IPAddr.MAP_ADDR2MASKPLEN.get(self._addr)
		if mplen is None:
			raise ValueError("invalid mask %r, no plen representation" % (str(self),))
		self._maskplen = mplen
		return mplen

	@staticmethod
	def masktoplen(mask):
		"""Convert mask string to prefix length

		To be used only for IPv4 masks
		"""
		return IPAddr(mask).maskplen

	@staticmethod
	def searchIP(text):
		"""Search if text is an IP address, and return it if so, else None
		"""
		match = IPAddr.IP_4_6_CRE.match(text)
		if not match:
			return None
		ipstr = match.group('IPv4')
		if ipstr is not None and ipstr != '':
			return ipstr
		return match.group('IPv6')
|
||||
|
||||
|
||||
# An IPv4 compatible IPv6 to be reused (the ::ffff:0:0/96 mapped range);
# assigned after the class body because it is an IPAddr instance itself:
IPAddr.IP6_4COMPAT = IPAddr("::ffff:0:0", 96)
|
||||
|
||||
|
||||
class IPAddrSet(set):
	"""Set of IPAddr objects with subnet awareness.

	``hasSubNet`` tracks whether any member is a network (non-single
	address); membership tests then also probe each subnet member.
	"""

	hasSubNet = 0

	def __init__(self, ips=[]):
		members, nSubnets = IPAddrSet._list2set(ips)
		set.__init__(self, members)
		self.hasSubNet = nSubnets

	@staticmethod
	def _list2set(ips):
		"""Normalize *ips* to a set of IPAddr; also count non-single entries."""
		converted = set()
		nSubnets = 0
		for item in ips:
			addr = item if isinstance(item, IPAddr) else IPAddr(item)
			converted.add(addr)
			nSubnets += not addr.isSingle
		return converted, nSubnets

	@property
	def instanceType(self):
		return "ip-set"

	def set(self, ips):
		"""Replace the whole content of the set with *ips*."""
		members, nSubnets = IPAddrSet._list2set(ips)
		self.clear()
		self.update(members)
		self.hasSubNet = nSubnets

	def add(self, ip):
		"""Add a single address (converted to IPAddr if needed)."""
		if not isinstance(ip, IPAddr):
			ip = IPAddr(ip)
		self.hasSubNet |= not ip.isSingle
		set.add(self, ip)

	def __contains__(self, ip):
		"""True if *ip* is a direct member or lies within any subnet member."""
		if not isinstance(ip, IPAddr):
			ip = IPAddr(ip)
		if set.__contains__(self, ip):
			return True
		return bool(self.hasSubNet) and any(net.contains(ip) for net in self)
|
||||
|
||||
|
||||
class FileIPAddrSet(IPAddrSet):
	"""IPAddrSet lazily loaded from a file of IPs/subnets.

	The file is (re)read on demand in ``__contains__`` when its stats
	(mtime/inode/size) changed, throttled by ``maxUpdateLatency``.
	"""

	# RE matching file://... (absolute as well as relative file name)
	RE_FILE_IGN_IP = re.compile(r'^file:(?:/{0,2}(?=/(?!/|.{1,2}/))|/{0,2})(.*)$')

	fileName = ''            # path of the backing file
	_reprName = None         # cached 'file:...' representation
	maxUpdateLatency = 1 # latency in seconds to update by changes
	_nextCheck = 0           # earliest time for next stat-check
	_fileStats = ()          # (mtime, inode, size) of last load

	def __init__(self, fileName=''):
		self.fileName = fileName
		# self.load() - lazy load on demand by first check (in, __contains__ etc)

	@property
	def instanceType(self):
		return repr(self)

	def __eq__(self, other):
		"""Equality by identity, file name, or a matching 'file:...' string.

		NOTE(review): returns 1 (truthy) resp. implicit None (falsy) rather
		than True/False, and defines no __hash__ - sets are unhashable anyway.
		"""
		if id(self) == id(other): return 1
		# to allow remove file-set from list (delIgnoreIP) by its name:
		if isinstance(other, FileIPAddrSet):
			return self.fileName == other.fileName
		m = FileIPAddrSet.RE_FILE_IGN_IP.match(other)
		if m:
			return self.fileName == m.group(1)

	def _isModified(self):
		"""Check whether the file is modified (file stats changed)

		Side effect: if modified, _fileStats will be updated to last known stats of file
		"""
		tm = MyTime.time()
		# avoid to check it always (not often than maxUpdateLatency):
		if tm <= self._nextCheck:
			return None; # no check needed
		self._nextCheck = tm + self.maxUpdateLatency
		stats = os.stat(self.fileName)
		stats = stats.st_mtime, stats.st_ino, stats.st_size
		if self._fileStats != stats:
			self._fileStats = stats
			return True; # modified, needs to be reloaded
		return False; # unmodified

	def load(self, forceReload=False, noError=True):
		"""Load set from file (on demand if needed or by forceReload)
		"""
		try:
			# load only if needed and modified (or first time load on demand)
			if self._isModified() or forceReload:
				with open(self.fileName, 'r') as f:
					ips = f.read()
				ips = splitwords(ips, ignoreComments=True)
				self.set(ips)
		except Exception as e: # pragma: no cover
			self._nextCheck += 60; # increase interval to check (to 1 minute, to avoid log flood on errors)
			if not noError: raise e
			logSys.warning("Retrieving IPs set from %r failed: %s", self.fileName, e)

	def __repr__(self):
		# canonical 'file:/abs/path' resp. 'file:rel/path' form (cached):
		if self._reprName is None:
			self._reprName = 'file:' + ('/' if self.fileName.startswith('/') else '') + self.fileName
		return self._reprName

	def __contains__(self, ip):
		# load if needed:
		if self.fileName:
			self.load()
		# inherited contains:
		return IPAddrSet.__contains__(self, ip)
|
||||
|
||||
|
||||
def _NetworkInterfacesAddrs(withMask=False):
	"""Yield (interface-name, IPAddr) pairs for all local interfaces.

	First call lazily builds the real implementation via ctypes/libc
	getifaddrs(3) and rebinds DNSUtils._NetworkInterfacesAddrs to it;
	on platforms without getifaddrs a stub raising NotImplementedError
	is installed instead.
	"""

	# Closure implementing lazy load modules and libc and define _NetworkInterfacesAddrs on demand:
	# Currently tested on Linux only (TODO: implement for MacOS, Solaris, etc)
	try:
		from ctypes import (
			Structure, Union, POINTER,
			pointer, get_errno, cast,
			c_ushort, c_byte, c_void_p, c_char_p, c_uint, c_int, c_uint16, c_uint32
		)
		import ctypes.util
		import ctypes

		# C struct mirrors of <sys/socket.h> / <ifaddrs.h> layouts:
		class struct_sockaddr(Structure):
			_fields_ = [
				('sa_family', c_ushort),
				('sa_data', c_byte * 14),]

		class struct_sockaddr_in(Structure):
			_fields_ = [
				('sin_family', c_ushort),
				('sin_port', c_uint16),
				('sin_addr', c_byte * 4)]

		class struct_sockaddr_in6(Structure):
			_fields_ = [
				('sin6_family', c_ushort),
				('sin6_port', c_uint16),
				('sin6_flowinfo', c_uint32),
				('sin6_addr', c_byte * 16),
				('sin6_scope_id', c_uint32)]

		class union_ifa_ifu(Union):
			_fields_ = [
				('ifu_broadaddr', POINTER(struct_sockaddr)),
				('ifu_dstaddr', POINTER(struct_sockaddr)),]

		class struct_ifaddrs(Structure):
			pass
		# self-referential field must be assigned after class creation:
		struct_ifaddrs._fields_ = [
			('ifa_next', POINTER(struct_ifaddrs)),
			('ifa_name', c_char_p),
			('ifa_flags', c_uint),
			('ifa_addr', POINTER(struct_sockaddr)),
			('ifa_netmask', POINTER(struct_sockaddr)),
			('ifa_ifu', union_ifa_ifu),
			('ifa_data', c_void_p),]

		libc = ctypes.CDLL(ctypes.util.find_library('c') or "")
		if not libc.getifaddrs: # pragma: no cover
			raise NotImplementedError('libc.getifaddrs is not available')

		def ifap_iter(ifap):
			# walk the singly-linked ifaddrs list:
			ifa = ifap.contents
			while True:
				yield ifa
				if not ifa.ifa_next:
					break
				ifa = ifa.ifa_next.contents

		def getfamaddr(ifa, withMask=False):
			# convert one ifaddrs entry to IPAddr (optionally with /mask), or None:
			sa = ifa.ifa_addr.contents
			fam = sa.sa_family
			if fam == socket.AF_INET:
				sa = cast(pointer(sa), POINTER(struct_sockaddr_in)).contents
				addr = socket.inet_ntop(fam, sa.sin_addr)
				if withMask:
					nm = ifa.ifa_netmask.contents
					if nm is not None and nm.sa_family == socket.AF_INET:
						nm = cast(pointer(nm), POINTER(struct_sockaddr_in)).contents
						addr += '/'+socket.inet_ntop(fam, nm.sin_addr)
				return IPAddr(addr)
			elif fam == socket.AF_INET6:
				sa = cast(pointer(sa), POINTER(struct_sockaddr_in6)).contents
				addr = socket.inet_ntop(fam, sa.sin6_addr)
				if withMask:
					nm = ifa.ifa_netmask.contents
					if nm is not None and nm.sa_family == socket.AF_INET6:
						nm = cast(pointer(nm), POINTER(struct_sockaddr_in6)).contents
						addr += '/'+socket.inet_ntop(fam, nm.sin6_addr)
				return IPAddr(addr)
			return None

		def _NetworkInterfacesAddrs(withMask=False):
			# real implementation: enumerate via getifaddrs/freeifaddrs
			ifap = POINTER(struct_ifaddrs)()
			result = libc.getifaddrs(pointer(ifap))
			if result != 0:
				raise OSError(get_errno())
			del result
			try:
				for ifa in ifap_iter(ifap):
					name = ifa.ifa_name.decode("UTF-8")
					addr = getfamaddr(ifa, withMask)
					if addr:
						yield name, addr
			finally:
				libc.freeifaddrs(ifap)

	except Exception as e: # pragma: no cover
		# platform without ctypes/getifaddrs - install raising stub:
		_init_error = NotImplementedError(e)
		def _NetworkInterfacesAddrs():
			raise _init_error

	# rebind for all subsequent calls, then delegate this first call:
	DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
	return _NetworkInterfacesAddrs(withMask)

DNSUtils._NetworkInterfacesAddrs = staticmethod(_NetworkInterfacesAddrs);
|
||||
353
fail2ban-master/fail2ban/server/jail.py
Normal file
353
fail2ban-master/fail2ban/server/jail.py
Normal file
@@ -0,0 +1,353 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
|
||||
__author__ = "Cyril Jaquier, Lee Clemens, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Lee Clemens, 2012 Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
import logging
|
||||
import math
|
||||
import random
|
||||
import queue
|
||||
|
||||
from .actions import Actions
|
||||
from ..helpers import getLogger, _as_bool, extractOptions, MyTime
|
||||
from .mytime import MyTime
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Jail(object):
	"""Fail2Ban jail, which manages a filter and associated actions.

	The class handles the initialisation of a filter, and actions. It's
	role is then to act as an interface between the filter and actions,
	passing bans detected by the filter, for the actions to then act upon.

	Parameters
	----------
	name : str
		Name assigned to the jail.
	backend : str
		Backend to be used for filter. "auto" will attempt to pick
		the most preferred backend method. Default: "auto"
	db : Fail2BanDb
		Fail2Ban persistent database instance. Default: `None`

	Attributes
	----------
	name
	database
	filter
	actions
	idle
	status
	"""

	#Known backends. Each backend should have corresponding __initBackend method
	# yoh: stored in a list instead of a tuple since only
	# list had .index until 2.6
	_BACKENDS = ['pyinotify', 'polling', 'systemd']

	def __init__(self, name, backend = "auto", db=None):
		self.__db = db
		# 26 based on iptable chain name limit of 30 less len('f2b-')
		if len(name) >= 26:
			logSys.warning("Jail name %r might be too long and some commands "
							"might not function correctly. Please shorten"
							% name)
		self.__name = name
		# queue of fail tickets handed over from the filter to the actions thread
		self.__queue = queue.Queue()
		self.__filter = None
		# Extra parameters for increase ban time
		self._banExtra = {};
		logSys.info("Creating new jail '%s'" % self.name)
		# backend that was actually initiated (may differ from the requested one
		# when "auto" falls back through _BACKENDS)
		self._realBackend = None
		if backend is not None:
			self._realBackend = self._setBackend(backend)
		self.backend = backend

	def __repr__(self):
		return "%s(%r)" % (self.__class__.__name__, self.name)

	def _setBackend(self, backend):
		"""Initiate the filter for the requested backend.

		Accepts "name[option=value,...]" syntax via extractOptions; with "auto"
		every backend in _BACKENDS is tried in order until one initializes.
		Returns the name of the backend actually initiated; raises ValueError
		for an unknown backend name, RuntimeError if none could be initiated.
		"""
		backend, beArgs = extractOptions(backend)
		backend = backend.lower() # to assure consistent matching

		backends = self._BACKENDS
		if backend != 'auto':
			# we have got strict specification of the backend to use
			if not (backend in self._BACKENDS):
				logSys.error("Unknown backend %s. Must be among %s or 'auto'"
					% (backend, backends))
				raise ValueError("Unknown backend %s. Must be among %s or 'auto'"
					% (backend, backends))
			# so explore starting from it till the 'end'
			backends = backends[backends.index(backend):]

		for b in backends:
			initmethod = getattr(self, '_init%s' % b.capitalize())
			try:
				initmethod(**beArgs)
				if backend != 'auto' and b != backend:
					logSys.warning("Could only initiated %r backend whenever "
								"%r was requested" % (b, backend))
				else:
					logSys.info("Initiated %r backend" % b)
				self.__actions = Actions(self)
				return b # we are done
			except ImportError as e: # pragma: no cover
				# Log debug if auto, but error if specific
				logSys.log(
					logging.DEBUG if backend == "auto" else logging.ERROR,
					"Backend %r failed to initialize due to %s" % (b, e))
		# pragma: no cover
		# log error since runtime error message isn't printed, INVALID COMMAND
		logSys.error(
			"Failed to initialize any backend for Jail %r" % self.name)
		raise RuntimeError(
			"Failed to initialize any backend for Jail %r" % self.name)

	def _initPolling(self, **kwargs):
		# polling backend needs no extra module; import is local to avoid cycles
		from .filterpoll import FilterPoll
		logSys.info("Jail '%s' uses poller %r" % (self.name, kwargs))
		self.__filter = FilterPoll(self, **kwargs)

	def _initPyinotify(self, **kwargs):
		# Try to import pyinotify
		from .filterpyinotify import FilterPyinotify
		logSys.info("Jail '%s' uses pyinotify %r" % (self.name, kwargs))
		self.__filter = FilterPyinotify(self, **kwargs)

	def _initSystemd(self, **kwargs): # pragma: systemd no cover
		# Try to import systemd
		from .filtersystemd import FilterSystemd
		logSys.info("Jail '%s' uses systemd %r" % (self.name, kwargs))
		self.__filter = FilterSystemd(self, **kwargs)

	@property
	def name(self):
		"""Name of jail.
		"""
		return self.__name

	@property
	def database(self):
		"""The database used to store persistent data for the jail.
		"""
		return self.__db

	@database.setter
	def database(self, value):
		self.__db = value;

	@property
	def filter(self):
		"""The filter which the jail is using to monitor log files.
		"""
		return self.__filter

	@property
	def actions(self):
		"""Actions object used to manage actions for jail.
		"""
		return self.__actions

	@property
	def idle(self):
		"""A boolean indicating whether jail is idle.
		"""
		return self.filter.idle or self.actions.idle

	@idle.setter
	def idle(self, value):
		self.filter.idle = value
		self.actions.idle = value

	def status(self, flavor="basic"):
		"""The status of the jail.

		For flavor "stats" returns [backend, filter-stats, action-stats];
		otherwise a list of ("Filter", ...), ("Actions", ...) pairs.
		"""
		fstat = self.filter.status(flavor=flavor)
		astat = self.actions.status(flavor=flavor)
		if flavor == "stats":
			# NOTE(review): `backend` below is computed but not used in the
			# return (self._realBackend or self.backend is returned instead);
			# looks like a leftover - confirm before removing.
			backend = type(self.filter).__name__.replace('Filter', '').lower()
			return [self._realBackend or self.backend, fstat, astat]
		return [
			("Filter", fstat),
			("Actions", astat),
		]

	@property
	def hasFailTickets(self):
		"""Retrieve whether queue has tickets to ban.
		"""
		return not self.__queue.empty()

	def putFailTicket(self, ticket):
		"""Add a fail ticket to the jail.

		Used by filter to add a failure for banning.
		"""
		self.__queue.put(ticket)
		# add ban to database moved to observer (should previously check not already banned
		# and increase ticket time if "bantime.increment" set)

	def getFailTicket(self):
		"""Get a fail ticket from the jail.

		Used by actions to get a failure for banning.
		Returns False (not None) when the queue is empty.
		"""
		try:
			ticket = self.__queue.get(False)
			return ticket
		except queue.Empty:
			return False

	def setBanTimeExtra(self, opt, value):
		"""Set one "bantime.<opt>" option and rebuild the ban-time formula.

		Recognized opts include: increment, formula, factor, multipliers,
		maxtime, rndtime. The compiled evaluator is stored as be['evformula'].
		"""
		# merge previous extra with new option:
		be = self._banExtra;
		if value == '':
			value = None
		if value is not None:
			be[opt] = value;
		elif opt in be:
			del be[opt]
		logSys.info('Set banTime.%s = %s', opt, value)
		if opt == 'increment':
			be[opt] = _as_bool(value)
			if be.get(opt) and self.database is None:
				logSys.warning("ban time increment is not available as long jail database is not set")
		if opt in ['maxtime', 'rndtime']:
			if not value is None:
				be[opt] = MyTime.str2seconds(value)
		# prepare formula lambda:
		if opt in ['formula', 'factor', 'maxtime', 'rndtime', 'multipliers'] or be.get('evformula', None) is None:
			# split multifiers to an array begins with 0 (or empty if not set):
			if opt == 'multipliers':
				be['evmultipliers'] = [int(i) for i in (value.split(' ') if value is not None and value != '' else [])]
			# if we have multifiers - use it in lambda, otherwise compile and use formula within lambda
			multipliers = be.get('evmultipliers', [])
			# NOTE(review): factor/formula come from jail configuration and are
			# eval'ed/compiled below - they must never carry untrusted input.
			banFactor = eval(be.get('factor', "1"))
			if len(multipliers):
				# last multiplier is reused once ban.Count exceeds the list
				evformula = lambda ban, banFactor=banFactor: (
					ban.Time * banFactor * multipliers[ban.Count if ban.Count < len(multipliers) else -1]
				)
			else:
				formula = be.get('formula', 'ban.Time * (1<<(ban.Count if ban.Count<20 else 20)) * banFactor')
				formula = compile(formula, '~inline-conf-expr~', 'eval')
				evformula = lambda ban, banFactor=banFactor, formula=formula: max(ban.Time, eval(formula))
			# extend lambda with max time :
			if not be.get('maxtime', None) is None:
				maxtime = be['maxtime']
				evformula = lambda ban, evformula=evformula: min(evformula(ban), maxtime)
			# mix lambda with random time (to prevent bot-nets to calculate exact time IP can be unbanned):
			if not be.get('rndtime', None) is None:
				rndtime = be['rndtime']
				evformula = lambda ban, evformula=evformula: (evformula(ban) + random.random() * rndtime)
			# set to extra dict:
			be['evformula'] = evformula
		#logSys.info('banTimeExtra : %s' % json.dumps(be))

	def getBanTimeExtra(self, opt=None):
		"""Return one bantime extra option, or the whole dict if opt is None."""
		if opt is not None:
			return self._banExtra.get(opt, None)
		return self._banExtra

	def getMaxBanTime(self):
		"""Returns max possible ban-time of jail.
		"""
		return self._banExtra.get("maxtime", -1) \
			if self._banExtra.get('increment') else self.actions.getBanTime()

	def restoreCurrentBans(self, correctBanTime=True):
		"""Restore any previous valid bans from the database.

		Tickets restored here are flagged `restored` so they are not written
		back to the database; obsolete (already expired) tickets are skipped.
		"""
		try:
			if self.database is not None:
				if self._banExtra.get('increment'):
					forbantime = None;
					if correctBanTime:
						correctBanTime = self.getMaxBanTime()
				else:
					# use ban time as search time if we have not enabled a increasing:
					forbantime = self.actions.getBanTime()
				for ticket in self.database.getCurrentBans(jail=self, forbantime=forbantime,
					correctBanTime=correctBanTime, maxmatches=self.filter.failManager.maxMatches
				):
					try:
						# mark ticked was restored from database - does not put it again into db:
						ticket.restored = True
						#logSys.debug('restored ticket: %s', ticket)
						if self.filter._inIgnoreIPList(ticket.getID(), ticket): continue
						# correct start time / ban time (by the same end of ban):
						btm = ticket.getBanTime(forbantime)
						diftm = MyTime.time() - ticket.getTime()
						if btm != -1 and diftm > 0:
							btm -= diftm
						# ignore obsolete tickets:
						if btm != -1 and btm <= 0:
							continue
						self.putFailTicket(ticket)
					except Exception as e: # pragma: no cover
						logSys.error('Restore ticket failed: %s', e,
							exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
		except Exception as e: # pragma: no cover
			logSys.error('Restore bans failed: %s', e,
				exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)

	def start(self):
		"""Start the jail, by starting filter and actions threads.

		Once stated, also queries the persistent database to reinstate
		any valid bans.
		"""
		logSys.debug("Starting jail %r", self.name)
		self.filter.start()
		self.actions.start()
		self.restoreCurrentBans()
		logSys.info("Jail %r started", self.name)

	def stop(self, stop=True, join=True):
		"""Stop the jail, by stopping filter and actions threads.

		With stop=False only joins already-stopping threads; with join=False
		only signals them to stop without waiting.
		"""
		if stop:
			logSys.debug("Stopping jail %r", self.name)
		for obj in (self.filter, self.actions):
			try:
				## signal to stop filter / actions:
				if stop:
					if obj.isAlive():
						obj.stop()
					obj.done(); # and clean-up everything
				## wait for end of threads:
				if join:
					obj.join()
			except Exception as e:
				logSys.error("Stop %r of jail %r failed: %s", obj, self.name, e,
					exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
		if join:
			logSys.info("Jail %r stopped", self.name)

	def isAlive(self):
		"""Check jail "isAlive" by checking filter and actions threads.
		"""
		return self.filter.isAlive() or self.actions.isAlive()
|
||||
107
fail2ban-master/fail2ban/server/jails.py
Normal file
107
fail2ban-master/fail2ban/server/jails.py
Normal file
@@ -0,0 +1,107 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2013- Yaroslav Halchenko"
|
||||
__license__ = "GPL"
|
||||
|
||||
from threading import Lock
|
||||
try:
|
||||
from collections.abc import Mapping
|
||||
except ImportError:
|
||||
from collections import Mapping
|
||||
|
||||
from ..exceptions import DuplicateJailException, UnknownJailException
|
||||
from .jail import Jail
|
||||
|
||||
|
||||
class Jails(Mapping):
|
||||
"""Handles the jails.
|
||||
|
||||
This class handles the jails. Creation, deletion or access to a jail
|
||||
must be done through this class. This class is thread-safe which is
|
||||
not the case of the jail itself, including filter and actions. This
|
||||
class is based on Mapping type, and the `add` method must be used to
|
||||
add additional jails.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.__lock = Lock()
|
||||
self._jails = dict()
|
||||
|
||||
def add(self, name, backend, db=None):
|
||||
"""Adds a jail.
|
||||
|
||||
Adds a new jail if not already present which should use the
|
||||
given backend.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
name : str
|
||||
The name of the jail.
|
||||
backend : str
|
||||
The backend to use.
|
||||
db : Fail2BanDb
|
||||
Fail2Ban's persistent database instance.
|
||||
|
||||
Raises
|
||||
------
|
||||
DuplicateJailException
|
||||
If jail name is already present.
|
||||
"""
|
||||
with self.__lock:
|
||||
if name in self._jails:
|
||||
raise DuplicateJailException(name)
|
||||
else:
|
||||
self._jails[name] = Jail(name, backend, db)
|
||||
|
||||
def exists(self, name):
|
||||
return name in self._jails
|
||||
|
||||
def __getitem__(self, name):
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
return self._jails[name]
|
||||
except KeyError:
|
||||
raise UnknownJailException(name)
|
||||
finally:
|
||||
self.__lock.release()
|
||||
|
||||
def __delitem__(self, name):
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
del self._jails[name]
|
||||
except KeyError:
|
||||
raise UnknownJailException(name)
|
||||
finally:
|
||||
self.__lock.release()
|
||||
|
||||
def __len__(self):
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
return len(self._jails)
|
||||
finally:
|
||||
self.__lock.release()
|
||||
|
||||
def __iter__(self):
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
return iter(self._jails)
|
||||
finally:
|
||||
self.__lock.release()
|
||||
143
fail2ban-master/fail2ban/server/jailthread.py
Normal file
143
fail2ban-master/fail2ban/server/jailthread.py
Normal file
@@ -0,0 +1,143 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import sys
|
||||
from threading import Thread
|
||||
from abc import abstractmethod
|
||||
|
||||
from .utils import Utils
|
||||
from ..helpers import excepthook, prctl_set_th_name
|
||||
|
||||
|
||||
class JailThread(Thread):
	"""Abstract class for threading elements in Fail2Ban.

	Attributes
	----------
	daemon
	ident
	name
	status
	active : bool
		Control the state of the thread.
	idle : bool
		Control the idle state of the thread.
	sleeptime : int
		The time the thread sleeps for in the loop.
	"""

	def __init__(self, name=None):
		super(JailThread, self).__init__(name=name)
		## Should going with main thread also:
		self.daemon = True
		## Control the state of the thread (None - was not started, True - active, False - stopped).
		self.active = None
		## Control the idle state of the thread.
		self.idle = False
		## The time the thread sleeps in the loop.
		self.sleeptime = Utils.DEFAULT_SLEEP_TIME

		# excepthook workaround for threads, derived from:
		# http://bugs.python.org/issue1230540#msg91244
		run = self.run
		def run_with_except_hook(*args, **kwargs):
			try:
				run(*args, **kwargs)
				# call on stop callback to do some finalizations:
				self.onStop()
			except Exception as e:
				# avoid very sporadic error "'NoneType' object has no attribute 'exc_info'" (https://bugs.python.org/issue7336)
				# only extremely fast systems are affected ATM (2.7 / 3.x), if thread ends nothing is available here.
				if sys is not None:
					excepthook(*sys.exc_info())
				else:
					print(e)
		self.run = run_with_except_hook

	def _bootstrap(self):
		# set the OS-level thread name (e.g. visible in ps/top) before the
		# regular Thread bootstrap runs
		prctl_set_th_name(self.name)
		return super(JailThread, self)._bootstrap();

	@abstractmethod
	def status(self, flavor="basic"): # pragma: no cover - abstract
		"""Abstract - Should provide status information.
		"""
		pass

	def start(self):
		"""Sets active flag and starts thread.
		"""
		self.active = True
		super(JailThread, self).start()

	@abstractmethod
	def onStop(self): # pragma: no cover - absract
		"""Abstract - Called when thread ends (after run).
		"""
		pass

	def stop(self):
		"""Sets `active` property to False, to flag run method to return.
		"""
		if self.active: self.active = False
		# normally onStop will be called automatically in thread after its run ends,
		# but for backwards compatibilities we'll invoke it in caller of stop method.
		self.onStop()
		# neutralize onStop, so it cannot be invoked a second time from run:
		self.onStop = lambda:()
		self.done()

	def done(self):
		"""One-shot clean-up: waits shortly for the thread to end, then calls afterStop."""
		# self-neutralize first, so clean-up runs at most once (also via join):
		self.done = lambda:()
		# if still runniung - wait a bit before initiate clean-up:
		if self.is_alive():
			Utils.wait_for(lambda: not self.is_alive(), 5)
		# now clean-up everything:
		self.afterStop()

	@abstractmethod
	def run(self): # pragma: no cover - absract
		"""Abstract - Called when thread starts, thread stops when returns.
		"""
		pass

	def afterStop(self):
		"""Cleanup resources."""
		pass

	def join(self):
		""" Safer join, that could be called also for not started (or ended) threads (used for cleanup).
		"""
		## if cleanup needed - create derivative and call it before join...
		self.done()
		## if was really started - should call join:
		if self.active is not None:
			super(JailThread, self).join()
|
||||
|
||||
## Compatibility shim: Thread.isAlive() was removed in Python 3.9 (in favour of
## is_alive()); restore the camelCase name as an alias for legacy callers.
if not hasattr(JailThread, 'isAlive'):
	JailThread.isAlive = JailThread.is_alive
|
||||
235
fail2ban-master/fail2ban/server/mytime.py
Normal file
235
fail2ban-master/fail2ban/server/mytime.py
Normal file
@@ -0,0 +1,235 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import datetime
|
||||
import re
|
||||
import time
|
||||
|
||||
|
||||
##
|
||||
# MyTime class.
|
||||
#
|
||||
|
||||
class MyTime:
	"""A wrapper around time module primarily for testing purposes

	This class is a wrapper around time.time() and time.gmtime(). When
	performing unit test, it is very useful to get a fixed value from
	these functions. Thus, time.time() and time.gmtime() should never
	be called directly. This wrapper should be called instead. The API
	are equivalent.
	"""

	# fixed "current" unix time used instead of the real clock (None = real time)
	myTime = None
	# unix time for which `alternateNow` should be returned by now()
	alternateNowTime = None
	# pre-built datetime returned by now() when myTime == alternateNowTime
	alternateNow = None

	@staticmethod
	def setAlternateNow(t):
		"""Set current time.

		Use None in order to always get the real current time.

		@param t the time to set or None
		"""
		MyTime.alternateNowTime = t
		MyTime.alternateNow = \
			datetime.datetime.fromtimestamp(t) if t is not None else None

	@staticmethod
	def setTime(t):
		"""Set current time.

		Use None in order to always get the real current time.

		@param t the time to set or None
		"""
		MyTime.myTime = t

	@staticmethod
	def time():
		"""Decorate time.time() for the purpose of testing mocking

		@return time.time() if setTime was called with None
		"""
		if MyTime.myTime is None:
			return time.time()
		else:
			return MyTime.myTime

	@staticmethod
	def gmtime():
		"""Decorate time.gmtime() for the purpose of testing mocking

		@return time.gmtime() if setTime was called with None
		"""
		if MyTime.myTime is None:
			return time.gmtime()
		else:
			return time.gmtime(MyTime.myTime)

	@staticmethod
	def now():
		"""Decorate datetime.now() for the purpose of testing mocking

		@return datetime.now() if setTime was called with None
		"""
		if MyTime.myTime is None:
			return datetime.datetime.now()
		# reuse the prepared datetime when the mocked time matches it exactly
		if MyTime.myTime == MyTime.alternateNowTime:
			return MyTime.alternateNow
		return datetime.datetime.fromtimestamp(MyTime.myTime)

	@staticmethod
	def localtime(x=None):
		"""Decorate time.localtime() for the purpose of testing mocking

		@return time.localtime() if setTime was called with None
		"""
		# an explicit timestamp argument always bypasses the mock
		if MyTime.myTime is None or x is not None:
			return time.localtime(x)
		else:
			return time.localtime(MyTime.myTime)

	@staticmethod
	def time2str(unixTime, format="%Y-%m-%d %H:%M:%S"):
		"""Convert time to a string representing as date and time using given format.
		Default format is ISO 8601, YYYY-MM-DD HH:MM:SS without microseconds.

		@return ISO-capable string representation of given unixTime
		"""
		# consider end of 9999th year (in GMT+23 to avoid year overflow in other TZ)
		dt = datetime.datetime.fromtimestamp(
			unixTime).replace(microsecond=0
			) if unixTime < 253402214400 else datetime.datetime(9999, 12, 31, 23, 59, 59)
		return dt.strftime(format)

	## precreate/precompile primitives used in str2seconds:

	## preparing expression: insert a space between a unit and a following digit
	_str2sec_prep = re.compile(r"(?i)(?<=[a-z])(\d)")
	## finally expression: join space-separated numbers with "+"
	_str2sec_fini = re.compile(r"(\d)\s+(\d)")
	## wrapper for each sub part:
	_str2sec_subpart = r"(?i)(?<=[\d\s])(%s)\b"
	## parts to be replaced - pair of (regexp x replacement);
	## a month/year is defined as the 4-year (incl. one leap year) average
	_str2sec_parts = (
		(re.compile(_str2sec_subpart % r"days?|da|dd?"), "*"+str(24*60*60)),
		(re.compile(_str2sec_subpart % r"weeks?|wee?|ww?"), "*"+str(7*24*60*60)),
		(re.compile(_str2sec_subpart % r"months?|mon?"), "*"+str((365*3+366)*24*60*60/4/12)),
		(re.compile(_str2sec_subpart % r"years?|yea?|yy?"), "*"+str((365*3+366)*24*60*60/4)),
		(re.compile(_str2sec_subpart % r"seconds?|sec?|ss?"), "*"+str(1)),
		(re.compile(_str2sec_subpart % r"minutes?|min?|mm?"), "*"+str(60)),
		(re.compile(_str2sec_subpart % r"hours?|hou?|hh?"), "*"+str(60*60)),
	)

	@staticmethod
	def str2seconds(val):
		"""Wraps string expression like "1h 2m 3s" into number contains seconds (3723).
		The string expression will be evaluated as mathematical expression, spaces between each groups
		will be wrapped to "+" operand (only if any operand does not specified between).
		Because of case insensitivity and overwriting with minutes ("m" or "mm"), the short replacement for month
		are "mo" or "mon".
		Ex: 1hour+30min = 5400
		    0d 1h 30m = 5400
		    1year-6mo = 15778800
		    6 months = 15778800
		warn: month is not 30 days, it is a year in seconds / 12, the leap years will be respected also:
		>>>> float(str2seconds("1month")) / 60 / 60 / 24
		30.4375
		>>>> float(str2seconds("1year")) / 60 / 60 / 24
		365.25

		@returns number (calculated seconds from expression "val")
		"""
		if isinstance(val, (int, float, complex)):
			return val
		# replace together standing abbreviations, example '1d12h' -> '1d 12h':
		val = MyTime._str2sec_prep.sub(r" \1", val)
		# replace abbreviation with expression:
		for rexp, rpl in MyTime._str2sec_parts:
			val = rexp.sub(rpl, val)
		val = MyTime._str2sec_fini.sub(r"\1+\2", val)
		# NOTE(review): the resulting arithmetic expression is eval'ed below;
		# callers must pass trusted configuration values only, never user input.
		return eval(val)
|
||||
|
||||
class seconds2str():
	"""Lazily renders a duration in seconds as a short human-readable string.

	Conversion only happens on demand, when the object is turned into a
	string via str() or repr().
	Ex: seconds2str(86400*390) = 1y 3w 4d
	    seconds2str(86400*368) = 1y 3d
	    seconds2str(86400*365.5) = 1y
	    seconds2str(86400*2+3600*7+60*15) = 2d 7h 15m
	    seconds2str(86400*2+3599) = 2d 1h
	    seconds2str(3600-5) = 1h
	    seconds2str(3600-10) = 59m 50s
	    seconds2str(59) = 59s
	"""

	def __init__(self, sec):
		# raw duration in seconds; formatting is deferred until __str__
		self.sec = sec

	def __str__(self):
		secs = self.sec
		# The rounding unit grows with the magnitude, so at most three
		# groups are ever emitted.
		if secs >= 31536000:
			# at least one (non-leap) year as 365*24*60*60: round to whole days
			days = int(round(float(secs)/86400))
			res = str(days//365) + 'y '
			days %= 365
			if days >= 7:
				res += str(days//7) + 'w '
				days %= 7
			if days:
				res += str(days) + 'd '
			return res[:-1]
		if secs >= 604800:
			# at least one week as 24*60*60*7: round to whole hours
			hours = int(round(float(secs)/3600))
			res = str(hours//168) + 'w '
			hours %= 168
			if hours >= 24:
				res += str(hours//24) + 'd '
				hours %= 24
			if hours:
				res += str(hours) + 'h '
			return res[:-1]
		if secs >= 86400:
			# at least one day as 24*60*60: round to whole minutes
			mins = int(round(float(secs)/60))
			res = str(mins//1440) + 'd '
			mins %= 1440
			if mins >= 60:
				res += str(mins//60) + 'h '
				mins %= 60
			if mins:
				res += str(mins) + 'm '
			return res[:-1]
		if secs >= 3595:
			# close to an hour (60*60 less 5 seconds): round to 10-second steps
			tens = int(round(float(secs)/10))
			res = str(tens//360) + 'h '
			tens %= 360
			if tens >= 6: # at least a whole minute remains
				res += str(tens//6) + 'm '
				tens %= 6
			return res[:-1]
		# below an hour: exact minutes and seconds, no rounding
		res = ''
		if secs >= 60:
			res += str(secs//60) + 'm '
			secs %= 60
		if secs:
			res += str(secs) + 's '
		elif not self.sec:
			# a zero duration renders as "0"
			res = '0 '
		return res[:-1]

	def __repr__(self):
		return self.__str__()
|
||||
529
fail2ban-master/fail2ban/server/observer.py
Normal file
529
fail2ban-master/fail2ban/server/observer.py
Normal file
@@ -0,0 +1,529 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Serg G. Brester (sebres)
|
||||
#
|
||||
# This module was written as part of ban time increment feature.
|
||||
|
||||
__author__ = "Serg G. Brester (sebres)"
|
||||
__copyright__ = "Copyright (c) 2014 Serg G. Brester"
|
||||
__license__ = "GPL"
|
||||
|
||||
import threading
|
||||
from .jailthread import JailThread
|
||||
from .failmanager import FailManagerEmpty
|
||||
import os, logging, time, datetime, math, json, random
|
||||
import sys
|
||||
from ..helpers import getLogger
|
||||
from .mytime import MyTime
|
||||
from .utils import Utils
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
class ObserverThread(JailThread):
|
||||
"""Handles observing a database, managing bad ips and ban increment.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
||||
Attributes
|
||||
----------
|
||||
daemon
|
||||
ident
|
||||
name
|
||||
status
|
||||
active : bool
|
||||
Control the state of the thread.
|
||||
idle : bool
|
||||
Control the idle state of the thread.
|
||||
sleeptime : int
|
||||
The time the thread sleeps for in the loop.
|
||||
"""
|
||||
|
||||
# observer is event driven and it sleep organized incremental, so sleep intervals can be shortly:
|
||||
DEFAULT_SLEEP_INTERVAL = Utils.DEFAULT_SLEEP_INTERVAL / 10
|
||||
|
||||
def __init__(self):
	"""Initialize the observer in idle state with an empty event queue."""
	# init base thread:
	super(ObserverThread, self).__init__(name='f2b/observer')
	# not yet started, thus idle:
	self.idle = True
	## event queue and the lock guarding it:
	self._queue_lock = threading.RLock()
	self._queue = []
	## event object signaled whenever something is appended to the queue:
	self._notify = threading.Event()
	## sleep for max 60 seconds; infinite would also work (wakeup via notify event),
	## but this way infrequent service "events" can run directly in the main loop
	## of the observer (not using the queue)
	self.sleeptime = 60
	# named timers (see add_named_timer):
	self._timers = {}
	self._paused = False
	# database reference and purge interval (one hour):
	self.__db = None
	self.__db_purge_interval = 60*60
	# observer is a not main thread:
	self.daemon = True
|
||||
|
||||
def __getitem__(self, i):
	"""Return the queued event at index *i*; raise KeyError for an invalid index."""
	try:
		return self._queue[i]
	except IndexError:
		# bugfix: indexing a list raises IndexError, not KeyError - the former
		# "except KeyError" could never fire, so the intended error was dead code
		raise KeyError("Invalid event index : %s" % i)
|
||||
|
||||
def __delitem__(self, i):
	"""Delete the queued event at index *i*; raise KeyError for an invalid index."""
	try:
		del self._queue[i]
	except IndexError:
		# bugfix: "del list[i]" raises IndexError, not KeyError - the former
		# "except KeyError" could never fire, so the intended error was dead code
		raise KeyError("Invalid event index: %s" % i)
|
||||
|
||||
def __iter__(self):
	"""Iterate over the currently queued events."""
	return iter(self._queue)

def __len__(self):
	"""Number of currently queued events."""
	return len(self._queue)
|
||||
|
||||
def __eq__(self, other): # Required for Threading
	# observer instances never compare equal to anything
	return False

def __hash__(self): # Required for Threading
	# identity hash, consistent with the __eq__ above
	return id(self)
|
||||
|
||||
def add_named_timer(self, name, starttime, *event):
	"""Queue *event* (waking the observer) in 'starttime' seconds, registered under *name*.

	A still pending timer registered under the same name is canceled first, so
	the event triggers only once, 'starttime' seconds from now.
	"""
	pending = self._timers.get(name, None)
	if pending is not None:
		pending.cancel()
	timer = threading.Timer(starttime, self.add, event)
	self._timers[name] = timer
	timer.start()
|
||||
|
||||
def add_timer(self, starttime, *event):
	"""Queue *event* (waking the observer) in 'starttime' seconds."""
	# under test (mocked time active) wait looping, to honor possible time drifts:
	if MyTime.myTime is not None and starttime:
		# re-test the time after a short sleep:
		timer = threading.Timer(Utils.DEFAULT_SLEEP_INTERVAL, self._delayedEvent,
			(MyTime.time() + starttime, time.time() + starttime, event)
		)
		timer.start()
		return
	# normal case - plain one-shot timer event:
	timer = threading.Timer(starttime, self.add, event)
	timer.start()
|
||||
|
||||
def _delayedEvent(self, endMyTime, endTime, event):
	"""Polling helper for add_timer under mocked time: fire *event* as soon as
	either the mocked or the real deadline has been reached, else re-arm after
	a short sleep."""
	if MyTime.time() >= endMyTime or time.time() >= endTime:
		self.add_timer(0, *event)
		return
	# neither deadline reached yet - repeat after a short sleep:
	timer = threading.Timer(Utils.DEFAULT_SLEEP_INTERVAL, self._delayedEvent,
		(endMyTime, endTime, event)
	)
	timer.start()
|
||||
|
||||
def pulse_notify(self):
	"""Wake the observer loop by setting the notify event (no-op while paused
	or after the notify event was detached on shutdown)."""
	if self._paused:
		return
	notifier = self._notify
	if notifier:
		notifier.set()
|
||||
|
||||
def add(self, *event):
	"""Append *event* to the queue and notify the thread to wake up."""
	## lock, append, then signal outside the critical section:
	with self._queue_lock:
		self._queue.append(event)
	self.pulse_notify()
|
||||
|
||||
def add_wn(self, *event):
	"""Append *event* to the queue WITHOUT notifying the thread to wake up."""
	## lock and append only - no pulse:
	with self._queue_lock:
		self._queue.append(event)
|
||||
|
||||
def call_lambda(self, l, *args):
	"""Generic 'call' event handler: invoke callable *l* with *args*."""
	l(*args)
|
||||
|
||||
def run(self):
	"""Main loop for Threading.

	Pops events from the queue and dispatches them, sleeping on the notify
	event while the queue is empty.

	Returns
	-------
	bool
		True when the thread exits nicely.
	"""
	logSys.info("Observer start...")
	## initial named timer to purge the database each hour (clean old entries):
	self.add_named_timer('DB_PURGE', self.__db_purge_interval, 'db_purge')
	## dispatch table for all symbolic event types of the observer:
	dispatch = {
		# universal lambda:
		'call': self.call_lambda,
		# system and service events:
		'db_set': self.db_set,
		'db_purge': self.db_purge,
		# service events of observer self:
		'is_alive' : self.isAlive,
		'is_active': self.isActive,
		'start': self.start,
		'stop': self.stop,
		'nop': lambda:(),
		'shutdown': lambda:()
	}
	try:
		## self-check via an is_alive event:
		self.add('is_alive')
		## run until deactivated:
		while self.active:
			self.idle = False
			## drain the queue (unless paused):
			while not self._paused:
				try:
					## lock, check and pop one from begin of queue:
					event = None
					with self._queue_lock:
						if self._queue:
							event = self._queue.pop(0)
					if event is None:
						break
					## resolve handler: symbolic name via dispatch table or attribute lookup:
					handler = event[0]
					if not callable(handler):
						handler = dispatch.get(handler) or getattr(self, handler)
					## execute with the rest of the event as variable arguments:
					handler(*event[1:])
				except Exception as e:
					logSys.error('%s', e, exc_info=True)
			## queue drained - sleep, waiting for new events:
			notifier = self._notify
			if notifier:
				self.idle = True
				notifier.wait(self.sleeptime)
				## woke up - reset signal now (not needed while reading from queue):
				notifier.clear()
				if self._paused:
					continue
			else:
				## notify event detached (shutdown) - short sleep to avoid busy looping:
				time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
				## shutdown with empty queue - leave the main loop:
				if not self.is_full:
					break
		## end of main loop - exit
		logSys.info("Observer stopped, %s events remaining.", len(self._queue))
		self._notify = None
	except Exception as e:
		logSys.error('Observer stopped after error: %s', e, exc_info=True)
	# clear all events - exit, for possible calls of wait_empty:
	with self._queue_lock:
		self._queue = []
	self.idle = True
	return True
|
||||
|
||||
def isAlive(self):
	"""Handler of the 'is_alive' self-check event; always True."""
	return True
|
||||
|
||||
def isActive(self, fromStr=None):
	"""Return whether the observer is (still) active.

	*fromStr* is accepted for diagnostic callers only and otherwise unused.
	"""
	return self.active
|
||||
|
||||
def start(self):
	"""Start the observer thread (no-op when already active)."""
	with self._queue_lock:
		if not self.active:
			super(ObserverThread, self).start()
|
||||
|
||||
def stop(self, wtime=5, forceQuit=True):
	"""Shut the observer down, giving it up to *wtime* seconds to drain its queue.

	With *forceQuit* the loop is terminated even if events remain; otherwise the
	notify event is restored and the (possibly negative) drain status returned.
	"""
	if self.active and self._notify:
		logSys.info("Observer stop ... try to end queue %s seconds", wtime)
		# enqueue a shutdown job so we can wait later until the queue ran dry:
		with self._queue_lock:
			self.add_wn('shutdown')
			# don't pulse - just set, because the event gets detached right below
			# (a pulse would sometimes not wake the consumer anymore)
			notifier = self._notify
			self._notify.set()
			self._notify = None
		# wait max wtime seconds until no events remain:
		if self.wait_empty(wtime) or forceQuit:
			notifier.clear()
			self.active = False  # leave outer (active) loop
			self._paused = True  # leave inner (queue) loop
			self.__db = None
		else:
			self._notify = notifier
			return self.wait_idle(min(wtime, 0.5)) and not self.is_full
	return True
|
||||
|
||||
@property
def is_full(self):
	"""True if any events are currently queued."""
	with self._queue_lock:
		# idiom fix: truth-test the list instead of "True if len(...) else False"
		return bool(self._queue)
|
||||
|
||||
def wait_empty(self, sleeptime=None):
	"""Wait (up to *sleeptime* seconds) until the observer queue is empty.

	Returns True if the queue ran dry within the allowed time.
	"""
	time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
	if sleeptime is not None:
		deadline = MyTime.time() + sleeptime
	# enqueue a no-op to be sure all real jobs were executed once it left the queue:
	if self._notify is not None:
		self.add_wn('nop')
		if self.is_full and self.idle:
			self.pulse_notify()
	while self.is_full:
		if sleeptime is not None and MyTime.time() > deadline:
			break
		time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
	# wait idle to be sure the last popped element got processed
	# (elements are popped before being executed):
	self.wait_idle(0.001)
	return not self.is_full
|
||||
|
||||
|
||||
def wait_idle(self, sleeptime=None):
	"""Wait (up to *sleeptime* seconds) until the observer goes idle (sleeps)."""
	time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
	if self.idle:
		return True
	if sleeptime is not None:
		deadline = MyTime.time() + sleeptime
	while not self.idle:
		if sleeptime is not None and MyTime.time() > deadline:
			break
		time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
	return self.idle
|
||||
|
||||
@property
def paused(self):
	"""Whether event processing is currently suspended."""
	return self._paused
|
||||
|
||||
@paused.setter
def paused(self, pause):
	# ignore no-op transitions:
	if self._paused == pause:
		return
	self._paused = pause
	# wake the loop once a pause ended (and let it observe the new state):
	self.pulse_notify()
|
||||
|
||||
|
||||
@property
def status(self):
	"""Status of observer to be implemented. [TODO]"""
	return ('', '')
|
||||
|
||||
## -----------------------------------------
|
||||
## [Async] database service functionality ...
|
||||
## -----------------------------------------
|
||||
|
||||
def db_set(self, db):
	"""Event handler: attach (or with None detach) the fail2ban database."""
	self.__db = db
|
||||
|
||||
def db_purge(self):
	"""Event handler: purge old entries from the database, then re-arm the hourly timer."""
	logSys.debug("Purge database event occurred")
	if self.__db is not None:
		self.__db.purge()
	# trigger timer again ...
	self.add_named_timer('DB_PURGE', self.__db_purge_interval, 'db_purge')
|
||||
|
||||
## -----------------------------------------
|
||||
## [Async] ban time increment functionality ...
|
||||
## -----------------------------------------
|
||||
|
||||
def failureFound(self, jail, ticket):
	"""Notify observer a failure for ip was found.

	Looks the IP up in the database; if it is already known (bad), its retry
	count is increased exponentially with the ban count (one try then counts
	as 2, 3, 5, 9 ...), possibly re-banning immediately.
	"""
	# check jail active :
	if not jail.isAlive() or not jail.getBanTimeExtra("increment"):
		return
	ip = ticket.getID()
	unixTime = ticket.getTime()
	logSys.debug("[%s] Observer: failure found %s", jail.name, ip)
	banCount = 0
	retryCount = 1
	timeOfBan = None
	try:
		maxRetry = jail.filter.failManager.getMaxRetry()
		db = jail.database
		if db is not None:
			for banCount, timeOfBan, lastBanTime in db.getBan(ip, jail):
				banCount = max(banCount, ticket.getBanCount())
				# bugfix: use integer division - "/" yields a float retry count
				# under Python 3 true division (cap shift at 20 to bound growth):
				retryCount = (1 << (banCount if banCount < 20 else 20))//2 + 1
				break
			retryCount = min(retryCount, maxRetry)
		# check this ticket already known (line was already processed and in the
		# database and will be restored from there):
		if timeOfBan is not None and unixTime <= timeOfBan:
			logSys.debug("[%s] Ignore failure %s before last ban %s < %s, restored",
				jail.name, ip, unixTime, timeOfBan)
			return
		# for not increased failures observer should not add it to fail manager,
		# because it was already added by the filter itself:
		if retryCount <= 1:
			return
		# retry counter was increased - add it again:
		logSys.info("[%s] Found %s, bad - %s, %s # -> %s%s", jail.name, ip,
			MyTime.time2str(unixTime), banCount, retryCount,
			(', Ban' if retryCount >= maxRetry else ''))
		# retryCount-1, because a ticket was already once incremented by filter self
		retryCount = jail.filter.failManager.addFailure(ticket, retryCount - 1, True)
		ticket.setBanCount(banCount)
		# after observe we have increased attempt count, compare it >= maxretry ...
		if retryCount >= maxRetry:
			# perform the banning of the IP now (again)
			jail.filter.performBan(ip)
	except Exception as e:
		logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
|
||||
class BanTimeIncr:
	"""Value object handed to the configured ban-time 'evformula' expression.

	The attribute names (Time, Count) are part of the formula interface.
	"""
	def __init__(self, banTime, banCount):
		self.Time = banTime
		self.Count = banCount
|
||||
|
||||
def calcBanTime(self, jail, banTime, banCount):
	"""Evaluate the jail's ban-time formula for the given time and count."""
	extra = jail.getBanTimeExtra()
	return extra['evformula'](self.BanTimeIncr(banTime, banCount))
|
||||
|
||||
def incrBanTime(self, jail, banTime, ticket):
	"""Check for IP address to increment ban time (if was already banned).

	Returns
	-------
	float
		new ban time.
	"""
	# check jail active :
	if not jail.isAlive() or not jail.database:
		return banTime
	be = jail.getBanTimeExtra()
	ip = ticket.getID()
	orgBanTime = banTime
	# check ip was already banned (increment time of ban):
	try:
		if banTime > 0 and be.get('increment', False):
			# search IP in database and increase time if found:
			for banCount, timeOfBan, lastBanTime in \
				jail.database.getBan(ip, jail, overalljails=be.get('overalljails', False)) \
			:
				# increment count in ticket (if still not increased from banmanager, test-cases?):
				if banCount >= ticket.getBanCount():
					ticket.setBanCount(banCount+1)
				logSys.debug('IP %s was already banned: %s #, %s', ip, banCount, timeOfBan)
				# calculate new ban time
				if banCount > 0:
					banTime = be['evformula'](self.BanTimeIncr(banTime, banCount))
					ticket.setBanTime(banTime)
				# check current ticket time to prevent increasing for twice read tickets
				# (restored from log file besides database after restart)
				if ticket.getTime() > timeOfBan:
					# fix: lazy logging arguments instead of eager "%"-formatting
					# (consistent with the rest of the module, avoids work when not logged):
					logSys.info('[%s] IP %s is bad: %s # last %s - incr %s to %s',
						jail.name, ip, banCount,
						MyTime.time2str(timeOfBan),
						MyTime.seconds2str(orgBanTime), MyTime.seconds2str(banTime))
				else:
					ticket.restored = True
				break
	except Exception as e:
		logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
	return banTime
|
||||
|
||||
def banFound(self, ticket, jail, btime):
	""" Notify observer a ban occurred for ip

	Observer will check ip was known (bad) and possibly increase/prolong a ban time
	Secondary we will actualize the bans and bips (bad ip) in database
	"""
	if ticket.restored: # pragma: no cover (normally not resored tickets only)
		return
	try:
		btimeBefore = btime
		ip = ticket.getID()
		logSys.debug("[%s] Observer: ban found %s, %s", jail.name, ip, btime)
		# unless permanent or a ban time was already set - check whether it should grow:
		if btime != -1 and ticket.getBanTime() is None:
			btime = self.incrBanTime(jail, btime, ticket)
			# prolong the ban time if it grew (or became permanent):
			if btime == -1 or btime > btimeBefore:
				ticket.setBanTime(btime)
		# if not permanent
		if btime != -1:
			banEndTime = ticket.getTime() + btime
			logtime = (MyTime.seconds2str(btime), MyTime.time2str(banEndTime))
			# check ban is not too old :
			if banEndTime < MyTime.time():
				logSys.debug('Ignore old bantime %s', logtime[1])
				return False
		else:
			logtime = ('permanent', 'infinite')
		# if ban time was prolonged - log again with new ban time:
		if btime != btimeBefore:
			logSys.notice("[%s] Increase Ban %s (%d # %s -> %s)", jail.name,
				ip, ticket.getBanCount(), *logtime)
			# delayed prolonging ticket via actions that expected this (not later than 10 sec):
			logSys.log(5, "[%s] Observer: prolong %s in %s", jail.name, ip, (btime, btimeBefore))
			self.add_timer(min(10, max(0, btime - btimeBefore - 5)), self.prolongBan, ticket, jail)
		# add ticket to database, but only if was not restored (not already read from database):
		if jail.database is not None and not ticket.restored:
			# add to database always only after ban time was calculated an not yet already banned:
			jail.database.addBan(jail, ticket)
	except Exception as e:
		logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
def prolongBan(self, ticket, jail):
	"""Delayed event handler: propagate the prolonged ban time of *ticket* to
	the jail's actions (which expected this prolongation)."""
	try:
		btime = ticket.getBanTime()
		ip = ticket.getID()
		logSys.debug("[%s] Observer: prolong %s, %s", jail.name, ip, btime)
		# prolong ticket via actions that expected this:
		jail.actions._prolongBan(ticket)
	except Exception as e:
		logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
|
||||
|
||||
# Global observer initial created in server (could be later rewritten via singleton)
class _Observers:
	"""Registry of globally shared observer threads."""
	def __init__(self):
		# main observer thread; assigned by the server on startup:
		self.Main = None

Observers = _Observers()
|
||||
957
fail2ban-master/fail2ban/server/server.py
Normal file
957
fail2ban-master/fail2ban/server/server.py
Normal file
@@ -0,0 +1,957 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import threading
|
||||
from threading import Lock, RLock
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import stat
|
||||
import sys
|
||||
|
||||
from .observer import Observers, ObserverThread
|
||||
from .jails import Jails
|
||||
from .filter import DNSUtils, FileFilter, JournalFilter
|
||||
from .transmitter import Transmitter
|
||||
from .asyncserver import AsyncServer, AsyncServerException
|
||||
from .. import version
|
||||
from ..helpers import getLogger, _as_bool, extractOptions, str2LogLevel, \
|
||||
getVerbosityFormat, excepthook, prctl_set_th_name
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
DEF_SYSLOGSOCKET = "auto"
|
||||
DEF_LOGLEVEL = "INFO"
|
||||
DEF_LOGTARGET = "STDOUT"
|
||||
|
||||
try:
|
||||
from .database import Fail2BanDb
|
||||
except ImportError: # pragma: no cover
|
||||
# Dont print error here, as database may not even be used
|
||||
Fail2BanDb = None
|
||||
|
||||
|
||||
def _thread_name():
	"""Return the class name of the currently running thread."""
	return type(threading.current_thread()).__name__
|
||||
|
||||
def _make_file_path(name):
	"""Create the parent directory (last level only) of file *name* on demand.

	Acts only for absolute paths (e. g. important for socket, so if unix path);
	an already existing directory is not an error.
	"""
	name = os.path.dirname(name)
	# only if it is absolute (e. g. important for socket, so if unix path):
	if os.path.isabs(name):
		# be sure path exists (create last level of directory on demand);
		# FileExistsError replaces the former redundant "(OSError, FileExistsError)"
		# catch with its magic "errno != 17" re-raise - any other OSError propagates:
		try:
			os.mkdir(name)
		except FileExistsError:
			pass
|
||||
|
||||
|
||||
class Server:
|
||||
|
||||
def __init__(self, daemon=False):
	"""Create the server instance (not yet started); *daemon* selects daemon mode."""
	self.__loggingLock = Lock()
	self.__lock = RLock()
	self.__jails = Jails()
	self.__db = None
	self.__daemon = daemon
	self.__transm = Transmitter(self)
	self.__reload_state = {}
	# asynchronous communication server; created lazily in start():
	self.__asyncServer = None
	# logging configuration (resolved against defaults in start()):
	self.__logLevel = None
	self.__logTarget = None
	self.__verbose = None
	self.__syslogSocket = None
	# per-platform default syslog socket paths, used by the "auto" target:
	self.__autoSyslogSocketPaths = {
		'Darwin': '/var/run/syslog',
		'FreeBSD': '/var/run/log',
		'Linux': '/dev/log',
	}
	# original signal handlers, restored on quit:
	self.__prev_signals = {}
|
||||
|
||||
def __sigTERMhandler(self, signum, frame): # pragma: no cover - indirect tested
	"""Signal handler for SIGTERM/SIGINT: shut the server down."""
	logSys.debug("Caught signal %d. Exiting", signum)
	self.quit()
|
||||
|
||||
def __sigUSR1handler(self, signum, fname): # pragma: no cover - indirect tested
	"""Signal handler for SIGUSR1: flush the log targets."""
	logSys.debug("Caught signal %d. Flushing logs", signum)
	self.flushLogs()
|
||||
|
||||
def _rebindSignal(self, s, new):
	"""Install *new* handler for signal *s*, remembering the previous handler
	in __prev_signals so quit() can restore it."""
	self.__prev_signals[s] = signal.getsignal(s)
	signal.signal(s, new)
|
||||
|
||||
def start(self, sock, pidfile, force=False, observer=True, conf={}):
	"""Start the server: optionally daemonize, configure logging, install
	signal handlers, write the PID file, start the observer thread and run
	the communication loop on *sock* until shutdown.

	NOTE: the mutable default *conf* is only read here, never mutated.
	Returns False in the forked parent when daemonizing.
	"""
	# First set the mask to only allow access to owner
	os.umask(0o077)
	# Second daemonize before logging etc, because it will close all handles:
	if self.__daemon: # pragma: no cover
		logSys.info("Starting in daemon mode")
		ret = self.__createDaemon()
		# If forked parent - return here (parent process will configure server later):
		if ret is None:
			return False
		# If error:
		if not ret[0]:
			# bugfix: the former 'err = "...%s", ret[1:]' built a TUPLE (comma
			# instead of "%"), so the message was never actually formatted:
			err = "Could not create daemon %s" % (ret[1:],)
			logSys.error(err)
			raise ServerInitializationError(err)
		# We are daemon.

	# replace main thread (and process) name to identify server (for top/ps/pstree or diagnostic):
	prctl_set_th_name(conf.get("pname", "fail2ban-server"))

	# Set all logging parameters (or use default if not specified):
	self.__verbose = conf.get("verbose", None)
	self.setSyslogSocket(conf.get("syslogsocket",
		self.__syslogSocket if self.__syslogSocket is not None else DEF_SYSLOGSOCKET))
	self.setLogLevel(conf.get("loglevel",
		self.__logLevel if self.__logLevel is not None else DEF_LOGLEVEL))
	self.setLogTarget(conf.get("logtarget",
		self.__logTarget if self.__logTarget is not None else DEF_LOGTARGET))

	logSys.info("-"*50)
	logSys.info("Starting Fail2ban v%s", version.version)

	if self.__daemon: # pragma: no cover
		logSys.info("Daemon started")

	# Install signal handlers
	if _thread_name() == '_MainThread':
		for s in (signal.SIGTERM, signal.SIGINT):
			self._rebindSignal(s, self.__sigTERMhandler)
		self._rebindSignal(signal.SIGUSR1, self.__sigUSR1handler)

	# Ensure unhandled exceptions are logged
	sys.excepthook = excepthook

	# Creates a PID file.
	try:
		logSys.debug("Creating PID file %s", pidfile)
		_make_file_path(pidfile)
		# "with" guarantees the handle is closed even if the write fails:
		with open(pidfile, 'w') as pidFile:
			pidFile.write("%s\n" % os.getpid())
	except (OSError, IOError) as e: # pragma: no cover
		logSys.error("Unable to create PID file: %s", e)

	# Create observers and start it:
	if observer:
		if Observers.Main is None:
			Observers.Main = ObserverThread()
		Observers.Main.start()

	# Start the communication
	logSys.debug("Starting communication")
	try:
		_make_file_path(sock)
		self.__asyncServer = AsyncServer(self.__transm)
		self.__asyncServer.onstart = conf.get('onstart')
		self.__asyncServer.start(sock, force)
	except AsyncServerException as e:
		logSys.error("Could not start server: %s", e)

	# Stop (if not yet already executed):
	self.quit()

	# Removes the PID file.
	try:
		logSys.debug("Remove PID file %s", pidfile)
		os.remove(pidfile)
	except (OSError, IOError) as e: # pragma: no cover
		logSys.error("Unable to remove PID file: %s", e)
|
||||
|
||||
def quit(self):
	"""Shut the whole server down: communication, observer, jails, database."""
	# Prevent to call quit twice (rebind as instance attribute):
	self.quit = lambda: False

	logSys.info("Shutdown in progress...")

	# Stop communication first because if jail's unban action
	# tries to communicate via fail2ban-client we get a lockup
	# among threads. So the simplest resolution is to stop all
	# communications first (which should be ok anyways since we
	# are exiting)
	# See https://github.com/fail2ban/fail2ban/issues/7
	if self.__asyncServer is not None:
		self.__asyncServer.stop_communication()

	# Restore default signal handlers:
	if _thread_name() == '_MainThread':
		for sig, handler in self.__prev_signals.items():
			signal.signal(sig, handler)

	# Give observer a small chance to complete its work before exit
	observer = Observers.Main
	if observer is not None:
		if observer.stop(forceQuit=False):
			observer = None
		Observers.Main = None

	# Now stop all the jails
	self.stopAllJail()

	# Stop observer ultimately
	if observer is not None:
		observer.stop()

	# Explicit close database (server can leave in a thread,
	# so delayed GC can prevent committing changes)
	if self.__db:
		self.__db.close()
		self.__db = None

	# Stop async and exit
	if self.__asyncServer is not None:
		self.__asyncServer.stop()
		self.__asyncServer = None
	logSys.info("Exiting Fail2ban")
|
||||
|
||||
|
||||
def addJail(self, name, backend):
	"""Create jail *name* with *backend*; during a reload an existing jail is
	reloaded in place (same backend) or restarted (backend switch)."""
	createNew = True
	if self.__reload_state.get(name) and self.__jails.exists(name):
		jail = self.__jails[name]
		if jail.backend == backend:
			# same backend - plain reload:
			createNew = False
			logSys.info("Reload jail %r", name)
			# prevent to reload the same jail twice (temporary keep it in state, needed to commit reload):
			self.__reload_state[name] = None
		else:
			# backend switch - restart instead of reload:
			logSys.info("Restart jail %r (reason: %r != %r)", name, jail.backend, backend)
			self.delJail(name, stop=True)
			# prevent to start the same jail twice (no reload more - restart):
			del self.__reload_state[name]
	if createNew:
		self.__jails.add(name, backend, self.__db)
		if self.__db is not None:
			self.__db.addJail(self.__jails[name])
|
||||
|
||||
def delJail(self, name, stop=True, join=True):
	"""Stop (with *stop*) and - with *join* - wait for and unregister jail *name*."""
	jail = self.__jails[name]
	if join or jail.isAlive():
		jail.stop(stop=stop, join=join)
	if join:
		if self.__db is not None:
			self.__db.delJail(jail)
		del self.__jails[name]
|
||||
|
||||
def startJail(self, name):
	"""Start jail *name* (or commit its pending reload when already running)."""
	with self.__lock:
		jail = self.__jails[name]
		if not jail.isAlive():
			jail.start()
		elif name in self.__reload_state:
			logSys.info("Jail %r reloaded", name)
			del self.__reload_state[name]
		if jail.idle:
			jail.idle = False
|
||||
|
||||
def stopJail(self, name):
	"""Stop and remove jail *name*."""
	with self.__lock:
		self.delJail(name, stop=True)
|
||||
|
||||
def stopAllJail(self):
	"""Stop every jail: first signal all to stop, then join and delete them."""
	logSys.info("Stopping all jails")
	with self.__lock:
		# 1st stop all jails (signal and stop actions/filter thread):
		for name in list(self.__jails.keys()):
			self.delJail(name, stop=True, join=False)
		# 2nd wait for end and delete jails:
		for name in list(self.__jails.keys()):
			self.delJail(name, stop=False, join=True)
|
||||
|
||||
def clearCaches(self):
	"""Flush the DNS caches, so new IPs/families etc can be recognized after a reload."""
	DNSUtils.CACHE_nameToIp.clear()
	DNSUtils.CACHE_ipToName.clear()
|
||||
|
||||
def reloadJails(self, name, opts, begin):
	"""Begin (begin=True) or commit (begin=False) a reload of one jail or all.

	*name* is a jail name or '--all'; *opts* may contain "--unban",
	"--restart" and "--if-exists".
	"""
	if begin:
		# begin reload:
		if self.__reload_state and (name == '--all' or self.__reload_state.get(name)): # pragma: no cover
			raise ValueError('Reload already in progress')
		logSys.info("Reload " + (("jail %s" % name) if name != '--all' else "all jails"))
		with self.__lock:
			if name != '--all':
				# single jail - test it exists (throws exception if not):
				jail = None
				if "--if-exists" not in opts or self.__jails.exists(name):
					jail = self.__jails[name]
				if jail:
					# first unban all ips (will be not restored after (re)start):
					if "--unban" in opts:
						self.setUnbanIP(name)
					# stop if expected:
					if "--restart" in opts:
						self.stopJail(name)
			else:
				# invalidate caches by reload
				self.clearCaches()
				# first unban all ips (will be not restored after (re)start):
				if "--unban" in opts:
					self.setUnbanIP()
				# stop if expected:
				if "--restart" in opts:
					self.stopAllJail()
			# first set all affected jail(s) to idle and reset filter regex and other lists/dicts:
			for jn, jail in self.__jails.items():
				if name == '--all' or jn == name:
					jail.idle = True
					self.__reload_state[jn] = jail
					jail.filter.reload(begin=True)
					jail.actions.reload(begin=True)
	else:
		# end reload, all affected (or new) jails have already all new parameters
		# (via stream) and (re)started:
		with self.__lock:
			deljails = []
			for jn, jail in self.__jails.items():
				if jn in self.__reload_state:
					# still in reload state - remove jails that were not reloaded
					# (untouched, so not in new configuration):
					deljails.append(jn)
				else:
					# commit (reload was finished):
					jail.filter.reload(begin=False)
					jail.actions.reload(begin=False)
			for jn in deljails:
				self.delJail(jn)
			self.__reload_state = {}
		logSys.info("Reload finished.")
|
||||
|
||||
def setIdleJail(self, name, value):
	"""Set the idle flag of jail *name*; always returns True."""
	self.__jails[name].idle = value
	return True

def getIdleJail(self, name):
	"""Return the idle flag of jail *name*."""
	return self.__jails[name].idle
|
||||
|
||||
# Filter
|
||||
# Filter
def setIgnoreSelf(self, name, value):
	"""Enable/disable ignoring of the own host for the jail's filter."""
	self.__jails[name].filter.ignoreSelf = _as_bool(value)

def getIgnoreSelf(self, name):
	"""Return whether the jail's filter ignores the own host."""
	return self.__jails[name].filter.ignoreSelf
|
||||
|
||||
def addIgnoreIP(self, name, ip):
	"""Add *ip* to the ignore list of jail *name*."""
	self.__jails[name].filter.addIgnoreIP(ip)

def delIgnoreIP(self, name, ip):
	"""Remove *ip* from the ignore list of jail *name*."""
	self.__jails[name].filter.delIgnoreIP(ip)

def getIgnoreIP(self, name):
	"""Return the ignore list of jail *name*."""
	return self.__jails[name].filter.getIgnoreIP()
|
||||
|
||||
def addLogPath(self, name, fileName, tail=False):
	"""Add a monitored log file to jail *name* (file-based filters only)."""
	flt = self.__jails[name].filter
	if isinstance(flt, FileFilter):
		flt.addLogPath(fileName, tail)

def delLogPath(self, name, fileName):
	"""Remove a monitored log file from jail *name* (file-based filters only)."""
	flt = self.__jails[name].filter
	if isinstance(flt, FileFilter):
		flt.delLogPath(fileName)
|
||||
|
||||
def getLogPath(self, name):
	"""Return the monitored log paths of jail *name* ([] for non-file filters)."""
	flt = self.__jails[name].filter
	if isinstance(flt, FileFilter):
		return flt.getLogPaths()
	else: # pragma: systemd no cover
		# fix: lazy logging arguments instead of eager "%"-formatting,
		# consistent with the other log calls in this module:
		logSys.debug("Jail %s is not a FileFilter instance", name)
		return []
|
||||
|
||||
def addJournalMatch(self, name, match): # pragma: systemd no cover
|
||||
filter_ = self.__jails[name].filter
|
||||
if isinstance(filter_, JournalFilter):
|
||||
filter_.addJournalMatch(match)
|
||||
|
||||
def delJournalMatch(self, name, match): # pragma: systemd no cover
|
||||
filter_ = self.__jails[name].filter
|
||||
if isinstance(filter_, JournalFilter):
|
||||
filter_.delJournalMatch(match)
|
||||
|
||||
def getJournalMatch(self, name): # pragma: systemd no cover
|
||||
filter_ = self.__jails[name].filter
|
||||
if isinstance(filter_, JournalFilter):
|
||||
return filter_.getJournalMatch()
|
||||
else:
|
||||
logSys.debug("Jail %s is not a JournalFilter instance" % name)
|
||||
return []
|
||||
|
||||
def setLogEncoding(self, name, encoding):
|
||||
filter_ = self.__jails[name].filter
|
||||
filter_.setLogEncoding(encoding)
|
||||
|
||||
def getLogEncoding(self, name):
|
||||
filter_ = self.__jails[name].filter
|
||||
return filter_.getLogEncoding()
|
||||
|
||||
def setFindTime(self, name, value):
|
||||
self.__jails[name].filter.setFindTime(value)
|
||||
|
||||
def getFindTime(self, name):
|
||||
return self.__jails[name].filter.getFindTime()
|
||||
|
||||
def setDatePattern(self, name, pattern):
|
||||
self.__jails[name].filter.setDatePattern(pattern)
|
||||
|
||||
def getDatePattern(self, name):
|
||||
return self.__jails[name].filter.getDatePattern()
|
||||
|
||||
def setLogTimeZone(self, name, tz):
|
||||
self.__jails[name].filter.setLogTimeZone(tz)
|
||||
|
||||
def getLogTimeZone(self, name):
|
||||
return self.__jails[name].filter.getLogTimeZone()
|
||||
|
||||
def setIgnoreCommand(self, name, value):
|
||||
self.__jails[name].filter.ignoreCommand = value
|
||||
|
||||
def getIgnoreCommand(self, name):
|
||||
return self.__jails[name].filter.ignoreCommand
|
||||
|
||||
def setIgnoreCache(self, name, value):
|
||||
value, options = extractOptions("cache["+value+"]")
|
||||
self.__jails[name].filter.ignoreCache = options
|
||||
|
||||
def getIgnoreCache(self, name):
|
||||
return self.__jails[name].filter.ignoreCache
|
||||
|
||||
def setPrefRegex(self, name, value):
|
||||
flt = self.__jails[name].filter
|
||||
logSys.debug(" prefregex: %r", value)
|
||||
flt.prefRegex = value
|
||||
|
||||
def getPrefRegex(self, name):
|
||||
return self.__jails[name].filter.prefRegex
|
||||
|
||||
def addFailRegex(self, name, value, multiple=False):
|
||||
flt = self.__jails[name].filter
|
||||
if not multiple: value = (value,)
|
||||
for value in value:
|
||||
logSys.debug(" failregex: %r", value)
|
||||
flt.addFailRegex(value)
|
||||
|
||||
def delFailRegex(self, name, index=None):
|
||||
self.__jails[name].filter.delFailRegex(index)
|
||||
|
||||
def getFailRegex(self, name):
|
||||
return self.__jails[name].filter.getFailRegex()
|
||||
|
||||
def addIgnoreRegex(self, name, value, multiple=False):
|
||||
flt = self.__jails[name].filter
|
||||
if not multiple: value = (value,)
|
||||
for value in value:
|
||||
logSys.debug(" ignoreregex: %r", value)
|
||||
flt.addIgnoreRegex(value)
|
||||
|
||||
def delIgnoreRegex(self, name, index):
|
||||
self.__jails[name].filter.delIgnoreRegex(index)
|
||||
|
||||
def getIgnoreRegex(self, name):
|
||||
return self.__jails[name].filter.getIgnoreRegex()
|
||||
|
||||
def setUseDns(self, name, value):
|
||||
self.__jails[name].filter.setUseDns(value)
|
||||
|
||||
def getUseDns(self, name):
|
||||
return self.__jails[name].filter.getUseDns()
|
||||
|
||||
def setMaxMatches(self, name, value):
|
||||
self.__jails[name].filter.failManager.maxMatches = value
|
||||
|
||||
def getMaxMatches(self, name):
|
||||
return self.__jails[name].filter.failManager.maxMatches
|
||||
|
||||
def setMaxRetry(self, name, value):
|
||||
self.__jails[name].filter.setMaxRetry(value)
|
||||
|
||||
def getMaxRetry(self, name):
|
||||
return self.__jails[name].filter.getMaxRetry()
|
||||
|
||||
def setMaxLines(self, name, value):
|
||||
self.__jails[name].filter.setMaxLines(value)
|
||||
|
||||
def getMaxLines(self, name):
|
||||
return self.__jails[name].filter.getMaxLines()
|
||||
|
||||
# Action
|
||||
def addAction(self, name, value, *args):
|
||||
## create (or reload) jail action:
|
||||
self.__jails[name].actions.add(value, *args,
|
||||
reload=name in self.__reload_state)
|
||||
|
||||
def getActions(self, name):
|
||||
return self.__jails[name].actions
|
||||
|
||||
def delAction(self, name, value):
|
||||
del self.__jails[name].actions[value]
|
||||
|
||||
def getAction(self, name, value):
|
||||
return self.__jails[name].actions[value]
|
||||
|
||||
def setBanTime(self, name, value):
|
||||
self.__jails[name].actions.setBanTime(value)
|
||||
|
||||
def addAttemptIP(self, name, *args):
|
||||
return self.__jails[name].filter.addAttempt(*args)
|
||||
|
||||
def setBanIP(self, name, value):
|
||||
return self.__jails[name].actions.addBannedIP(value)
|
||||
|
||||
def setUnbanIP(self, name=None, value=None, ifexists=True):
	"""Unban an IP address in one jail (or in all jails).

	Parameters
	----------
	name : str, optional
		Jail name; if None, the unban is applied across all jails.
	value : optional
		The IP (ticket id) to unban; if None, all bans are removed.
	ifexists : bool
		If True, silently ignore IPs that are not banned.

	Returns
	-------
	int
		Total count of removed bans over all affected jails.
	"""
	if name is not None:
		# single jail:
		jails = [self.__jails[name]]
	else:
		# in all jails:
		jails = list(self.__jails.values())
	# unban given or all (if value is None):
	cnt = 0
	# a server-wide unban must tolerate jails where the IP isn't banned,
	# so force ifexists when no jail name was given:
	ifexists |= (name is None)
	for jail in jails:
		cnt += jail.actions.removeBannedIP(value, ifexists=ifexists)
	return cnt
|
||||
|
||||
def banned(self, name=None, ids=None):
	"""Check/report banned ids for one jail or across all jails.

	Parameters
	----------
	name : str, optional
		Jail name; if None, all jails are inspected.
	ids : iterable, optional
		Ids (IPs) to check; semantics depend on the combination with `name`
		(see branches below).

	Returns
	-------
	list
		For all-jails + ids: one list of jail names per given id.
		For a single jail: the jail's getBanned() result directly.
	"""
	if name is not None:
		# single jail:
		jails = [self.__jails[name]]
	else:
		# in all jails:
		jails = list(self.__jails.values())
	# check banned ids:
	res = []
	if name is None and ids:
		# per given id, collect the names of all jails where it is banned:
		for ip in ids:
			ret = []
			for jail in jails:
				if jail.actions.getBanned([ip]):
					ret.append(jail.name)
			res.append(ret)
	else:
		for jail in jails:
			ret = jail.actions.getBanned(ids)
			if name is not None:
				# single jail: return its result directly
				return ret
				# NOTE(review): line below is unreachable after `return` —
				# looks like leftover code; confirm intended branch shape.
				res.append(ret)
			else:
				res.append({jail.name: ret})
	return res
|
||||
|
||||
def getBanTime(self, name):
|
||||
return self.__jails[name].actions.getBanTime()
|
||||
|
||||
def getBanList(self, name, withTime=False):
	"""Returns the list of banned IP addresses for a jail.

	Parameters
	----------
	name : str
		The name of a jail.
	withTime : bool
		Passed through to actions.getBanList; presumably includes
		ban-time information with each entry — confirm against
		Actions.getBanList.

	Returns
	-------
	list
		The list of banned IP addresses.
	"""
	return self.__jails[name].actions.getBanList(withTime)
|
||||
|
||||
def setBanTimeExtra(self, name, opt, value):
|
||||
self.__jails[name].setBanTimeExtra(opt, value)
|
||||
|
||||
def getBanTimeExtra(self, name, opt):
|
||||
return self.__jails[name].getBanTimeExtra(opt)
|
||||
|
||||
def isStarted(self):
	"""Whether the async communication server exists and is active."""
	srv = self.__asyncServer
	return srv is not None and srv.isActive()
|
||||
|
||||
def isAlive(self, jailnum=None):
	"""Liveness check: 1 if all jails are alive (and, when given,
	the jail count matches `jailnum`), else 0."""
	if jailnum is not None and len(self.__jails) != jailnum:
		return 0
	alive = all(jail.isAlive() for jail in list(self.__jails.values()))
	return 1 if alive else 0
|
||||
|
||||
# Status
|
||||
def status(self, name="", flavor="basic"):
	"""Return the global server status.

	Parameters
	----------
	name : str
		"--all" requests per-jail status in addition to the summary.
	flavor : str
		Status flavor forwarded to each jail; "stats" returns the raw
		per-jail dict (only meaningful together with name == '--all').

	Returns
	-------
	list or dict
		Summary list of (label, value) pairs, optionally followed by a
		per-jail status dict; a plain dict for the "stats" flavor.
	"""
	# use the context-manager lock idiom, consistent with the rest of
	# this class (behavior identical to acquire/release in try/finally):
	with self.__lock:
		jails = sorted(self.__jails.items())
		if flavor != "stats":
			jailList = [n for n, j in jails]
			ret = [
				("Number of jail", len(jailList)),
				("Jail list", ", ".join(jailList))
			]
		if name == '--all':
			jstat = dict(jails)
			for n, j in jails:
				jstat[n] = j.status(flavor=flavor)
			if flavor == "stats":
				return jstat
			ret.append(jstat)
		return ret
|
||||
|
||||
def statusJail(self, name, flavor="basic"):
	"""Return the status of a single jail (delegates to the jail)."""
	jail = self.__jails[name]
	return jail.status(flavor=flavor)
|
||||
|
||||
# Logging
|
||||
|
||||
##
|
||||
# Set the logging level.
|
||||
#
|
||||
# CRITICAL
|
||||
# ERROR
|
||||
# WARNING
|
||||
# NOTICE
|
||||
# INFO
|
||||
# DEBUG
|
||||
# @param value the level
|
||||
|
||||
def setLogLevel(self, value):
	"""Set the logging level (CRITICAL/ERROR/WARNING/NOTICE/INFO/DEBUG).

	No-op if the level is unchanged. Raises via str2LogLevel for an
	unknown level name before any state is modified.
	"""
	value = value.upper()
	with self.__loggingLock:
		if self.__logLevel == value:
			return
		ll = str2LogLevel(value)
		# don't change real log-level if running from the test cases:
		getLogger("fail2ban").setLevel(
			ll if DEF_LOGTARGET != "INHERITED" or ll < logging.DEBUG else DEF_LOGLEVEL)
		self.__logLevel = value
|
||||
|
||||
##
|
||||
# Get the logging level.
|
||||
#
|
||||
# @see setLogLevel
|
||||
# @return the log level
|
||||
|
||||
def getLogLevel(self):
|
||||
with self.__loggingLock:
|
||||
return self.__logLevel
|
||||
|
||||
##
|
||||
# Sets the logging target.
|
||||
#
|
||||
# target can be a file, SYSLOG, STDOUT or STDERR.
|
||||
# @param target the logging target
|
||||
|
||||
def setLogTarget(self, target):
	"""Set the logging target: a file path, SYSLOG, SYSTEMD-JOURNAL,
	STDOUT/SYSOUT or STDERR (optionally with options, e.g. format/padding).

	Returns True on success; False if the new target could not be opened,
	in which case the previous target remains active.
	"""
	# check reserved targets in uppercase, don't change target, because it can be file:
	target, logOptions = extractOptions(target)
	systarget = target.upper()
	with self.__loggingLock:
		# don't set new handlers if already the same
		# or if "INHERITED" (foreground worker of the test cases, to prevent stop logging):
		if self.__logTarget == target:
			return True
		if systarget == "INHERITED":
			self.__logTarget = target
			return True
		padding = logOptions.get('padding')
		# set a format which is simpler for console use
		if systarget == "SYSTEMD-JOURNAL":
			from systemd.journal import JournalHandler
			hdlr = JournalHandler(SYSLOG_IDENTIFIER='fail2ban')
		elif systarget == "SYSLOG":
			facility = logOptions.get('facility', 'DAEMON').upper()
			# backwards compatibility - default no padding for syslog handler:
			if padding is None: padding = '0'
			try:
				facility = getattr(logging.handlers.SysLogHandler, 'LOG_' + facility)
			except AttributeError: # pragma: no cover
				logSys.error("Unable to set facility %r, using 'DAEMON'", logOptions.get('facility'))
				facility = logging.handlers.SysLogHandler.LOG_DAEMON
			if self.__syslogSocket == "auto":
				import platform
				self.__syslogSocket = self.__autoSyslogSocketPaths.get(
					platform.system())
			# only use the socket if it actually exists and is a socket:
			if self.__syslogSocket is not None\
					and os.path.exists(self.__syslogSocket)\
					and stat.S_ISSOCK(os.stat(
							self.__syslogSocket).st_mode):
				hdlr = logging.handlers.SysLogHandler(
					self.__syslogSocket, facility=facility)
			else:
				logSys.error(
					"Syslog socket file: %s does not exists"
					" or is not a socket" % self.__syslogSocket)
				return False
		elif systarget in ("STDOUT", "SYSOUT"):
			hdlr = logging.StreamHandler(sys.stdout)
		elif systarget == "STDERR":
			hdlr = logging.StreamHandler(sys.stderr)
		else:
			# Target should be a file
			try:
				# probe writability before installing the handler:
				open(target, "a").close()
				hdlr = logging.handlers.RotatingFileHandler(target)
			except IOError:
				logSys.error("Unable to log to %r", target)
				logSys.info("Logging to previous target %r", self.__logTarget)
				return False
		# Removes previous handlers -- in reverse order since removeHandler
		# alter the list in-place and that can confuses the iterable
		logger = getLogger("fail2ban")
		for handler in logger.handlers[::-1]:
			# Remove the handler.
			logger.removeHandler(handler)
			# And try to close -- it might be closed already
			handler.flush()
			handler.close()
		# detailed format by deep log levels (as DEBUG=10):
		if logger.getEffectiveLevel() <= logging.DEBUG: # pragma: no cover
			if self.__verbose is None:
				self.__verbose = logging.DEBUG - logger.getEffectiveLevel() + 1
		# If handler don't already add date to the message:
		addtime = logOptions.get('datetime')
		if addtime is not None:
			addtime = _as_bool(addtime)
		else:
			addtime = systarget not in ("SYSLOG", "SYSOUT")
		if padding is not None:
			padding = _as_bool(padding)
		else:
			padding = True
		# If log-format is redefined in options:
		if logOptions.get('format', '') != '':
			fmt = logOptions.get('format')
		else:
			# verbose log-format:
			verbose = 0
			if self.__verbose is not None and self.__verbose > 2: # pragma: no cover
				verbose = self.__verbose-1
			fmt = getVerbosityFormat(verbose, addtime=addtime, padding=padding)
		# tell the handler to use this format
		if target != "SYSTEMD-JOURNAL":
			hdlr.setFormatter(logging.Formatter(fmt))
		logger.addHandler(hdlr)
		# Does not display this message at startup.
		if self.__logTarget is not None:
			logSys.info("Start Fail2ban v%s", version.version)
			logSys.info(
				"Changed logging target to %s for Fail2ban v%s"
				% ((target
					if target != "SYSLOG"
					else "%s (%s)"
						% (target, self.__syslogSocket)),
					version.version))
		# Sets the logging target.
		self.__logTarget = target
		return True
|
||||
|
||||
##
|
||||
# Sets the syslog socket.
|
||||
#
|
||||
# syslogsocket is the full path to the syslog socket
|
||||
# @param syslogsocket the syslog socket path
|
||||
def setSyslogSocket(self, syslogsocket):
	"""Set the syslog socket path.

	Parameters
	----------
	syslogsocket : str
		Full path to the syslog socket (or "auto").

	Returns
	-------
	bool
		True, or the result of re-applying the SYSLOG log target
		(which may be False if the new socket is unusable).
	"""
	with self.__loggingLock:
		if self.__syslogSocket == syslogsocket:
			return True
		self.__syslogSocket = syslogsocket
	# Conditionally reload, logtarget depends on socket path when SYSLOG
	return self.__logTarget != "SYSLOG"\
		or self.setLogTarget(self.__logTarget)
|
||||
|
||||
def getLogTarget(self):
	"""Return the current logging target (under the logging lock)."""
	with self.__loggingLock:
		target = self.__logTarget
		return target
|
||||
|
||||
def getSyslogSocket(self):
	"""Return the current syslog socket path (under the logging lock)."""
	with self.__loggingLock:
		sock = self.__syslogSocket
		return sock
|
||||
|
||||
def flushLogs(self):
	"""Flush (and for file targets, roll over) all fail2ban log handlers.

	Returns
	-------
	str
		"rolled over" for file-based targets, "flushed" otherwise.
	"""
	if self.__logTarget not in ['STDERR', 'STDOUT', 'SYSLOG', 'SYSTEMD-JOURNAL']:
		# file target: rotate where the handler supports it,
		# fall back to a plain flush for handlers without doRollover:
		for handler in getLogger("fail2ban").handlers:
			try:
				handler.doRollover()
				# lazy %-args, consistent with logging style elsewhere in this file:
				logSys.info("rollover performed on %s", self.__logTarget)
			except AttributeError:
				handler.flush()
				logSys.info("flush performed on %s", self.__logTarget)
		return "rolled over"
	else:
		for handler in getLogger("fail2ban").handlers:
			handler.flush()
			logSys.info("flush performed on %s", self.__logTarget)
		return "flushed"
|
||||
|
||||
@staticmethod
def setIPv6IsAllowed(value):
	"""Enable/disable IPv6 handling; 'auto' resets to auto-detection.

	Delegates to DNSUtils.setIPv6IsAllowed with True/False, or None
	for 'auto'.
	"""
	value = _as_bool(value) if value != 'auto' else None
	return DNSUtils.setIPv6IsAllowed(value)
|
||||
|
||||
def setThreadOptions(self, value):
	"""Apply thread-related options.

	Currently only 'stacksize' (in KiB) is recognized; any other key
	raises KeyError.
	"""
	for opt, v in value.items():
		if opt != 'stacksize': # pragma: no cover
			raise KeyError("unknown option %r" % opt)
		threading.stack_size(int(v)*1024)
|
||||
|
||||
def getThreadOptions(self):
	"""Return the current thread options (stack size reported in KiB)."""
	kib = threading.stack_size() // 1024
	return {'stacksize': kib}
|
||||
|
||||
def setDatabase(self, filename):
	"""Set (or with "none" disable) the persistence database.

	Raises RuntimeError if jails already exist; notifies the main
	observer of the new database handle.
	"""
	# if not changed - nothing to do
	if self.__db and self.__db.filename == filename:
		return
	if not self.__db and filename.lower() == 'none':
		return
	if len(self.__jails) != 0:
		raise RuntimeError(
			"Cannot change database when there are jails present")
	if filename.lower() == "none":
		self.__db = None
	else:
		if Fail2BanDb is not None:
			_make_file_path(filename)
			self.__db = Fail2BanDb(filename)
			# start from a clean jail table in the (re)opened database:
			self.__db.delAllJails()
		else: # pragma: no cover
			logSys.error(
				"Unable to import fail2ban database module as sqlite "
				"is not available.")
	if Observers.Main is not None:
		Observers.Main.db_set(self.__db)
|
||||
|
||||
def getDatabase(self):
|
||||
return self.__db
|
||||
|
||||
@staticmethod
def __get_fdlist():
	"""Generate a list of open file descriptors.

	This wouldn't work on some platforms, or if proc/fdescfs not mounted, or a chroot environment,
	then it'd raise a FileExistsError.
	"""
	for path in (
		'/proc/self/fd', # Linux, Cygwin and NetBSD
		'/proc/fd', # MacOS and FreeBSD
	):
		if os.path.exists(path):
			# generator closure over `path`; listdir entries are fd numbers
			def fdlist():
				for name in os.listdir(path):
					if name.isdigit():
						yield int(name)
			return fdlist()
	# other platform or unmounted, chroot etc:
	# NOTE(review): FileExistsError is an unusual choice here (one would
	# expect FileNotFoundError) — callers catch broadly, confirm before changing.
	raise FileExistsError("fd-list not found")
|
||||
|
||||
def __createDaemon(self): # pragma: no cover
	""" Detach a process from the controlling terminal and run it in the
		background as a daemon.

		Classic double-fork daemonization; returns (True,) in the daemon,
		None in the original parent (signal to exit), or
		(False, (errno, strerror)) on fork failure.

		http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
	"""

	# When the first child terminates, all processes in the second child
	# are sent a SIGHUP, so it's ignored.

	# We need to set this in the parent process, so it gets inherited by the
	# child process, and this makes sure that it is effect even if the parent
	# terminates quickly.
	self._rebindSignal(signal.SIGHUP, signal.SIG_IGN)

	try:
		# Fork a child process so the parent can exit. This will return control
		# to the command line or shell. This is required so that the new process
		# is guaranteed not to be a process group leader. We have this guarantee
		# because the process GID of the parent is inherited by the child, but
		# the child gets a new PID, making it impossible for its PID to equal its
		# PGID.
		pid = os.fork()
	except OSError as e:
		return (False, (e.errno, e.strerror)) # ERROR (return a tuple)

	if pid == 0: # The first child.

		# Next we call os.setsid() to become the session leader of this new
		# session. The process also becomes the process group leader of the
		# new process group. Since a controlling terminal is associated with a
		# session, and this new session has not yet acquired a controlling
		# terminal our process now has no controlling terminal. This shouldn't
		# fail, since we're guaranteed that the child is not a process group
		# leader.
		os.setsid()

		try:
			# Fork a second child to prevent zombies. Since the first child is
			# a session leader without a controlling terminal, it's possible for
			# it to acquire one by opening a terminal in the future. This second
			# fork guarantees that the child is no longer a session leader, thus
			# preventing the daemon from ever acquiring a controlling terminal.
			pid = os.fork() # Fork a second child.
		except OSError as e:
			return (False, (e.errno, e.strerror)) # ERROR (return a tuple)

		if (pid == 0): # The second child.
			# Ensure that the daemon doesn't keep any directory in use. Failure
			# to do this could make a filesystem unmountable.
			os.chdir("/")
		else:
			os._exit(0) # Exit parent (the first child) of the second child.
	else:
		# Signal to exit, parent of the first child.
		return None

	# Close all open files. Try to obtain the range of open descriptors directly.
	# As a fallback try the system configuration variable, SC_OPEN_MAX,
	# for the maximum number of open files to close. If it doesn't exist, use
	# the default value (configurable).
	try:
		fdlist = self.__get_fdlist()
		for fd in fdlist:
			try:
				os.close(fd)
			except OSError: # ERROR (ignore)
				pass
	except:
		# fd enumeration unavailable on this platform — close a fixed range:
		try:
			maxfd = os.sysconf("SC_OPEN_MAX")
		except (AttributeError, ValueError):
			maxfd = 256 # default maximum
		os.closerange(0, maxfd)

	# Redirect the standard file descriptors to /dev/null.
	# (os.open returns the lowest free fd, so these land on 0, 1, 2.)
	os.open("/dev/null", os.O_RDONLY) # standard input (0)
	os.open("/dev/null", os.O_RDWR) # standard output (1)
	os.open("/dev/null", os.O_RDWR) # standard error (2)
	return (True,)
|
||||
|
||||
|
||||
class ServerInitializationError(Exception):
	"""Error raised while the Fail2Ban server is being initialized."""
	pass
|
||||
392
fail2ban-master/fail2ban/server/strptime.py
Normal file
392
fail2ban-master/fail2ban/server/strptime.py
Normal file
@@ -0,0 +1,392 @@
|
||||
# emacs: -*- mode: python; coding: utf-8; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
import re
|
||||
import time
|
||||
import calendar
|
||||
import datetime
|
||||
from _strptime import LocaleTime, TimeRE, _calc_julian_from_U_or_W
|
||||
|
||||
from .mytime import MyTime
|
||||
|
||||
locale_time = LocaleTime()
|
||||
|
||||
TZ_ABBR_RE = r"[A-Z](?:[A-Z]{2,4})?"
|
||||
FIXED_OFFSET_TZ_RE = re.compile(r"(%s)?([+-][01]\d(?::?\d{2})?)?$" % (TZ_ABBR_RE,))
|
||||
|
||||
timeRE = TimeRE()
|
||||
|
||||
# %k - one- or two-digit number giving the hour of the day (0-23) on a 24-hour clock,
|
||||
# (corresponds %H, but allows space if not zero-padded).
|
||||
# %l - one- or two-digit number giving the hour of the day (12-11) on a 12-hour clock,
|
||||
# (corresponds %I, but allows space if not zero-padded).
|
||||
timeRE['k'] = r" ?(?P<H>[0-2]?\d)"
|
||||
timeRE['l'] = r" ?(?P<I>1?\d)"
|
||||
|
||||
# TODO: because python currently does not support mixing of case-sensitive with case-insensitive matching,
|
||||
# check how TZ (in uppercase) can be combined with %a/%b etc. (that are currently case-insensitive),
|
||||
# to avoid invalid date-time recognition in strings like '11-Aug-2013 03:36:11.372 error ...'
|
||||
# with wrong TZ "error", which is at least not backwards compatible.
|
||||
# Hence %z currently match literal Z|UTC|GMT only (and offset-based), and %Exz - all zone abbreviations.
|
||||
timeRE['Z'] = r"(?P<Z>Z|[A-Z]{3,5})"
|
||||
timeRE['z'] = r"(?P<z>Z|UTC|GMT|[+-][01]\d(?::?\d{2})?)"
|
||||
|
||||
# Note: this extended tokens supported zone abbreviations, but it can parse 1 or 3-5 char(s) in lowercase,
|
||||
# see todo above. Don't use them in default date-patterns (if not anchored, few precise resp. optional).
|
||||
timeRE['ExZ'] = r"(?P<Z>%s)" % (TZ_ABBR_RE,)
|
||||
timeRE['Exz'] = r"(?P<z>(?:%s)?[+-][01]\d(?::?\d{2})?|%s)" % (TZ_ABBR_RE, TZ_ABBR_RE)
|
||||
|
||||
# overwrite default patterns, since they can be non-optimal:
|
||||
timeRE['d'] = r"(?P<d>[1-2]\d|[0 ]?[1-9]|3[0-1])"
|
||||
timeRE['m'] = r"(?P<m>0?[1-9]|1[0-2])"
|
||||
timeRE['Y'] = r"(?P<Y>\d{4})"
|
||||
timeRE['H'] = r"(?P<H>[0-1]?\d|2[0-3])"
|
||||
timeRE['M'] = r"(?P<M>[0-5]?\d)"
|
||||
timeRE['S'] = r"(?P<S>[0-5]?\d|6[0-1])"
|
||||
|
||||
# Extend built-in TimeRE with some exact patterns
|
||||
# exact two-digit patterns:
|
||||
timeRE['Exd'] = r"(?P<d>[1-2]\d|0[1-9]|3[0-1])"
|
||||
timeRE['Exm'] = r"(?P<m>0[1-9]|1[0-2])"
|
||||
timeRE['ExH'] = r"(?P<H>[0-1]\d|2[0-3])"
|
||||
timeRE['Exk'] = r" ?(?P<H>[0-1]?\d|2[0-3])"
|
||||
timeRE['Exl'] = r" ?(?P<I>1[0-2]|\d)"
|
||||
timeRE['ExM'] = r"(?P<M>[0-5]\d)"
|
||||
timeRE['ExS'] = r"(?P<S>[0-5]\d|6[0-1])"
|
||||
|
||||
def _updateTimeRE():
	"""Install the more precise 'ExY'/'Exy' year patterns into timeRE."""
	def _getYearCentRE(cent=(0,3), distance=3, now=(MyTime.now(), MyTime.alternateNow)):
		""" Build century regex for last year and the next years (distance).

		Thereby respect possible run in the test-cases (alternate date used there)
		"""
		# slice a year number down to its century prefix (chars cent[0]:cent[1]):
		cent = lambda year, f=cent[0], t=cent[1]: str(year)[f:t]
		def grp(exprset):
			# recursively factor out the longest common prefix of the
			# alternatives, emitting "(?:a|b)" or "[ab]" character classes:
			c = None
			if len(exprset) > 1:
				for i in exprset:
					if c is None or i[0:-1] == c:
						c = i[0:-1]
					else:
						c = None
						break
				if not c:
					for i in exprset:
						if c is None or i[0] == c:
							c = i[0]
						else:
							c = None
							break
			if c:
				return "%s%s" % (c, grp([i[len(c):] for i in exprset]))
			return ("(?:%s)" % "|".join(exprset) if len(exprset[0]) > 1 else "[%s]" % "".join(exprset)) \
				if len(exprset) > 1 else "".join(exprset)
		exprset = set( cent(now[0].year + i) for i in (-1, distance) )
		if len(now) > 1 and now[1]:
			exprset |= set( cent(now[1].year + i) for i in range(-1, now[0].year-now[1].year+1, distance) )
		return grp(sorted(list(exprset)))

	# more precise year patterns, within same century of last year and
	# the next 3 years (for possible long uptime of fail2ban); thereby
	# consider possible run in the test-cases (alternate date used there),
	# so accept years: 20xx (from test-date or 2001 up to current century)
	timeRE['ExY'] = r"(?P<Y>%s\d)" % _getYearCentRE(cent=(0,3), distance=3,
		now=(datetime.datetime.now(), datetime.datetime.fromtimestamp(
			min(MyTime.alternateNowTime or 978393600, 978393600))
		)
	)
	timeRE['Exy'] = r"(?P<y>\d{2})"
|
||||
|
||||
_updateTimeRE()
|
||||
|
||||
def getTimePatternRE():
	"""Return (pattern, names): a regex alternation matching every supported
	%-directive of timeRE, and a map from directive key to a display name."""
	keys = list(timeRE.keys())
	long_keys = [k for k in keys if len(k) > 1]
	short_keys = [k for k in keys if len(k) == 1]
	patt = r"%%(%%|%s|[%s])" % ("|".join(long_keys), "".join(short_keys))
	names = {
		'a': "DAY", 'A': "DAYNAME", 'b': "MON", 'B': "MONTH", 'd': "Day",
		'H': "24hour", 'I': "12hour", 'j': "Yearday", 'm': "Month",
		'M': "Minute", 'p': "AMPM", 'S': "Second", 'U': "Yearweek",
		'w': "Weekday", 'W': "Yearweek", 'y': 'Year2', 'Y': "Year", '%': "%",
		'z': "Zone offset", 'f': "Microseconds", 'Z': "Zone name",
	}
	# may not have them all...
	for key in set(keys) - set(names):
		base = names.get(key[2:]) if key.startswith('Ex') else None
		names[key] = ("Ex" + base) if base else ("%%%s" % key)
	return (patt, names)
|
||||
|
||||
|
||||
def validateTimeZone(tz):
	"""Validate a timezone and convert it to offset if it can (offset-based TZ).

	For now this accepts the UTC[+-]hhmm format (UTC has aliases GMT/Z and optional).
	Additionally it accepts all zone abbreviations mentioned below in TZ_STR.
	Note that currently this zone abbreviations are offset-based and used fixed
	offset without automatically DST-switch (if CET used then no automatically CEST-switch).

	In the future, it may be extended for named time zones (such as Europe/Paris)
	present on the system, if a suitable tz library is present (pytz).
	"""
	if tz is None:
		return None
	matched = FIXED_OFFSET_TZ_RE.match(tz)
	if not matched:
		raise ValueError("Unknown or unsupported time zone: %r" % tz)
	return zone2offset(matched.groups(), 0)
|
||||
|
||||
def zone2offset(tz, dt):
	"""Return the proper offset, in minutes according to given timezone at a given time.

	Parameters
	----------
	tz: symbolic timezone or offset (for now only TZA?([+-]hh:?mm?)? is supported,
		as value are accepted:
			int offset;
			string in form like 'CET+0100' or 'UTC' or '-0400';
			tuple (or list) in form (zone name, zone offset);
	dt: datetime instance for offset computation (currently unused)
	"""
	if isinstance(tz, int):
		return tz
	if isinstance(tz, str):
		return validateTimeZone(tz)
	tz, tzo = tz
	# the final offset is the abbreviation's base offset (TZ_ABBR_OFFS)
	# plus the explicit [+-]hh[:mm] part, when present:
	if tzo is None or tzo == '': # without offset
		return TZ_ABBR_OFFS[tz]
	if len(tzo) <= 3: # short tzo (hh only)
		# [+-]hh --> [+-]hh*60
		return TZ_ABBR_OFFS[tz] + int(tzo)*60
	if tzo[3] != ':':
		# [+-]hhmm --> [+-]1 * (hh*60 + mm)
		return TZ_ABBR_OFFS[tz] + (-1 if tzo[0] == '-' else 1) * (int(tzo[1:3])*60 + int(tzo[3:5]))
	else:
		# [+-]hh:mm --> [+-]1 * (hh*60 + mm)
		return TZ_ABBR_OFFS[tz] + (-1 if tzo[0] == '-' else 1) * (int(tzo[1:3])*60 + int(tzo[4:6]))
|
||||
|
||||
def reGroupDictStrptime(found_dict, msec=False, default_tz=None):
	"""Return time from dictionary of strptime fields

	This is tweaked from python built-in _strptime.

	Parameters
	----------
	found_dict : dict
		Dictionary where keys represent the strptime fields, and values the
		respective value.
	msec : bool
		When True, include the parsed microseconds fraction in the result.
	default_tz : default timezone to apply if nothing relevant is in found_dict
		(may be a non-fixed one in the future)
	Returns
	-------
	float
		Unix time stamp.
	"""

	now = \
	year = month = day = tzoffset = \
	weekday = julian = week_of_year = None
	hour = minute = second = fraction = 0
	for key, val in found_dict.items():
		if val is None: continue
		# Directives not explicitly handled below:
		#    c, x, X
		#       handled by making out of other directives
		#    U, W
		#       worthless without day of the week
		if key == 'y':
			year = int(val)
			# Fail2ban year should be always in the current century (>= 2000)
			if year <= 2000:
				year += 2000
		elif key == 'Y':
			year = int(val)
		elif key == 'm':
			month = int(val)
		elif key == 'B':
			month = locale_time.f_month.index(val.lower())
		elif key == 'b':
			month = locale_time.a_month.index(val.lower())
		elif key == 'd':
			day = int(val)
		elif key == 'H':
			hour = int(val)
		elif key == 'I':
			hour = int(val)
			ampm = found_dict.get('p', '').lower()
			# If there was no AM/PM indicator, we'll treat this like AM
			if ampm in ('', locale_time.am_pm[0]):
				# We're in AM so the hour is correct unless we're
				# looking at 12 midnight.
				# 12 midnight == 12 AM == hour 0
				if hour == 12:
					hour = 0
			elif ampm == locale_time.am_pm[1]:
				# We're in PM so we need to add 12 to the hour unless
				# we're looking at 12 noon.
				# 12 noon == 12 PM == hour 12
				if hour != 12:
					hour += 12
		elif key == 'M':
			minute = int(val)
		elif key == 'S':
			second = int(val)
		elif key == 'f':
			if msec: # pragma: no cover - currently unused
				s = val
				# Pad to always return microseconds.
				s += "0" * (6 - len(s))
				fraction = int(s)
		elif key == 'A':
			weekday = locale_time.f_weekday.index(val.lower())
		elif key == 'a':
			weekday = locale_time.a_weekday.index(val.lower())
		elif key == 'w':
			weekday = int(val) - 1
			if weekday < 0: weekday = 6
		elif key == 'j':
			julian = int(val)
		elif key in ('U', 'W'):
			week_of_year = int(val)
			# U starts week on Sunday, W - on Monday
			week_of_year_start = 6 if key == 'U' else 0
		elif key in ('z', 'Z'):
			z = val
			if z in ("Z", "UTC", "GMT"):
				tzoffset = 0
			else:
				tzoffset = zone2offset(z, 0); # currently offset-based only

	# Fail2Ban will assume it's this year
	assume_year = False
	if year is None:
		if not now: now = MyTime.now()
		year = now.year
		assume_year = True
	if month is None or day is None:
		# If we know the week of the year and what day of that week, we can figure
		# out the Julian day of the year.
		if julian is None and week_of_year is not None and weekday is not None:
			julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
				(week_of_year_start == 0))
		# Cannot pre-calculate datetime.datetime() since can change in Julian
		# calculation and thus could have different value for the day of the week
		# calculation.
		if julian is not None:
			datetime_result = datetime.datetime.fromordinal((julian - 1) + datetime.datetime(year, 1, 1).toordinal())
			year = datetime_result.year
			month = datetime_result.month
			day = datetime_result.day

	# Fail2Ban assume today
	assume_today = False
	if month is None and day is None:
		if not now: now = MyTime.now()
		month = now.month
		day = now.day
		assume_today = True

	# Actually create date
	date_result = datetime.datetime(
		year, month, day, hour, minute, second, fraction)
	# Correct timezone if not supplied in the log line
	if tzoffset is None and default_tz is not None:
		tzoffset = zone2offset(default_tz, date_result)
	# Add timezone info
	if tzoffset is not None:
		date_result -= datetime.timedelta(seconds=tzoffset * 60)

	if assume_today:
		if not now: now = MyTime.now()
		if date_result > now:
			# Rollover at midnight, could mean it's yesterday...
			date_result -= datetime.timedelta(days=1)
	if assume_year:
		if not now: now = MyTime.now()
		if date_result > now + datetime.timedelta(days=1): # ignore by timezone issues (+24h)
			# assume last year - also reset month and day as it's not yesterday...
			date_result = date_result.replace(
				year=year-1, month=month, day=day)

	# make time:
	if tzoffset is not None:
		tm = calendar.timegm(date_result.utctimetuple())
	else:
		tm = time.mktime(date_result.timetuple())
	if msec: # pragma: no cover - currently unused
		tm += fraction/1000000.0
	return tm
|
||||
|
||||
|
||||
# Time-zone abbreviation -> offset in minutes; '' and None mean UTC.
# Populated from TZ_STR by _init_TZ_ABBR() at import time.
TZ_ABBR_OFFS = {'':0, None:0}
TZ_STR = '''
-12 Y
-11 X NUT SST
-10 W CKT HAST HST TAHT TKT
-9 V AKST GAMT GIT HADT HNY
-8 U AKDT CIST HAY HNP PST PT
-7 T HAP HNR MST PDT
-6 S CST EAST GALT HAR HNC MDT
-5 R CDT COT EASST ECT EST ET HAC HNE PET
-4 Q AST BOT CLT COST EDT FKT GYT HAE HNA PYT
-3 P ADT ART BRT CLST FKST GFT HAA PMST PYST SRT UYT WGT
-2 O BRST FNT PMDT UYST WGST
-1 N AZOT CVT EGT
0 Z EGST GMT UTC WET WT
1 A CET DFT WAT WEDT WEST
2 B CAT CEDT CEST EET SAST WAST
3 C EAT EEDT EEST IDT MSK
4 D AMT AZT GET GST KUYT MSD MUT RET SAMT SCT
5 E AMST AQTT AZST HMT MAWT MVT PKT TFT TJT TMT UZT YEKT
6 F ALMT BIOT BTT IOT KGT NOVT OMST YEKST
7 G CXT DAVT HOVT ICT KRAT NOVST OMSST THA WIB
8 H ACT AWST BDT BNT CAST HKT IRKT KRAST MYT PHT SGT ULAT WITA WST
9 I AWDT IRKST JST KST PWT TLT WDT WIT YAKT
10 K AEST ChST PGT VLAT YAKST YAPT
11 L AEDT LHDT MAGT NCT PONT SBT VLAST VUT
12 M ANAST ANAT FJT GILT MAGST MHT NZST PETST PETT TVT WFT
13 FJST NZDT
11.5 NFT
10.5 ACDT LHST
9.5 ACST
6.5 CCT MMT
5.75 NPT
5.5 SLT
4.5 AFT IRDT
3.5 IRST
-2.5 HAT NDT
-3.5 HNT NST NT
-4.5 HLV VET
-9.5 MART MIT
'''

def _init_TZ_ABBR():
	"""Fill TZ_ABBR_OFFS (abbreviation -> offset in minutes) from TZ_STR.

	Each non-empty line of TZ_STR is "<hours-offset> <abbr> <abbr> ...";
	fractional hour offsets (e.g. 5.75 for NPT) are supported.
	"""
	for line in TZ_STR.split('\n'):
		fields = line.split()
		if not fields:
			continue
		offset_minutes = int(float(fields[0]) * 60)
		for abbr in fields[1:]:
			TZ_ABBR_OFFS[abbr] = offset_minutes

_init_TZ_ABBR()
|
||||
293
fail2ban-master/fail2ban/server/ticket.py
Normal file
293
fail2ban-master/fail2ban/server/ticket.py
Normal file
@@ -0,0 +1,293 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
from ..helpers import getLogger
|
||||
from .ipdns import IPAddr
|
||||
from .mytime import MyTime
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Ticket(object):
	"""Base ticket holding an identifier (usually an IP), failure data and ban state.

	A ticket records the time of the (last) failure (`_time`), how long and how
	often the subject was banned (`_banTime`, `_banCount`), bit-flags
	(RESTORED / BANNED) and arbitrary per-ticket data such as matched log
	lines and the failure count.
	"""

	__slots__ = ('_id', '_flags', '_banCount', '_banTime', '_time', '_data', '_retry', '_lastReset')

	# Largest representable time - used as "end of ban" for permanent bans
	# (bantime == -1); corresponds to the 4461763-th year.
	MAX_TIME = 0X7FFFFFFFFFFF

	# bit-flags stored in _flags:
	RESTORED = 0x01  # ticket was restored (e.g. from the database after restart)
	BANNED = 0x08    # ticket is currently banned

	def __init__(self, ip=None, time=None, matches=None, data=None, ticket=None):
		"""Ticket constructor

		@param ip the IP address (or generic identifier)
		@param time time of the failure; defaults to "now" (MyTime.time())
		@param matches (log) lines caused the ticket
		@param data extra key/value data merged into the ticket (None values ignored).
			Default changed from a shared mutable ``{}`` to ``None`` (mutable
			default-argument fix); behavior is unchanged.
		@param ticket optional source ticket - all its information is copied over
		"""
		self.setID(ip)
		self._flags = 0
		self._banCount = 0
		self._banTime = None
		self._time = time if time is not None else MyTime.time()
		self._data = {'matches': matches or [], 'failures': 0}
		# merge supplied data, skipping None values:
		if data:
			for k, v in data.items():
				if v is not None:
					self._data[k] = v
		if ticket:
			# ticket available - copy whole information from ticket:
			self.update(ticket)

	def __str__(self):
		return "%s: ip=%s time=%s bantime=%s bancount=%s #attempts=%d matches=%r" % \
			(self.__class__.__name__.split('.')[-1], self._id, self._time,
				self._banTime, self._banCount,
				self._data['failures'], self._data.get('matches', []))

	def __repr__(self):
		return str(self)

	def __eq__(self, other):
		# equal when id, time (to 2 decimal places) and data all match;
		# any non-ticket "other" compares unequal (AttributeError path):
		try:
			return self._id == other._id and \
				round(self._time, 2) == round(other._time, 2) and \
				self._data == other._data
		except AttributeError:
			return False

	def update(self, ticket):
		"""Copy every non-None slot value from *ticket* into this ticket."""
		for n in ticket.__slots__:
			v = getattr(ticket, n, None)
			if v is not None:
				setattr(self, n, v)

	def setID(self, value):
		# guarantee using IPAddr instead of unicode, str for the IP
		if isinstance(value, str):
			value = IPAddr(value)
		self._id = value

	def getID(self):
		return self._id

	def getIP(self):
		# real IP may differ from the ticket id (e.g. id could be a user name):
		return self._data.get('ip', self._id)

	def setTime(self, value):
		self._time = value

	def getTime(self):
		return self._time

	def setBanTime(self, value):
		self._banTime = value

	def getBanTime(self, defaultBT=None):
		"""Return the ticket's ban time, falling back to *defaultBT* if unset."""
		return (self._banTime if self._banTime is not None else defaultBT)

	def setBanCount(self, value, always=False):
		# the ban count only grows unless "always" forces an overwrite:
		if always or value > self._banCount:
			self._banCount = value

	def incrBanCount(self, value=1):
		self._banCount += value

	def getBanCount(self):
		return self._banCount

	def getEndOfBanTime(self, defaultBT=None):
		"""Return the unban time (start + ban time); MAX_TIME for permanent bans."""
		bantime = (self._banTime if self._banTime is not None else defaultBT)
		# permanent
		if bantime == -1:
			return Ticket.MAX_TIME
		# unban time (end of ban):
		return self._time + bantime

	def isTimedOut(self, time, defaultBT=None):
		"""True if the ban has expired at *time*; permanent bans never time out."""
		bantime = (self._banTime if self._banTime is not None else defaultBT)
		# permanent
		if bantime == -1:
			return False
		# timed out
		return (time > self._time + bantime)

	def setAttempt(self, value):
		self._data['failures'] = value

	def getAttempt(self):
		return self._data['failures']

	def setMatches(self, matches):
		"""Set matched log lines; a falsy value removes the entry entirely."""
		if matches:
			self._data['matches'] = matches
		else:
			try:
				del self._data['matches']
			except KeyError:
				pass

	def getMatches(self):
		# entries stored as list/tuple fragments are joined into single lines:
		return [(line if not isinstance(line, (list, tuple)) else "".join(line)) \
			for line in self._data.get('matches', ())]

	@property
	def restored(self):
		return self._flags & Ticket.RESTORED
	@restored.setter
	def restored(self, value):
		if value:
			self._flags |= Ticket.RESTORED
		else:
			self._flags &= ~(Ticket.RESTORED)

	@property
	def banned(self):
		return self._flags & Ticket.BANNED
	@banned.setter
	def banned(self, value):
		if value:
			self._flags |= Ticket.BANNED
		else:
			self._flags &= ~(Ticket.BANNED)

	def setData(self, *args, **argv):
		"""Set or merge ticket data.

		One positional arg (a dict) replaces the data; two args act as a
		single key/value pair; more are treated as a flat k,v,k,v,... list.
		Keyword args are merged in. None values are always filtered out.
		"""
		# if overwrite - set data and filter None values:
		if len(args) == 1:
			self._data = dict([(k,v) for k,v in args[0].items() if v is not None])
		# add k,v list or dict (merge):
		elif len(args) == 2:
			self._data.update((args,))
		elif len(args) > 2:
			self._data.update((k,v) for k,v in zip(*[iter(args)]*2))
		if len(argv):
			self._data.update(argv)
		# filter (delete) None values:
		self._data = dict([(k,v) for k,v in self._data.items() if v is not None])

	def getData(self, key=None, default=None):
		"""Get ticket data.

		key=None returns the whole dict; a callable key filters by predicate;
		an iterable (non-scalar) key filters by key set; otherwise the single
		value for *key* (or *default*) is returned.
		"""
		# return whole data dict:
		if key is None:
			return self._data
		# return default if not exists:
		if not self._data:
			return default
		if not isinstance(key,(str,type(None),int,float,bool,complex)):
			# return filtered by lambda/function:
			if callable(key):
				return dict([(k,v) for k,v in self._data.items() if key(k)])
			# return filtered by keys:
			if hasattr(key, '__iter__'):
				return dict([(k,v) for k,v in self._data.items() if k in key])
		# return single value of data:
		return self._data.get(key, default)

	@property
	def banEpoch(self):
		# NOTE(review): '_banEpoch' is not in __slots__, so the setter only works
		# on subclasses that have a __dict__ (FailTicket/BanTicket) - confirm intended.
		return getattr(self, '_banEpoch', 0)
	@banEpoch.setter
	def banEpoch(self, value):
		self._banEpoch = value
|
||||
|
||||
|
||||
class FailTicket(Ticket):
	"""Ticket that counts failures/retries for an identifier.

	In addition to the base ticket it tracks the time of the first failure
	(`_firstTime`) and a retry counter (`_retry`) which may be artificially
	increased for repeat offenders (ban-time increment feature).
	"""

	def __init__(self, ip=None, time=None, matches=None, data=None, ticket=None):
		"""FailTicket constructor; same parameters as Ticket.

		The *data* default was changed from a shared mutable ``{}`` to ``None``
		(mutable default-argument fix); behavior is unchanged.
		"""
		# this class variables:
		self._firstTime = None
		self._retry = 1
		# create/copy using default ticket constructor:
		Ticket.__init__(self, ip, time, matches, data, ticket)
		# init, unless everything was just copied from another fail ticket:
		if not isinstance(ticket, FailTicket):
			self._firstTime = time if time is not None else self.getTime()
			self._retry = self._data.get('failures', 1)

	def setRetry(self, value):
		""" Set artificial retry count, normally equal failures / attempt,
		used in incremental features (BanTimeIncr) to increase retry count for bad IPs
		"""
		self._retry = value
		if not self._data['failures']:
			self._data['failures'] = 1
		if not value:
			# retry reset to zero clears failures and matches as well:
			self._data['failures'] = 0
			self._data['matches'] = []

	def getRetry(self):
		""" Returns failures / attempt count or
		artificial retry count increased for bad IPs
		"""
		return self._retry

	def adjustTime(self, time, maxTime):
		""" Adjust time of ticket and current attempts count considering given maxTime
		as estimation from rate by previous known interval (if it exceeds the findTime)
		"""
		if time > self._time:
			# expand current interval and attempts count (considering maxTime):
			if self._firstTime < time - maxTime:
				# adjust retry calculated as estimation from rate by previous known interval:
				self._retry = int(round(self._retry / float(time - self._firstTime) * maxTime))
				self._firstTime = time - maxTime
			# last time of failure:
			self._time = time

	def inc(self, matches=None, attempt=1, count=1):
		"""Increase retry by *count* and failures by *attempt*; append *matches*."""
		self._retry += count
		self._data['failures'] += attempt
		if matches:
			# we should duplicate "matches", because possibly referenced to multiple tickets:
			if self._data['matches']:
				self._data['matches'] = self._data['matches'] + matches
			else:
				self._data['matches'] = matches

	@staticmethod
	def wrap(o):
		"""In-place downcast: turn ticket *o* into a FailTicket and return it."""
		o.__class__ = FailTicket
		return o
|
||||
|
||||
##
# Ban Ticket.
#
# This class extends the FailTicket class. It is mainly used by the BanManager.

class BanTicket(FailTicket):
	"""Ticket representing an active ban; used by the BanManager."""

	@staticmethod
	def wrap(o):
		# in-place downcast: reuse the existing ticket object as a BanTicket
		o.__class__ = BanTicket
		return o
|
||||
522
fail2ban-master/fail2ban/server/transmitter.py
Normal file
522
fail2ban-master/fail2ban/server/transmitter.py
Normal file
@@ -0,0 +1,522 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
# Author: Cyril Jaquier
|
||||
#
|
||||
|
||||
__author__ = "Cyril Jaquier"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
|
||||
__license__ = "GPL"
|
||||
|
||||
import time
|
||||
import json
|
||||
|
||||
from ..helpers import getLogger, logging
|
||||
from .. import version
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
|
||||
class Transmitter:
	"""Decodes and dispatches fail2ban protocol commands to the server.

	Each command is a sequence whose first element names the operation
	(e.g. ``["set", jail, "bantime", value]``).  ``proceed`` never raises:
	it answers with ``(0, result)`` on success or ``(1, exception)`` on error.
	"""

	##
	# Constructor.
	#
	# @param server The server reference

	def __init__(self, server):
		self.__server = server
		# __quiet suppresses echoing back current values while a command
		# stream (reload / server-stream) is being executed:
		self.__quiet = 0

	##
	# Proceeds a command.
	#
	# Proceeds an incoming command.
	# @param command The incoming command

	def proceed(self, command):
		"""Execute *command*; return (0, result) or (1, exception) - never raises."""
		# Deserialize object
		logSys.log(5, "Command: %r", command)
		try:
			ret = self.__commandHandler(command)
			ack = 0, ret
		except Exception as e:
			# full traceback only when debug logging is enabled:
			logSys.error("Command %r has failed. Received %r",
						command, e,
						exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
			ack = 1, e
		return ack

	##
	# Handle an command.
	#
	#

	def __commandHandler(self, command):
		"""Dispatch a single top-level command; raises Exception if unknown."""
		name = command[0]
		if name == "ping":
			return "pong"
		elif name == "add":
			# add <jail> [<backend>] - create a new jail:
			name = command[1]
			if name == "--all":
				raise Exception("Reserved name %r" % (name,))
			try:
				backend = command[2]
			except IndexError:
				backend = "auto"
			self.__server.addJail(name, backend)
			return name
		elif name == "multi-set":
			return self.__commandSet(command[1:], True)
		elif name == "set":
			return self.__commandSet(command[1:])
		elif name == "start":
			name = command[1]
			self.__server.startJail(name)
			return None
		elif name == "stop":
			# stop             - shut down the server
			# stop --all       - stop all jails
			# stop <jail>      - stop a single jail
			if len(command) == 1:
				self.__server.quit()
			elif command[1] == "--all":
				self.__server.stopAllJail()
			else:
				name = command[1]
				self.__server.stopJail(name)
			return None
		elif name == "reload":
			# reload <opts> <stream> - replay a config stream between
			# reloadJails(begin=True) and reloadJails(begin=False):
			opts = command[1:3]
			self.__quiet = 1
			try:
				self.__server.reloadJails(*opts, begin=True)
				for cmd in command[3]:
					self.__commandHandler(cmd)
			finally:
				self.__quiet = 0
				self.__server.reloadJails(*opts, begin=False)
			return 'OK'
		elif name == "unban" and len(command) >= 2:
			# unban in all jails:
			value = command[1:]
			# if all ips:
			if len(value) == 1 and value[0] == "--all":
				return self.__server.setUnbanIP()
			return self.__server.setUnbanIP(None, value)
		elif name == "banned":
			# check IP is banned in all jails:
			return self.__server.banned(None, command[1:])
		elif name == "echo":
			return command[1:]
		elif name == "server-status":
			logSys.debug("Status: ready")
			return "Server ready"
		elif name == "server-stream":
			# batch of commands executed quietly:
			self.__quiet = 1
			try:
				for cmd in command[1]:
					self.__commandHandler(cmd)
			finally:
				self.__quiet = 0
			return None
		elif name == "sleep":
			value = command[1]
			time.sleep(float(value))
			return None
		elif name == "flushlogs":
			return self.__server.flushLogs()
		elif name == "get":
			return self.__commandGet(command[1:])
		elif name == "status":
			return self.status(command[1:])
		elif name in ("stats", "statistic", "statistics"):
			return self.__server.status("--all", "stats")
		elif name == "version":
			return version.version
		elif name == "config-error":
			logSys.error(command[1])
			return None
		raise Exception("Invalid command")

	def __commandSet(self, command, multiple=False):
		"""Handle "set"/"multi-set": command[0] is a global option or a jail name.

		Unless __quiet is active, the resulting (new) value is returned so the
		client can echo it.  *multiple* marks batched sub-commands (multi-set).
		"""
		name = command[0]
		# Logging
		if name == "loglevel":
			value = command[1]
			self.__server.setLogLevel(value)
			if self.__quiet: return
			return self.__server.getLogLevel()
		elif name == "logtarget":
			value = command[1]
			if self.__server.setLogTarget(value):
				if self.__quiet: return
				return self.__server.getLogTarget()
			else:
				raise Exception("Failed to change log target")
		elif name == "syslogsocket":
			value = command[1]
			if self.__server.setSyslogSocket(value):
				if self.__quiet: return
				return self.__server.getSyslogSocket()
			else:
				raise Exception("Failed to change syslog socket")
		elif name == "allowipv6":
			value = command[1]
			self.__server.setIPv6IsAllowed(value)
			if self.__quiet: return
			return value
		#Thread
		elif name == "thread":
			value = command[1]
			return self.__server.setThreadOptions(value)
		#Database
		elif name == "dbfile":
			self.__server.setDatabase(command[1])
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				if self.__quiet: return
				return db.filename
		elif name == "dbmaxmatches":
			db = self.__server.getDatabase()
			if db is None:
				logSys.log(logging.MSG, "dbmaxmatches setting was not in effect since no db yet")
				return None
			else:
				db.maxMatches = int(command[1])
				if self.__quiet: return
				return db.maxMatches
		elif name == "dbpurgeage":
			db = self.__server.getDatabase()
			if db is None:
				logSys.log(logging.MSG, "dbpurgeage setting was not in effect since no db yet")
				return None
			else:
				db.purgeage = command[1]
				if self.__quiet: return
				return db.purgeage
		# Jail - from here on "name" is a jail name and command[1] the option:
		elif command[1] == "idle":
			if command[2] == "on":
				self.__server.setIdleJail(name, True)
			elif command[2] == "off":
				self.__server.setIdleJail(name, False)
			else:
				raise Exception("Invalid idle option, must be 'on' or 'off'")
			if self.__quiet: return
			return self.__server.getIdleJail(name)
		# Filter
		elif command[1] == "ignoreself":
			value = command[2]
			self.__server.setIgnoreSelf(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreSelf(name)
		elif command[1] == "addignoreip":
			for value in command[2:]:
				self.__server.addIgnoreIP(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreIP(name)
		elif command[1] == "delignoreip":
			value = command[2]
			self.__server.delIgnoreIP(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreIP(name)
		elif command[1] == "ignorecommand":
			value = command[2]
			self.__server.setIgnoreCommand(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreCommand(name)
		elif command[1] == "ignorecache":
			value = command[2]
			self.__server.setIgnoreCache(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreCache(name)
		elif command[1] == "addlogpath":
			value = command[2]
			tail = False
			if len(command) == 4:
				if command[3].lower() == "tail":
					tail = True
				elif command[3].lower() != "head":
					raise ValueError("File option must be 'head' or 'tail'")
			elif len(command) > 4:
				raise ValueError("Only one file can be added at a time")
			self.__server.addLogPath(name, value, tail)
			if self.__quiet: return
			return self.__server.getLogPath(name)
		elif command[1] == "dellogpath":
			value = command[2]
			self.__server.delLogPath(name, value)
			if self.__quiet: return
			return self.__server.getLogPath(name)
		elif command[1] == "logencoding":
			value = command[2]
			self.__server.setLogEncoding(name, value)
			if self.__quiet: return
			return self.__server.getLogEncoding(name)
		elif command[1] == "addjournalmatch": # pragma: systemd no cover
			value = command[2:]
			self.__server.addJournalMatch(name, value)
			if self.__quiet: return
			return self.__server.getJournalMatch(name)
		elif command[1] == "deljournalmatch": # pragma: systemd no cover
			value = command[2:]
			self.__server.delJournalMatch(name, value)
			if self.__quiet: return
			return self.__server.getJournalMatch(name)
		elif command[1] == "prefregex":
			value = command[2]
			self.__server.setPrefRegex(name, value)
			if self.__quiet: return
			v = self.__server.getPrefRegex(name)
			return v.getRegex() if v else ""
		elif command[1] == "addfailregex":
			value = command[2]
			self.__server.addFailRegex(name, value, multiple=multiple)
			if multiple:
				return True
			if self.__quiet: return
			return self.__server.getFailRegex(name)
		elif command[1] == "delfailregex":
			value = int(command[2])
			self.__server.delFailRegex(name, value)
			if self.__quiet: return
			return self.__server.getFailRegex(name)
		elif command[1] == "addignoreregex":
			value = command[2]
			self.__server.addIgnoreRegex(name, value, multiple=multiple)
			if multiple:
				return True
			if self.__quiet: return
			return self.__server.getIgnoreRegex(name)
		elif command[1] == "delignoreregex":
			value = int(command[2])
			self.__server.delIgnoreRegex(name, value)
			if self.__quiet: return
			return self.__server.getIgnoreRegex(name)
		elif command[1] == "usedns":
			value = command[2]
			self.__server.setUseDns(name, value)
			if self.__quiet: return
			return self.__server.getUseDns(name)
		elif command[1] == "findtime":
			value = command[2]
			self.__server.setFindTime(name, value)
			if self.__quiet: return
			return self.__server.getFindTime(name)
		elif command[1] == "datepattern":
			value = command[2]
			self.__server.setDatePattern(name, value)
			if self.__quiet: return
			return self.__server.getDatePattern(name)
		elif command[1] == "logtimezone":
			value = command[2]
			self.__server.setLogTimeZone(name, value)
			if self.__quiet: return
			return self.__server.getLogTimeZone(name)
		elif command[1] == "maxmatches":
			value = command[2]
			self.__server.setMaxMatches(name, int(value))
			if self.__quiet: return
			return self.__server.getMaxMatches(name)
		elif command[1] == "maxretry":
			value = command[2]
			self.__server.setMaxRetry(name, int(value))
			if self.__quiet: return
			return self.__server.getMaxRetry(name)
		elif command[1] == "maxlines":
			value = command[2]
			self.__server.setMaxLines(name, int(value))
			if self.__quiet: return
			return self.__server.getMaxLines(name)
		# command
		elif command[1] == "bantime":
			value = command[2]
			self.__server.setBanTime(name, value)
			if self.__quiet: return
			return self.__server.getBanTime(name)
		elif command[1] == "attempt":
			value = command[2:]
			return self.__server.addAttemptIP(name, *value)
		elif command[1].startswith("bantime."):
			# incremental ban time sub-option, e.g. "bantime.factor":
			value = command[2]
			opt = command[1][len("bantime."):]
			self.__server.setBanTimeExtra(name, opt, value)
			if self.__quiet: return
			return self.__server.getBanTimeExtra(name, opt)
		elif command[1] == "banip":
			value = command[2:]
			return self.__server.setBanIP(name,value)
		elif command[1] == "unbanip":
			ifexists = True
			if command[2] != "--report-absent":
				value = command[2:]
			else:
				ifexists = False
				value = command[3:]
			return self.__server.setUnbanIP(name, value, ifexists=ifexists)
		elif command[1] == "addaction":
			args = [command[2]]
			if len(command) > 3:
				# python action: [name, pythonModule, json-kwargs]
				args.extend([command[3], json.loads(command[4])])
			self.__server.addAction(name, *args)
			if self.__quiet: return
			return args[0]
		elif command[1] == "delaction":
			value = command[2]
			self.__server.delAction(name, value)
			return None
		elif command[1] == "action":
			# set an action property or invoke an action method; callables
			# receive json-decoded keyword arguments:
			actionname = command[2]
			action = self.__server.getAction(name, actionname)
			if multiple:
				for cmd in command[3]:
					logSys.log(5, " %r", cmd)
					actionkey = cmd[0]
					if callable(getattr(action, actionkey, None)):
						actionvalue = json.loads(cmd[1]) if len(cmd)>1 else {}
						getattr(action, actionkey)(**actionvalue)
					else:
						actionvalue = cmd[1]
						setattr(action, actionkey, actionvalue)
				return True
			else:
				actionkey = command[3]
				if callable(getattr(action, actionkey, None)):
					actionvalue = json.loads(command[4]) if len(command)>4 else {}
					if self.__quiet: return
					return getattr(action, actionkey)(**actionvalue)
				else:
					actionvalue = command[4]
					setattr(action, actionkey, actionvalue)
					if self.__quiet: return
					return getattr(action, actionkey)
		raise Exception("Invalid command %r (no set action or not yet implemented)" % (command[1],))

	def __commandGet(self, command):
		"""Handle "get": command[0] is a global option or a jail name."""
		name = command[0]
		# Logging
		if name == "loglevel":
			return self.__server.getLogLevel()
		elif name == "logtarget":
			return self.__server.getLogTarget()
		elif name == "syslogsocket":
			return self.__server.getSyslogSocket()
		#Thread
		elif name == "thread":
			return self.__server.getThreadOptions()
		#Database
		elif name == "dbfile":
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				return db.filename
		elif name == "dbmaxmatches":
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				return db.maxMatches
		elif name == "dbpurgeage":
			db = self.__server.getDatabase()
			if db is None:
				return None
			else:
				return db.purgeage
		# Jail, Filter
		elif command[1] == "banned":
			# check IP is banned in the given jail:
			return self.__server.banned(name, command[2:])
		elif command[1] == "logpath":
			return self.__server.getLogPath(name)
		elif command[1] == "logencoding":
			return self.__server.getLogEncoding(name)
		elif command[1] == "journalmatch": # pragma: systemd no cover
			return self.__server.getJournalMatch(name)
		elif command[1] == "ignoreself":
			return self.__server.getIgnoreSelf(name)
		elif command[1] == "ignoreip":
			return self.__server.getIgnoreIP(name)
		elif command[1] == "ignorecommand":
			return self.__server.getIgnoreCommand(name)
		elif command[1] == "ignorecache":
			return self.__server.getIgnoreCache(name)
		elif command[1] == "prefregex":
			v = self.__server.getPrefRegex(name)
			return v.getRegex() if v else ""
		elif command[1] == "failregex":
			return self.__server.getFailRegex(name)
		elif command[1] == "ignoreregex":
			return self.__server.getIgnoreRegex(name)
		elif command[1] == "usedns":
			return self.__server.getUseDns(name)
		elif command[1] == "findtime":
			return self.__server.getFindTime(name)
		elif command[1] == "datepattern":
			return self.__server.getDatePattern(name)
		elif command[1] == "logtimezone":
			return self.__server.getLogTimeZone(name)
		elif command[1] == "maxmatches":
			return self.__server.getMaxMatches(name)
		elif command[1] == "maxretry":
			return self.__server.getMaxRetry(name)
		elif command[1] == "maxlines":
			return self.__server.getMaxLines(name)
		# Action
		elif command[1] == "bantime":
			return self.__server.getBanTime(name)
		elif command[1] == "banip":
			return self.__server.getBanList(name,
				withTime=len(command) > 2 and command[2] == "--with-time")
		elif command[1].startswith("bantime."):
			opt = command[1][len("bantime."):]
			return self.__server.getBanTimeExtra(name, opt)
		elif command[1] == "actions":
			return list(self.__server.getActions(name).keys())
		elif command[1] == "action":
			actionname = command[2]
			actionvalue = command[3]
			action = self.__server.getAction(name, actionname)
			return getattr(action, actionvalue)
		elif command[1] == "actionproperties":
			# public non-callable attributes of the action:
			actionname = command[2]
			action = self.__server.getAction(name, actionname)
			return [
				key for key in dir(action)
				if not key.startswith("_") and
					not callable(getattr(action, key))]
		elif command[1] == "actionmethods":
			# public callable attributes of the action:
			actionname = command[2]
			action = self.__server.getAction(name, actionname)
			return [
				key for key in dir(action)
				if not key.startswith("_") and callable(getattr(action, key))]
		raise Exception("Invalid command (no get action or not yet implemented)")

	def status(self, command):
		"""Return server status (no args), all-jails status ("--all") or one
		jail's status, optionally with a flavor (default "basic")."""
		if len(command) == 0:
			return self.__server.status()
		elif len(command) >= 1 and len(command) <= 2:
			name = command[0]
			flavor = command[1] if len(command) == 2 else "basic"
			if name == "--all":
				return self.__server.status("--all", flavor)
			return self.__server.statusJail(name, flavor=flavor)
		raise Exception("Invalid command (no status)")
|
||||
359
fail2ban-master/fail2ban/server/utils.py
Normal file
359
fail2ban-master/fail2ban/server/utils.py
Normal file
@@ -0,0 +1,359 @@
|
||||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
|
||||
# vi: set ft=python sts=4 ts=4 sw=4 noet :
|
||||
|
||||
# This file is part of Fail2Ban.
|
||||
#
|
||||
# Fail2Ban is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Fail2Ban is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Fail2Ban; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
|
||||
__author__ = "Serg G. Brester (sebres) and Fail2Ban Contributors"
|
||||
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2012 Yaroslav Halchenko, 2012-2015 Serg G. Brester"
|
||||
__license__ = "GPL"
|
||||
|
||||
import fcntl
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
from threading import Lock
|
||||
import time
|
||||
import types
|
||||
from ..helpers import getLogger, _merge_dicts, uni_decode
|
||||
from collections import OrderedDict
|
||||
|
||||
import importlib.machinery
|
||||
|
||||
# Gets the instance of the logger.
|
||||
logSys = getLogger(__name__)
|
||||
|
||||
# Some hints on common abnormal exit codes
|
||||
_RETCODE_HINTS = {
|
||||
127: '"Command not found". Make sure that all commands in %(realCmd)r '
|
||||
'are in the PATH of fail2ban-server process '
|
||||
'(grep -a PATH= /proc/`pidof -x fail2ban-server`/environ). '
|
||||
'You may want to start '
|
||||
'"fail2ban-server -f" separately, initiate it with '
|
||||
'"fail2ban-client reload" in another shell session and observe if '
|
||||
'additional informative error messages appear in the terminals.'
|
||||
}
|
||||
|
||||
# Dictionary to lookup signal name from number
|
||||
signame = dict((num, name)
|
||||
for name, num in signal.__dict__.items() if name.startswith("SIG"))
|
||||
|
||||
class Utils():
|
||||
"""Utilities provide diverse static methods like executes OS shell commands, etc.
|
||||
"""
|
||||
|
||||
DEFAULT_SLEEP_TIME = 2
|
||||
DEFAULT_SLEEP_INTERVAL = 0.2
|
||||
DEFAULT_SHORT_INTERVAL = 0.001
|
||||
DEFAULT_SHORTEST_INTERVAL = DEFAULT_SHORT_INTERVAL / 100
|
||||
|
||||
|
||||
class Cache(object):
	"""A simple thread-safe cache bounded both by entry count and by TTL.

	Entries are stored FIFO-ordered; when the size limit is reached the
	oldest entries are evicted first.
	"""

	def __init__(self, *args, **kwargs):
		# options first (maxCount/maxTime), then the storage and its guard:
		self.setOptions(*args, **kwargs)
		self._cache = OrderedDict()
		self.__lock = Lock()

	def setOptions(self, maxCount=1000, maxTime=60):
		"""Configure the size limit and the time-to-live (seconds)."""
		self.maxCount = maxCount
		self.maxTime = maxTime

	def __len__(self):
		return len(self._cache)

	def get(self, k, defv=None):
		"""Return the cached value for `k`, or `defv` if absent/expired."""
		entry = self._cache.get(k)
		if entry:
			value, validUntil = entry
			if validUntil > time.time():
				return value
			# stale entry - drop it before falling back to the default:
			self.unset(k)
		return defv

	def set(self, k, v):
		"""Store `v` under `k` with a fresh TTL, evicting old entries if full."""
		now = time.time()
		# keep a local reference to avoid racing on attribute rebinds:
		cache = self._cache
		with self.__lock:
			if len(cache) >= self.maxCount:
				# evict from the front (FIFO) until a slot is free;
				# stop early once a non-expired entry was removed and
				# there is room for the new one:
				while cache:
					oldKey, oldEntry = cache.popitem(last=False)
					if oldEntry[1] > now and len(cache) < self.maxCount:
						break
			cache[k] = (v, now + self.maxTime)

	def unset(self, k):
		"""Remove `k` from the cache (no error if missing)."""
		with self.__lock:
			self._cache.pop(k, None)

	def clear(self):
		"""Drop all cached entries."""
		with self.__lock:
			self._cache.clear()
|
||||
|
||||
|
||||
@staticmethod
|
||||
def setFBlockMode(fhandle, value):
|
||||
flags = fcntl.fcntl(fhandle, fcntl.F_GETFL)
|
||||
if not value:
|
||||
flags |= os.O_NONBLOCK
|
||||
else:
|
||||
flags &= ~os.O_NONBLOCK
|
||||
fcntl.fcntl(fhandle, fcntl.F_SETFL, flags)
|
||||
return flags
|
||||
|
||||
@staticmethod
|
||||
def buildShellCmd(realCmd, varsDict):
|
||||
"""Generates new shell command as array, contains map as variables to
|
||||
arguments statement (varsStat), the command (realCmd) used this variables and
|
||||
the list of the arguments, mapped from varsDict
|
||||
|
||||
Example:
|
||||
buildShellCmd('echo "V2: $v2, V1: $v1"', {"v1": "val 1", "v2": "val 2", "vUnused": "unused var"})
|
||||
returns:
|
||||
['v1=$0 v2=$1 vUnused=$2 \necho "V2: $v2, V1: $v1"', 'val 1', 'val 2', 'unused var']
|
||||
"""
|
||||
# build map as array of vars and command line array:
|
||||
varsStat = ""
|
||||
if not isinstance(realCmd, list):
|
||||
realCmd = [realCmd]
|
||||
i = len(realCmd)-1
|
||||
for k, v in varsDict.items():
|
||||
varsStat += "%s=$%s " % (k, i)
|
||||
realCmd.append(v)
|
||||
i += 1
|
||||
realCmd[0] = varsStat + "\n" + realCmd[0]
|
||||
return realCmd
|
||||
|
||||
@staticmethod
def executeCmd(realCmd, timeout=60, shell=True, output=False, tout_kill_tree=True,
	success_codes=(0,), varsDict=None):
	"""Executes a command.

	Parameters
	----------
	realCmd : str
		The command to execute.
	timeout : int
		The time out in seconds for the command.
	shell : bool
		If shell is True (default), the specified command (may be a string) will be
		executed through the shell.
	output : bool
		If output is True, the function returns tuple (success, stdoutdata, stderrdata, returncode).
		If False, just indication of success is returned
	tout_kill_tree : bool
		If True (default), on timeout the whole process group is sent SIGKILL
		even if the direct child already terminated after SIGTERM.
	success_codes : tuple
		Return codes that are considered a successful execution (default: (0,)).
	varsDict: dict
		variables supplied to the command (or to the shell script)

	Returns
	-------
	bool or (bool, str, str, int)
		True if the command succeeded and with stdout, stderr, returncode if
		output was set to True; if output is False and more than one
		success code is given, a (success, returncode) tuple is returned.

	Raises
	------
	OSError
		If command fails to be executed.
	RuntimeError
		If command execution times out.
	"""
	stdout = stderr = None
	retcode = None
	popen = env = None
	if varsDict:
		if shell:
			# build map as array of vars and command line array:
			realCmd = Utils.buildShellCmd(realCmd, varsDict)
		else: # pragma: no cover - currently unused
			# non-shell mode: hand the variables over via the environment instead
			env = _merge_dicts(os.environ, varsDict)
	# short hex identifier for log correlation (avoids repeating the full command):
	realCmdId = id(realCmd)
	# deferred "exec" log line; set to None once emitted so it fires at most once:
	logCmd = lambda level: logSys.log(level, "%x -- exec: %s", realCmdId, realCmd)
	try:
		popen = subprocess.Popen(
			realCmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, env=env,
			preexec_fn=os.setsid # so that killpg does not kill our process
		)
		# wait with timeout for process has terminated:
		retcode = popen.poll()
		if retcode is None:
			def _popen_wait_end():
				retcode = popen.poll()
				return (True, retcode) if retcode is not None else None
			# popen.poll is fast operation so we can use the shortest sleep interval:
			retcode = Utils.wait_for(_popen_wait_end, timeout, Utils.DEFAULT_SHORTEST_INTERVAL)
			if retcode:
				retcode = retcode[1]
		# if timeout:
		if retcode is None:
			if logCmd: logCmd(logging.ERROR); logCmd = None
			logSys.error("%x -- timed out after %s seconds." %
				(realCmdId, timeout))
			pgid = os.getpgid(popen.pid)
			# if not tree - first try to terminate and then kill, otherwise - kill (-9) only:
			os.killpg(pgid, signal.SIGTERM) # Terminate the process
			time.sleep(Utils.DEFAULT_SLEEP_INTERVAL)
			retcode = popen.poll()
			#logSys.debug("%s -- terminated %s ", realCmd, retcode)
			if retcode is None or tout_kill_tree: # Still going...
				os.killpg(pgid, signal.SIGKILL) # Kill the process
				time.sleep(Utils.DEFAULT_SLEEP_INTERVAL)
				if retcode is None: # pragma: no cover - too sporadic
					retcode = popen.poll()
				#logSys.debug("%s -- killed %s ", realCmd, retcode)
			# group is gone but no return code could be read - treat as killed:
			if retcode is None and not Utils.pid_exists(pgid): # pragma: no cover
				retcode = signal.SIGKILL
	except OSError as e:
		if logCmd: logCmd(logging.ERROR); logCmd = None
		stderr = "%s -- failed with %s" % (realCmd, e)
		logSys.error(stderr)
		if not popen:
			return False if not output else (False, stdout, stderr, retcode)

	# success -> DEBUG, failure -> ERROR; also drives whether output is logged below:
	std_level = logging.DEBUG if retcode in success_codes else logging.ERROR
	if std_level >= logSys.getEffectiveLevel():
		if logCmd: logCmd(std_level-1 if std_level == logging.DEBUG else logging.ERROR); logCmd = None
	# if we need output (to return or to log it):
	if output or std_level >= logSys.getEffectiveLevel():

		# if was timeouted (killed/terminated) - to prevent waiting, set std handles to non-blocking mode.
		if popen.stdout:
			try:
				if retcode is None or retcode < 0:
					Utils.setFBlockMode(popen.stdout, False)
				stdout = popen.stdout.read()
			except IOError as e: # pragma: no cover
				logSys.error(" ... -- failed to read stdout %s", e)
			if stdout is not None and stdout != '' and std_level >= logSys.getEffectiveLevel():
				for l in stdout.splitlines():
					logSys.log(std_level, "%x -- stdout: %r", realCmdId, uni_decode(l))
		if popen.stderr:
			try:
				if retcode is None or retcode < 0:
					Utils.setFBlockMode(popen.stderr, False)
				stderr = popen.stderr.read()
			except IOError as e: # pragma: no cover
				logSys.error(" ... -- failed to read stderr %s", e)
			if stderr is not None and stderr != '' and std_level >= logSys.getEffectiveLevel():
				for l in stderr.splitlines():
					logSys.log(std_level, "%x -- stderr: %r", realCmdId, uni_decode(l))

	if popen.stdout: popen.stdout.close()
	if popen.stderr: popen.stderr.close()

	success = False
	if retcode in success_codes:
		logSys.debug("%x -- returned successfully %i", realCmdId, retcode)
		success = True
	elif retcode is None:
		logSys.error("%x -- unable to kill PID %i", realCmdId, popen.pid)
	elif retcode < 0 or retcode > 128:
		# dash would return negative while bash 128 + n
		sigcode = -retcode if retcode < 0 else retcode - 128
		logSys.error("%x -- killed with %s (return code: %s)",
			realCmdId, signame.get(sigcode, "signal %i" % sigcode), retcode)
	else:
		msg = _RETCODE_HINTS.get(retcode, None)
		logSys.error("%x -- returned %i", realCmdId, retcode)
		if msg:
			logSys.info("HINT on %i: %s", retcode, msg % locals())
	if output:
		return success, stdout, stderr, retcode
	return success if len(success_codes) == 1 else (success, retcode)
|
||||
|
||||
@staticmethod
|
||||
def wait_for(cond, timeout, interval=None):
|
||||
"""Wait until condition expression `cond` is True, up to `timeout` sec
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cond : callable
|
||||
The expression to check condition
|
||||
(should return equivalent to bool True if wait successful).
|
||||
timeout : float or callable
|
||||
The time out for end of wait
|
||||
(in seconds or callable that returns True if timeout occurred).
|
||||
interval : float (optional)
|
||||
Polling start interval for wait cycle in seconds.
|
||||
|
||||
Returns
|
||||
-------
|
||||
variable
|
||||
The return value of the last call of `cond`,
|
||||
logical False (or None, 0, etc) if timeout occurred.
|
||||
"""
|
||||
#logSys.log(5, " wait for %r, tout: %r / %r", cond, timeout, interval)
|
||||
ini = 1 # to delay initializations until/when necessary
|
||||
while True:
|
||||
ret = cond()
|
||||
if ret:
|
||||
return ret
|
||||
if ini:
|
||||
ini = stm = 0
|
||||
if not callable(timeout):
|
||||
time0 = time.time() + timeout
|
||||
timeout_expr = lambda: time.time() > time0
|
||||
else:
|
||||
timeout_expr = timeout
|
||||
if timeout_expr():
|
||||
break
|
||||
stm = min(stm + (interval or Utils.DEFAULT_SLEEP_INTERVAL), Utils.DEFAULT_SLEEP_TIME)
|
||||
time.sleep(stm)
|
||||
return ret
|
||||
|
||||
# Solution from http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
|
||||
# under cc by-sa 3.0
|
||||
if os.name == 'posix':
|
||||
@staticmethod
|
||||
def pid_exists(pid):
|
||||
"""Check whether pid exists in the current process table."""
|
||||
import errno
|
||||
if pid < 0:
|
||||
return False
|
||||
try:
|
||||
os.kill(pid, 0)
|
||||
except OSError as e:
|
||||
return e.errno == errno.EPERM
|
||||
else:
|
||||
return True
|
||||
else: # pragma: no cover (no windows currently supported)
|
||||
@staticmethod
|
||||
def pid_exists(pid):
|
||||
import ctypes
|
||||
kernel32 = ctypes.windll.kernel32
|
||||
SYNCHRONIZE = 0x100000
|
||||
|
||||
process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
|
||||
if process != 0:
|
||||
kernel32.CloseHandle(process)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def load_python_module(pythonModule):
|
||||
pythonModuleName = os.path.splitext(
|
||||
os.path.basename(pythonModule))[0]
|
||||
ldr = importlib.machinery.SourceFileLoader(pythonModuleName, pythonModule)
|
||||
mod = types.ModuleType(ldr.name)
|
||||
ldr.exec_module(mod)
|
||||
return mod
|
||||
Reference in New Issue
Block a user